/* Broadcom NetXtreme-C/E network driver.
 *
 * Copyright (c) 2014-2016 Broadcom Corporation
 * Copyright (c) 2016-2018 Broadcom Limited
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 */

#include <linux/module.h>

#include <linux/stringify.h>
#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/dma-mapping.h>
#include <linux/bitops.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/delay.h>
#include <asm/byteorder.h>
#include <asm/page.h>
#include <linux/time.h>
#include <linux/mii.h>
#include <linux/if.h>
#include <linux/if_vlan.h>
#include <linux/if_bridge.h>
#include <linux/rtc.h>
#include <linux/bpf.h>
#include <net/ip.h>
#include <net/tcp.h>
#include <net/udp.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <net/udp_tunnel.h>
#include <linux/workqueue.h>
#include <linux/prefetch.h>
#include <linux/cache.h>
#include <linux/log2.h>
#include <linux/aer.h>
#include <linux/bitmap.h>
#include <linux/cpu_rmap.h>
#include <linux/cpumask.h>
#include <net/pkt_cls.h>
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>

#include "bnxt_hsi.h"
#include "bnxt.h"
#include "bnxt_ulp.h"
#include "bnxt_sriov.h"
#include "bnxt_ethtool.h"
#include "bnxt_dcb.h"
#include "bnxt_xdp.h"
#include "bnxt_vfr.h"
#include "bnxt_tc.h"
#include "bnxt_devlink.h"
#include "bnxt_debugfs.h"

#define BNXT_TX_TIMEOUT         (5 * HZ)

static const char version[] =
        "Broadcom NetXtreme-C/E driver " DRV_MODULE_NAME " v" DRV_MODULE_VERSION "\n";

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Broadcom BCM573xx network driver");
MODULE_VERSION(DRV_MODULE_VERSION);

#define BNXT_RX_OFFSET (NET_SKB_PAD + NET_IP_ALIGN)
#define BNXT_RX_DMA_OFFSET NET_SKB_PAD
#define BNXT_RX_COPY_THRESH 256

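/* TX packets no longer than this may be copied into the doorbell BAR and
 * "pushed" inline instead of being DMA'd from host memory; see the push
 * path in bnxt_start_xmit().
 */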
#define BNXT_TX_PUSH_THRESH 164

enum board_idx {
        BCM57301,
        BCM57302,
        BCM57304,
        BCM57417_NPAR,
        BCM58700,
        BCM57311,
        BCM57312,
        BCM57402,
        BCM57404,
        BCM57406,
        BCM57402_NPAR,
        BCM57407,
        BCM57412,
        BCM57414,
        BCM57416,
        BCM57417,
        BCM57412_NPAR,
        BCM57314,
        BCM57417_SFP,
        BCM57416_SFP,
        BCM57404_NPAR,
        BCM57406_NPAR,
        BCM57407_SFP,
        BCM57407_NPAR,
        BCM57414_NPAR,
        BCM57416_NPAR,
        BCM57452,
        BCM57454,
        BCM5745x_NPAR,
        BCM57508,
        BCM58802,
        BCM58804,
        BCM58808,
        NETXTREME_E_VF,
        NETXTREME_C_VF,
        NETXTREME_S_VF,
        NETXTREME_E_P5_VF,
};

/* indexed by enum above */
static const struct {
        char *name;
} board_info[] = {
        [BCM57301] = { "Broadcom BCM57301 NetXtreme-C 10Gb Ethernet" },
        [BCM57302] = { "Broadcom BCM57302 NetXtreme-C 10Gb/25Gb Ethernet" },
        [BCM57304] = { "Broadcom BCM57304 NetXtreme-C 10Gb/25Gb/40Gb/50Gb Ethernet" },
        [BCM57417_NPAR] = { "Broadcom BCM57417 NetXtreme-E Ethernet Partition" },
        [BCM58700] = { "Broadcom BCM58700 Nitro 1Gb/2.5Gb/10Gb Ethernet" },
        [BCM57311] = { "Broadcom BCM57311 NetXtreme-C 10Gb Ethernet" },
        [BCM57312] = { "Broadcom BCM57312 NetXtreme-C 10Gb/25Gb Ethernet" },
        [BCM57402] = { "Broadcom BCM57402 NetXtreme-E 10Gb Ethernet" },
        [BCM57404] = { "Broadcom BCM57404 NetXtreme-E 10Gb/25Gb Ethernet" },
        [BCM57406] = { "Broadcom BCM57406 NetXtreme-E 10GBase-T Ethernet" },
        [BCM57402_NPAR] = { "Broadcom BCM57402 NetXtreme-E Ethernet Partition" },
        [BCM57407] = { "Broadcom BCM57407 NetXtreme-E 10GBase-T Ethernet" },
        [BCM57412] = { "Broadcom BCM57412 NetXtreme-E 10Gb Ethernet" },
        [BCM57414] = { "Broadcom BCM57414 NetXtreme-E 10Gb/25Gb Ethernet" },
        [BCM57416] = { "Broadcom BCM57416 NetXtreme-E 10GBase-T Ethernet" },
        [BCM57417] = { "Broadcom BCM57417 NetXtreme-E 10GBase-T Ethernet" },
        [BCM57412_NPAR] = { "Broadcom BCM57412 NetXtreme-E Ethernet Partition" },
        [BCM57314] = { "Broadcom BCM57314 NetXtreme-C 10Gb/25Gb/40Gb/50Gb Ethernet" },
        [BCM57417_SFP] = { "Broadcom BCM57417 NetXtreme-E 10Gb/25Gb Ethernet" },
        [BCM57416_SFP] = { "Broadcom BCM57416 NetXtreme-E 10Gb Ethernet" },
        [BCM57404_NPAR] = { "Broadcom BCM57404 NetXtreme-E Ethernet Partition" },
        [BCM57406_NPAR] = { "Broadcom BCM57406 NetXtreme-E Ethernet Partition" },
        [BCM57407_SFP] = { "Broadcom BCM57407 NetXtreme-E 25Gb Ethernet" },
        [BCM57407_NPAR] = { "Broadcom BCM57407 NetXtreme-E Ethernet Partition" },
        [BCM57414_NPAR] = { "Broadcom BCM57414 NetXtreme-E Ethernet Partition" },
        [BCM57416_NPAR] = { "Broadcom BCM57416 NetXtreme-E Ethernet Partition" },
        [BCM57452] = { "Broadcom BCM57452 NetXtreme-E 10Gb/25Gb/40Gb/50Gb Ethernet" },
        [BCM57454] = { "Broadcom BCM57454 NetXtreme-E 10Gb/25Gb/40Gb/50Gb/100Gb Ethernet" },
        [BCM5745x_NPAR] = { "Broadcom BCM5745x NetXtreme-E Ethernet Partition" },
        [BCM57508] = { "Broadcom BCM57508 NetXtreme-E 10Gb/25Gb/50Gb/100Gb/200Gb Ethernet" },
        [BCM58802] = { "Broadcom BCM58802 NetXtreme-S 10Gb/25Gb/40Gb/50Gb Ethernet" },
        [BCM58804] = { "Broadcom BCM58804 NetXtreme-S 10Gb/25Gb/40Gb/50Gb/100Gb Ethernet" },
        [BCM58808] = { "Broadcom BCM58808 NetXtreme-S 10Gb/25Gb/40Gb/50Gb/100Gb Ethernet" },
        [NETXTREME_E_VF] = { "Broadcom NetXtreme-E Ethernet Virtual Function" },
        [NETXTREME_C_VF] = { "Broadcom NetXtreme-C Ethernet Virtual Function" },
        [NETXTREME_S_VF] = { "Broadcom NetXtreme-S Ethernet Virtual Function" },
        [NETXTREME_E_P5_VF] = { "Broadcom BCM5750X NetXtreme-E Ethernet Virtual Function" },
};

static const struct pci_device_id bnxt_pci_tbl[] = {
        { PCI_VDEVICE(BROADCOM, 0x1604), .driver_data = BCM5745x_NPAR },
        { PCI_VDEVICE(BROADCOM, 0x1605), .driver_data = BCM5745x_NPAR },
        { PCI_VDEVICE(BROADCOM, 0x1614), .driver_data = BCM57454 },
        { PCI_VDEVICE(BROADCOM, 0x16c0), .driver_data = BCM57417_NPAR },
        { PCI_VDEVICE(BROADCOM, 0x16c8), .driver_data = BCM57301 },
        { PCI_VDEVICE(BROADCOM, 0x16c9), .driver_data = BCM57302 },
        { PCI_VDEVICE(BROADCOM, 0x16ca), .driver_data = BCM57304 },
        { PCI_VDEVICE(BROADCOM, 0x16cc), .driver_data = BCM57417_NPAR },
        { PCI_VDEVICE(BROADCOM, 0x16cd), .driver_data = BCM58700 },
        { PCI_VDEVICE(BROADCOM, 0x16ce), .driver_data = BCM57311 },
        { PCI_VDEVICE(BROADCOM, 0x16cf), .driver_data = BCM57312 },
        { PCI_VDEVICE(BROADCOM, 0x16d0), .driver_data = BCM57402 },
        { PCI_VDEVICE(BROADCOM, 0x16d1), .driver_data = BCM57404 },
        { PCI_VDEVICE(BROADCOM, 0x16d2), .driver_data = BCM57406 },
        { PCI_VDEVICE(BROADCOM, 0x16d4), .driver_data = BCM57402_NPAR },
        { PCI_VDEVICE(BROADCOM, 0x16d5), .driver_data = BCM57407 },
        { PCI_VDEVICE(BROADCOM, 0x16d6), .driver_data = BCM57412 },
        { PCI_VDEVICE(BROADCOM, 0x16d7), .driver_data = BCM57414 },
        { PCI_VDEVICE(BROADCOM, 0x16d8), .driver_data = BCM57416 },
        { PCI_VDEVICE(BROADCOM, 0x16d9), .driver_data = BCM57417 },
        { PCI_VDEVICE(BROADCOM, 0x16de), .driver_data = BCM57412_NPAR },
        { PCI_VDEVICE(BROADCOM, 0x16df), .driver_data = BCM57314 },
        { PCI_VDEVICE(BROADCOM, 0x16e2), .driver_data = BCM57417_SFP },
        { PCI_VDEVICE(BROADCOM, 0x16e3), .driver_data = BCM57416_SFP },
        { PCI_VDEVICE(BROADCOM, 0x16e7), .driver_data = BCM57404_NPAR },
        { PCI_VDEVICE(BROADCOM, 0x16e8), .driver_data = BCM57406_NPAR },
        { PCI_VDEVICE(BROADCOM, 0x16e9), .driver_data = BCM57407_SFP },
        { PCI_VDEVICE(BROADCOM, 0x16ea), .driver_data = BCM57407_NPAR },
        { PCI_VDEVICE(BROADCOM, 0x16eb), .driver_data = BCM57412_NPAR },
        { PCI_VDEVICE(BROADCOM, 0x16ec), .driver_data = BCM57414_NPAR },
        { PCI_VDEVICE(BROADCOM, 0x16ed), .driver_data = BCM57414_NPAR },
        { PCI_VDEVICE(BROADCOM, 0x16ee), .driver_data = BCM57416_NPAR },
        { PCI_VDEVICE(BROADCOM, 0x16ef), .driver_data = BCM57416_NPAR },
        { PCI_VDEVICE(BROADCOM, 0x16f0), .driver_data = BCM58808 },
        { PCI_VDEVICE(BROADCOM, 0x16f1), .driver_data = BCM57452 },
        { PCI_VDEVICE(BROADCOM, 0x1750), .driver_data = BCM57508 },
        { PCI_VDEVICE(BROADCOM, 0xd802), .driver_data = BCM58802 },
        { PCI_VDEVICE(BROADCOM, 0xd804), .driver_data = BCM58804 },
#ifdef CONFIG_BNXT_SRIOV
        { PCI_VDEVICE(BROADCOM, 0x1606), .driver_data = NETXTREME_E_VF },
        { PCI_VDEVICE(BROADCOM, 0x1609), .driver_data = NETXTREME_E_VF },
        { PCI_VDEVICE(BROADCOM, 0x16c1), .driver_data = NETXTREME_E_VF },
        { PCI_VDEVICE(BROADCOM, 0x16cb), .driver_data = NETXTREME_C_VF },
        { PCI_VDEVICE(BROADCOM, 0x16d3), .driver_data = NETXTREME_E_VF },
        { PCI_VDEVICE(BROADCOM, 0x16dc), .driver_data = NETXTREME_E_VF },
        { PCI_VDEVICE(BROADCOM, 0x16e1), .driver_data = NETXTREME_C_VF },
        { PCI_VDEVICE(BROADCOM, 0x16e5), .driver_data = NETXTREME_C_VF },
        { PCI_VDEVICE(BROADCOM, 0x1807), .driver_data = NETXTREME_E_P5_VF },
        { PCI_VDEVICE(BROADCOM, 0xd800), .driver_data = NETXTREME_S_VF },
#endif
        { 0 }
};

MODULE_DEVICE_TABLE(pci, bnxt_pci_tbl);

static const u16 bnxt_vf_req_snif[] = {
        HWRM_FUNC_CFG,
        HWRM_FUNC_VF_CFG,
        HWRM_PORT_PHY_QCFG,
        HWRM_CFA_L2_FILTER_ALLOC,
};

static const u16 bnxt_async_events_arr[] = {
        ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE,
        ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD,
        ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED,
        ASYNC_EVENT_CMPL_EVENT_ID_VF_CFG_CHANGE,
        ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE,
};

static struct workqueue_struct *bnxt_pf_wq;

static bool bnxt_vf_pciid(enum board_idx idx)
{
        return (idx == NETXTREME_C_VF || idx == NETXTREME_E_VF ||
                idx == NETXTREME_S_VF || idx == NETXTREME_E_P5_VF);
}

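/* Doorbell helpers.  Legacy chips use 32-bit completion-ring doorbells
 * keyed by DB_KEY_CP; P5 chips (e.g. the BCM57508) use 64-bit doorbells
 * keyed by db_key64, with distinct NQ (notification queue) record types.
 */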
#define DB_CP_REARM_FLAGS       (DB_KEY_CP | DB_IDX_VALID)
#define DB_CP_FLAGS             (DB_KEY_CP | DB_IDX_VALID | DB_IRQ_DIS)
#define DB_CP_IRQ_DIS_FLAGS     (DB_KEY_CP | DB_IRQ_DIS)

#define BNXT_CP_DB_IRQ_DIS(db)                                          \
                writel(DB_CP_IRQ_DIS_FLAGS, db)

#define BNXT_DB_CQ(db, idx)                                             \
        writel(DB_CP_FLAGS | RING_CMP(idx), (db)->doorbell)

#define BNXT_DB_NQ_P5(db, idx)                                          \
        writeq((db)->db_key64 | DBR_TYPE_NQ | RING_CMP(idx), (db)->doorbell)

#define BNXT_DB_CQ_ARM(db, idx)                                         \
        writel(DB_CP_REARM_FLAGS | RING_CMP(idx), (db)->doorbell)

#define BNXT_DB_NQ_ARM_P5(db, idx)                                      \
        writeq((db)->db_key64 | DBR_TYPE_NQ_ARM | RING_CMP(idx), (db)->doorbell)

static void bnxt_db_nq(struct bnxt *bp, struct bnxt_db_info *db, u32 idx)
{
        if (bp->flags & BNXT_FLAG_CHIP_P5)
                BNXT_DB_NQ_P5(db, idx);
        else
                BNXT_DB_CQ(db, idx);
}

static void bnxt_db_nq_arm(struct bnxt *bp, struct bnxt_db_info *db, u32 idx)
{
        if (bp->flags & BNXT_FLAG_CHIP_P5)
                BNXT_DB_NQ_ARM_P5(db, idx);
        else
                BNXT_DB_CQ_ARM(db, idx);
}

static void bnxt_db_cq(struct bnxt *bp, struct bnxt_db_info *db, u32 idx)
{
        if (bp->flags & BNXT_FLAG_CHIP_P5)
                writeq(db->db_key64 | DBR_TYPE_CQ_ARMALL | RING_CMP(idx),
                       db->doorbell);
        else
                BNXT_DB_CQ(db, idx);
}

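/* TX length hints for the BD flags, indexed by packet length in 512-byte
 * units (length >> 9); see the lookup in bnxt_start_xmit().
 */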
const u16 bnxt_lhint_arr[] = {
        TX_BD_FLAGS_LHINT_512_AND_SMALLER,
        TX_BD_FLAGS_LHINT_512_TO_1023,
        TX_BD_FLAGS_LHINT_1024_TO_2047,
        TX_BD_FLAGS_LHINT_1024_TO_2047,
        TX_BD_FLAGS_LHINT_2048_AND_LARGER,
        TX_BD_FLAGS_LHINT_2048_AND_LARGER,
        TX_BD_FLAGS_LHINT_2048_AND_LARGER,
        TX_BD_FLAGS_LHINT_2048_AND_LARGER,
        TX_BD_FLAGS_LHINT_2048_AND_LARGER,
        TX_BD_FLAGS_LHINT_2048_AND_LARGER,
        TX_BD_FLAGS_LHINT_2048_AND_LARGER,
        TX_BD_FLAGS_LHINT_2048_AND_LARGER,
        TX_BD_FLAGS_LHINT_2048_AND_LARGER,
        TX_BD_FLAGS_LHINT_2048_AND_LARGER,
        TX_BD_FLAGS_LHINT_2048_AND_LARGER,
        TX_BD_FLAGS_LHINT_2048_AND_LARGER,
        TX_BD_FLAGS_LHINT_2048_AND_LARGER,
        TX_BD_FLAGS_LHINT_2048_AND_LARGER,
        TX_BD_FLAGS_LHINT_2048_AND_LARGER,
};

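/* When a packet is transmitted on behalf of a VF representor, its metadata
 * dst carries the CFA action/port so the hardware can steer the packet to
 * the right function; a return of 0 means no special action.
 */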
static u16 bnxt_xmit_get_cfa_action(struct sk_buff *skb)
{
        struct metadata_dst *md_dst = skb_metadata_dst(skb);

        if (!md_dst || md_dst->type != METADATA_HW_PORT_MUX)
                return 0;

        return md_dst->u.port_info.port_id;
}

static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
        struct bnxt *bp = netdev_priv(dev);
        struct tx_bd *txbd;
        struct tx_bd_ext *txbd1;
        struct netdev_queue *txq;
        int i;
        dma_addr_t mapping;
        unsigned int length, pad = 0;
        u32 len, free_size, vlan_tag_flags, cfa_action, flags;
        u16 prod, last_frag;
        struct pci_dev *pdev = bp->pdev;
        struct bnxt_tx_ring_info *txr;
        struct bnxt_sw_tx_bd *tx_buf;

        i = skb_get_queue_mapping(skb);
        if (unlikely(i >= bp->tx_nr_rings)) {
                dev_kfree_skb_any(skb);
                return NETDEV_TX_OK;
        }

        txq = netdev_get_tx_queue(dev, i);
        txr = &bp->tx_ring[bp->tx_ring_map[i]];
        prod = txr->tx_prod;

        free_size = bnxt_tx_avail(bp, txr);
        if (unlikely(free_size < skb_shinfo(skb)->nr_frags + 2)) {
                netif_tx_stop_queue(txq);
                return NETDEV_TX_BUSY;
        }

        length = skb->len;
        len = skb_headlen(skb);
        last_frag = skb_shinfo(skb)->nr_frags;

        txbd = &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];

        txbd->tx_bd_opaque = prod;

        tx_buf = &txr->tx_buf_ring[prod];
        tx_buf->skb = skb;
        tx_buf->nr_frags = last_frag;

        vlan_tag_flags = 0;
        cfa_action = bnxt_xmit_get_cfa_action(skb);
        if (skb_vlan_tag_present(skb)) {
                vlan_tag_flags = TX_BD_CFA_META_KEY_VLAN |
                                 skb_vlan_tag_get(skb);
                /* Currently only 802.1Q and 802.1ad VLAN offloads are
                 * supported; the QINQ1, QINQ2 and QINQ3 VLAN headers are
                 * deprecated.
                 */
                if (skb->vlan_proto == htons(ETH_P_8021Q))
                        vlan_tag_flags |= 1 << TX_BD_CFA_META_TPID_SHIFT;
        }

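        /* Push mode: when the ring is completely idle and the packet is
         * small enough, the BDs and packet data are written straight into
         * the doorbell BAR so the NIC does not have to DMA them from host
         * memory, trading CPU MMIO writes for lower small-packet latency.
         */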
        if (free_size == bp->tx_ring_size && length <= bp->tx_push_thresh) {
                struct tx_push_buffer *tx_push_buf = txr->tx_push;
                struct tx_push_bd *tx_push = &tx_push_buf->push_bd;
                struct tx_bd_ext *tx_push1 = &tx_push->txbd2;
                void __iomem *db = txr->tx_db.doorbell;
                void *pdata = tx_push_buf->data;
                u64 *end;
                int j, push_len;

                /* Set COAL_NOW to be ready quickly for the next push */
                tx_push->tx_bd_len_flags_type =
                        cpu_to_le32((length << TX_BD_LEN_SHIFT) |
                                        TX_BD_TYPE_LONG_TX_BD |
                                        TX_BD_FLAGS_LHINT_512_AND_SMALLER |
                                        TX_BD_FLAGS_COAL_NOW |
                                        TX_BD_FLAGS_PACKET_END |
                                        (2 << TX_BD_FLAGS_BD_CNT_SHIFT));

                if (skb->ip_summed == CHECKSUM_PARTIAL)
                        tx_push1->tx_bd_hsize_lflags =
                                        cpu_to_le32(TX_BD_FLAGS_TCP_UDP_CHKSUM);
                else
                        tx_push1->tx_bd_hsize_lflags = 0;

                tx_push1->tx_bd_cfa_meta = cpu_to_le32(vlan_tag_flags);
                tx_push1->tx_bd_cfa_action =
                        cpu_to_le32(cfa_action << TX_BD_CFA_ACTION_SHIFT);

                end = pdata + length;
                end = PTR_ALIGN(end, 8) - 1;
                *end = 0;

                skb_copy_from_linear_data(skb, pdata, len);
                pdata += len;
                for (j = 0; j < last_frag; j++) {
                        skb_frag_t *frag = &skb_shinfo(skb)->frags[j];
                        void *fptr;

                        fptr = skb_frag_address_safe(frag);
                        if (!fptr)
                                goto normal_tx;

                        memcpy(pdata, fptr, skb_frag_size(frag));
                        pdata += skb_frag_size(frag);
                }

                txbd->tx_bd_len_flags_type = tx_push->tx_bd_len_flags_type;
                txbd->tx_bd_haddr = txr->data_mapping;
                prod = NEXT_TX(prod);
                txbd = &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];
                memcpy(txbd, tx_push1, sizeof(*txbd));
                prod = NEXT_TX(prod);
                tx_push->doorbell =
                        cpu_to_le32(DB_KEY_TX_PUSH | DB_LONG_TX_PUSH | prod);
                txr->tx_prod = prod;

                tx_buf->is_push = 1;
                netdev_tx_sent_queue(txq, skb->len);
                wmb();  /* Sync is_push and byte queue before pushing data */

                push_len = (length + sizeof(*tx_push) + 7) / 8;
                if (push_len > 16) {
                        __iowrite64_copy(db, tx_push_buf, 16);
                        __iowrite32_copy(db + 4, tx_push_buf + 1,
                                         (push_len - 16) << 1);
                } else {
                        __iowrite64_copy(db, tx_push_buf, push_len);
                }

                goto tx_done;
        }

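/* Normal TX path: build a long BD chain and let the NIC DMA the head and
 * each fragment from host memory.
 */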
normal_tx:
        if (length < BNXT_MIN_PKT_SIZE) {
                pad = BNXT_MIN_PKT_SIZE - length;
                if (skb_pad(skb, pad)) {
                        /* SKB already freed. */
                        tx_buf->skb = NULL;
                        return NETDEV_TX_OK;
                }
                length = BNXT_MIN_PKT_SIZE;
        }

        mapping = dma_map_single(&pdev->dev, skb->data, len, DMA_TO_DEVICE);

        if (unlikely(dma_mapping_error(&pdev->dev, mapping))) {
                dev_kfree_skb_any(skb);
                tx_buf->skb = NULL;
                return NETDEV_TX_OK;
        }

        dma_unmap_addr_set(tx_buf, mapping, mapping);
        flags = (len << TX_BD_LEN_SHIFT) | TX_BD_TYPE_LONG_TX_BD |
                ((last_frag + 2) << TX_BD_FLAGS_BD_CNT_SHIFT);

        txbd->tx_bd_haddr = cpu_to_le64(mapping);

        prod = NEXT_TX(prod);
        txbd1 = (struct tx_bd_ext *)
                &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];

        txbd1->tx_bd_hsize_lflags = 0;
        if (skb_is_gso(skb)) {
                u32 hdr_len;

                if (skb->encapsulation)
                        hdr_len = skb_inner_network_offset(skb) +
                                skb_inner_network_header_len(skb) +
                                inner_tcp_hdrlen(skb);
                else
                        hdr_len = skb_transport_offset(skb) +
                                tcp_hdrlen(skb);

                txbd1->tx_bd_hsize_lflags = cpu_to_le32(TX_BD_FLAGS_LSO |
                                        TX_BD_FLAGS_T_IPID |
                                        (hdr_len << (TX_BD_HSIZE_SHIFT - 1)));
                length = skb_shinfo(skb)->gso_size;
                txbd1->tx_bd_mss = cpu_to_le32(length);
                length += hdr_len;
        } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
                txbd1->tx_bd_hsize_lflags =
                        cpu_to_le32(TX_BD_FLAGS_TCP_UDP_CHKSUM);
                txbd1->tx_bd_mss = 0;
        }

        length >>= 9;
        if (unlikely(length >= ARRAY_SIZE(bnxt_lhint_arr))) {
                dev_warn_ratelimited(&pdev->dev, "Dropped oversize %d bytes TX packet.\n",
                                     skb->len);
                i = 0;
                goto tx_dma_error;
        }
        flags |= bnxt_lhint_arr[length];
        txbd->tx_bd_len_flags_type = cpu_to_le32(flags);

        txbd1->tx_bd_cfa_meta = cpu_to_le32(vlan_tag_flags);
        txbd1->tx_bd_cfa_action =
                        cpu_to_le32(cfa_action << TX_BD_CFA_ACTION_SHIFT);
        for (i = 0; i < last_frag; i++) {
                skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

                prod = NEXT_TX(prod);
                txbd = &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];

                len = skb_frag_size(frag);
                mapping = skb_frag_dma_map(&pdev->dev, frag, 0, len,
                                           DMA_TO_DEVICE);

                if (unlikely(dma_mapping_error(&pdev->dev, mapping)))
                        goto tx_dma_error;

                tx_buf = &txr->tx_buf_ring[prod];
                dma_unmap_addr_set(tx_buf, mapping, mapping);

                txbd->tx_bd_haddr = cpu_to_le64(mapping);

                flags = len << TX_BD_LEN_SHIFT;
                txbd->tx_bd_len_flags_type = cpu_to_le32(flags);
        }

        flags &= ~TX_BD_LEN;
        txbd->tx_bd_len_flags_type =
                cpu_to_le32(((len + pad) << TX_BD_LEN_SHIFT) | flags |
                            TX_BD_FLAGS_PACKET_END);

        netdev_tx_sent_queue(txq, skb->len);

        /* Sync BD data before updating doorbell */
        wmb();

        prod = NEXT_TX(prod);
        txr->tx_prod = prod;

        if (!skb->xmit_more || netif_xmit_stopped(txq))
                bnxt_db_write(bp, &txr->tx_db, prod);

tx_done:

        mmiowb();

        if (unlikely(bnxt_tx_avail(bp, txr) <= MAX_SKB_FRAGS + 1)) {
                if (skb->xmit_more && !tx_buf->is_push)
                        bnxt_db_write(bp, &txr->tx_db, prod);

                netif_tx_stop_queue(txq);

                /* netif_tx_stop_queue() must be done before checking
                 * tx index in bnxt_tx_avail() below, because in
                 * bnxt_tx_int(), we update tx index before checking for
                 * netif_tx_queue_stopped().
                 */
                smp_mb();
                if (bnxt_tx_avail(bp, txr) > bp->tx_wake_thresh)
                        netif_tx_wake_queue(txq);
        }
        return NETDEV_TX_OK;

tx_dma_error:
        last_frag = i;

        /* start back at beginning and unmap skb */
        prod = txr->tx_prod;
        tx_buf = &txr->tx_buf_ring[prod];
        tx_buf->skb = NULL;
        dma_unmap_single(&pdev->dev, dma_unmap_addr(tx_buf, mapping),
                         skb_headlen(skb), PCI_DMA_TODEVICE);
        prod = NEXT_TX(prod);

        /* unmap remaining mapped pages */
        for (i = 0; i < last_frag; i++) {
                prod = NEXT_TX(prod);
                tx_buf = &txr->tx_buf_ring[prod];
                dma_unmap_page(&pdev->dev, dma_unmap_addr(tx_buf, mapping),
                               skb_frag_size(&skb_shinfo(skb)->frags[i]),
                               PCI_DMA_TODEVICE);
        }

        dev_kfree_skb_any(skb);
        return NETDEV_TX_OK;
}

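/* TX completion: unmap and free up to @nr_pkts completed packets, advance
 * the consumer index, and wake the queue if it was stopped and enough
 * descriptors have been freed.
 */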
static void bnxt_tx_int(struct bnxt *bp, struct bnxt_napi *bnapi, int nr_pkts)
{
        struct bnxt_tx_ring_info *txr = bnapi->tx_ring;
        struct netdev_queue *txq = netdev_get_tx_queue(bp->dev, txr->txq_index);
        u16 cons = txr->tx_cons;
        struct pci_dev *pdev = bp->pdev;
        int i;
        unsigned int tx_bytes = 0;

        for (i = 0; i < nr_pkts; i++) {
                struct bnxt_sw_tx_bd *tx_buf;
                struct sk_buff *skb;
                int j, last;

                tx_buf = &txr->tx_buf_ring[cons];
                cons = NEXT_TX(cons);
                skb = tx_buf->skb;
                tx_buf->skb = NULL;

                if (tx_buf->is_push) {
                        tx_buf->is_push = 0;
                        goto next_tx_int;
                }

                dma_unmap_single(&pdev->dev, dma_unmap_addr(tx_buf, mapping),
                                 skb_headlen(skb), PCI_DMA_TODEVICE);
                last = tx_buf->nr_frags;

                for (j = 0; j < last; j++) {
                        cons = NEXT_TX(cons);
                        tx_buf = &txr->tx_buf_ring[cons];
                        dma_unmap_page(
                                &pdev->dev,
                                dma_unmap_addr(tx_buf, mapping),
                                skb_frag_size(&skb_shinfo(skb)->frags[j]),
                                PCI_DMA_TODEVICE);
                }

next_tx_int:
                cons = NEXT_TX(cons);

                tx_bytes += skb->len;
                dev_kfree_skb_any(skb);
        }

        netdev_tx_completed_queue(txq, nr_pkts, tx_bytes);
        txr->tx_cons = cons;

        /* Need to make the tx_cons update visible to bnxt_start_xmit()
         * before checking for netif_tx_queue_stopped().  Without the
         * memory barrier, there is a small possibility that bnxt_start_xmit()
         * will miss it and cause the queue to be stopped forever.
         */
        smp_mb();

        if (unlikely(netif_tx_queue_stopped(txq)) &&
            (bnxt_tx_avail(bp, txr) > bp->tx_wake_thresh)) {
                __netif_tx_lock(txq, smp_processor_id());
                if (netif_tx_queue_stopped(txq) &&
                    bnxt_tx_avail(bp, txr) > bp->tx_wake_thresh &&
                    txr->dev_state != BNXT_DEV_STATE_CLOSING)
                        netif_tx_wake_queue(txq);
                __netif_tx_unlock(txq);
        }
}

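/* RX buffer allocation.  In page mode (e.g. when XDP is in use) each RX BD
 * gets a full page; otherwise the buffer is kmalloc'd and turned into an
 * skb with build_skb() at completion time.
 */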
static struct page *__bnxt_alloc_rx_page(struct bnxt *bp, dma_addr_t *mapping,
                                         gfp_t gfp)
{
        struct device *dev = &bp->pdev->dev;
        struct page *page;

        page = alloc_page(gfp);
        if (!page)
                return NULL;

        *mapping = dma_map_page_attrs(dev, page, 0, PAGE_SIZE, bp->rx_dir,
                                      DMA_ATTR_WEAK_ORDERING);
        if (dma_mapping_error(dev, *mapping)) {
                __free_page(page);
                return NULL;
        }
        *mapping += bp->rx_dma_offset;
        return page;
}

static inline u8 *__bnxt_alloc_rx_data(struct bnxt *bp, dma_addr_t *mapping,
                                       gfp_t gfp)
{
        u8 *data;
        struct pci_dev *pdev = bp->pdev;

        data = kmalloc(bp->rx_buf_size, gfp);
        if (!data)
                return NULL;

        *mapping = dma_map_single_attrs(&pdev->dev, data + bp->rx_dma_offset,
                                        bp->rx_buf_use_size, bp->rx_dir,
                                        DMA_ATTR_WEAK_ORDERING);

        if (dma_mapping_error(&pdev->dev, *mapping)) {
                kfree(data);
                data = NULL;
        }
        return data;
}

int bnxt_alloc_rx_data(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
                       u16 prod, gfp_t gfp)
{
        struct rx_bd *rxbd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
        struct bnxt_sw_rx_bd *rx_buf = &rxr->rx_buf_ring[prod];
        dma_addr_t mapping;

        if (BNXT_RX_PAGE_MODE(bp)) {
                struct page *page = __bnxt_alloc_rx_page(bp, &mapping, gfp);

                if (!page)
                        return -ENOMEM;

                rx_buf->data = page;
                rx_buf->data_ptr = page_address(page) + bp->rx_offset;
        } else {
                u8 *data = __bnxt_alloc_rx_data(bp, &mapping, gfp);

                if (!data)
                        return -ENOMEM;

                rx_buf->data = data;
                rx_buf->data_ptr = data + bp->rx_offset;
        }
        rx_buf->mapping = mapping;

        rxbd->rx_bd_haddr = cpu_to_le64(mapping);
        return 0;
}

void bnxt_reuse_rx_data(struct bnxt_rx_ring_info *rxr, u16 cons, void *data)
{
        u16 prod = rxr->rx_prod;
        struct bnxt_sw_rx_bd *cons_rx_buf, *prod_rx_buf;
        struct rx_bd *cons_bd, *prod_bd;

        prod_rx_buf = &rxr->rx_buf_ring[prod];
        cons_rx_buf = &rxr->rx_buf_ring[cons];

        prod_rx_buf->data = data;
        prod_rx_buf->data_ptr = cons_rx_buf->data_ptr;

        prod_rx_buf->mapping = cons_rx_buf->mapping;

        prod_bd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
        cons_bd = &rxr->rx_desc_ring[RX_RING(cons)][RX_IDX(cons)];

        prod_bd->rx_bd_haddr = cons_bd->rx_bd_haddr;
}

static inline u16 bnxt_find_next_agg_idx(struct bnxt_rx_ring_info *rxr, u16 idx)
{
        u16 next, max = rxr->rx_agg_bmap_size;

        next = find_next_zero_bit(rxr->rx_agg_bmap, max, idx);
        if (next >= max)
                next = find_first_zero_bit(rxr->rx_agg_bmap, max);
        return next;
}

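/* Allocate a buffer for the RX aggregation ring.  When the system page
 * size is larger than BNXT_RX_PAGE_SIZE, a single page is carved into
 * multiple aggregation buffers, with get_page() holding a reference for
 * each slice still in use.
 */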
static inline int bnxt_alloc_rx_page(struct bnxt *bp,
                                     struct bnxt_rx_ring_info *rxr,
                                     u16 prod, gfp_t gfp)
{
        struct rx_bd *rxbd =
                &rxr->rx_agg_desc_ring[RX_RING(prod)][RX_IDX(prod)];
        struct bnxt_sw_rx_agg_bd *rx_agg_buf;
        struct pci_dev *pdev = bp->pdev;
        struct page *page;
        dma_addr_t mapping;
        u16 sw_prod = rxr->rx_sw_agg_prod;
        unsigned int offset = 0;

        if (PAGE_SIZE > BNXT_RX_PAGE_SIZE) {
                page = rxr->rx_page;
                if (!page) {
                        page = alloc_page(gfp);
                        if (!page)
                                return -ENOMEM;
                        rxr->rx_page = page;
                        rxr->rx_page_offset = 0;
                }
                offset = rxr->rx_page_offset;
                rxr->rx_page_offset += BNXT_RX_PAGE_SIZE;
                if (rxr->rx_page_offset == PAGE_SIZE)
                        rxr->rx_page = NULL;
                else
                        get_page(page);
        } else {
                page = alloc_page(gfp);
                if (!page)
                        return -ENOMEM;
        }

        mapping = dma_map_page_attrs(&pdev->dev, page, offset,
                                     BNXT_RX_PAGE_SIZE, PCI_DMA_FROMDEVICE,
                                     DMA_ATTR_WEAK_ORDERING);
        if (dma_mapping_error(&pdev->dev, mapping)) {
                __free_page(page);
                return -EIO;
        }

        if (unlikely(test_bit(sw_prod, rxr->rx_agg_bmap)))
                sw_prod = bnxt_find_next_agg_idx(rxr, sw_prod);

        __set_bit(sw_prod, rxr->rx_agg_bmap);
        rx_agg_buf = &rxr->rx_agg_ring[sw_prod];
        rxr->rx_sw_agg_prod = NEXT_RX_AGG(sw_prod);

        rx_agg_buf->page = page;
        rx_agg_buf->offset = offset;
        rx_agg_buf->mapping = mapping;
        rxbd->rx_bd_haddr = cpu_to_le64(mapping);
        rxbd->rx_bd_opaque = sw_prod;
        return 0;
}

static void bnxt_reuse_rx_agg_bufs(struct bnxt_cp_ring_info *cpr, u16 cp_cons,
                                   u32 agg_bufs)
{
        struct bnxt_napi *bnapi = cpr->bnapi;
        struct bnxt *bp = bnapi->bp;
        struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
        u16 prod = rxr->rx_agg_prod;
        u16 sw_prod = rxr->rx_sw_agg_prod;
        u32 i;

        for (i = 0; i < agg_bufs; i++) {
                u16 cons;
                struct rx_agg_cmp *agg;
                struct bnxt_sw_rx_agg_bd *cons_rx_buf, *prod_rx_buf;
                struct rx_bd *prod_bd;
                struct page *page;

                agg = (struct rx_agg_cmp *)
                        &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
                cons = agg->rx_agg_cmp_opaque;
                __clear_bit(cons, rxr->rx_agg_bmap);

                if (unlikely(test_bit(sw_prod, rxr->rx_agg_bmap)))
                        sw_prod = bnxt_find_next_agg_idx(rxr, sw_prod);

                __set_bit(sw_prod, rxr->rx_agg_bmap);
                prod_rx_buf = &rxr->rx_agg_ring[sw_prod];
                cons_rx_buf = &rxr->rx_agg_ring[cons];

                /* It is possible for sw_prod to be equal to cons, so
                 * set cons_rx_buf->page to NULL first.
                 */
                page = cons_rx_buf->page;
                cons_rx_buf->page = NULL;
                prod_rx_buf->page = page;
                prod_rx_buf->offset = cons_rx_buf->offset;

                prod_rx_buf->mapping = cons_rx_buf->mapping;

                prod_bd = &rxr->rx_agg_desc_ring[RX_RING(prod)][RX_IDX(prod)];

                prod_bd->rx_bd_haddr = cpu_to_le64(cons_rx_buf->mapping);
                prod_bd->rx_bd_opaque = sw_prod;

                prod = NEXT_RX_AGG(prod);
                sw_prod = NEXT_RX_AGG(sw_prod);
                cp_cons = NEXT_CMP(cp_cons);
        }
        rxr->rx_agg_prod = prod;
        rxr->rx_sw_agg_prod = sw_prod;
}

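/* Build an skb from a page-mode RX buffer: copy the packet headers (up to
 * the payload offset reported by the hardware, or eth_get_headlen() if it
 * reported none) into the linear area and leave the payload in the page
 * fragment.
 */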
static struct sk_buff *bnxt_rx_page_skb(struct bnxt *bp,
                                        struct bnxt_rx_ring_info *rxr,
                                        u16 cons, void *data, u8 *data_ptr,
                                        dma_addr_t dma_addr,
                                        unsigned int offset_and_len)
{
        unsigned int payload = offset_and_len >> 16;
        unsigned int len = offset_and_len & 0xffff;
        struct skb_frag_struct *frag;
        struct page *page = data;
        u16 prod = rxr->rx_prod;
        struct sk_buff *skb;
        int off, err;

        err = bnxt_alloc_rx_data(bp, rxr, prod, GFP_ATOMIC);
        if (unlikely(err)) {
                bnxt_reuse_rx_data(rxr, cons, data);
                return NULL;
        }
        dma_addr -= bp->rx_dma_offset;
        dma_unmap_page_attrs(&bp->pdev->dev, dma_addr, PAGE_SIZE, bp->rx_dir,
                             DMA_ATTR_WEAK_ORDERING);

        if (unlikely(!payload))
                payload = eth_get_headlen(data_ptr, len);

        skb = napi_alloc_skb(&rxr->bnapi->napi, payload);
        if (!skb) {
                __free_page(page);
                return NULL;
        }

        off = (void *)data_ptr - page_address(page);
        skb_add_rx_frag(skb, 0, page, off, len, PAGE_SIZE);
        memcpy(skb->data - NET_IP_ALIGN, data_ptr - NET_IP_ALIGN,
               payload + NET_IP_ALIGN);

        frag = &skb_shinfo(skb)->frags[0];
        skb_frag_size_sub(frag, payload);
        frag->page_offset += payload;
        skb->data_len -= payload;
        skb->tail += payload;

        return skb;
}

static struct sk_buff *bnxt_rx_skb(struct bnxt *bp,
                                   struct bnxt_rx_ring_info *rxr, u16 cons,
                                   void *data, u8 *data_ptr,
                                   dma_addr_t dma_addr,
                                   unsigned int offset_and_len)
{
        u16 prod = rxr->rx_prod;
        struct sk_buff *skb;
        int err;

        err = bnxt_alloc_rx_data(bp, rxr, prod, GFP_ATOMIC);
        if (unlikely(err)) {
                bnxt_reuse_rx_data(rxr, cons, data);
                return NULL;
        }

        skb = build_skb(data, 0);
        dma_unmap_single_attrs(&bp->pdev->dev, dma_addr, bp->rx_buf_use_size,
                               bp->rx_dir, DMA_ATTR_WEAK_ORDERING);
        if (!skb) {
                kfree(data);
                return NULL;
        }

        skb_reserve(skb, bp->rx_offset);
        skb_put(skb, offset_and_len & 0xffff);
        return skb;
}

static struct sk_buff *bnxt_rx_pages(struct bnxt *bp,
                                     struct bnxt_cp_ring_info *cpr,
                                     struct sk_buff *skb, u16 cp_cons,
                                     u32 agg_bufs)
{
        struct bnxt_napi *bnapi = cpr->bnapi;
        struct pci_dev *pdev = bp->pdev;
        struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
        u16 prod = rxr->rx_agg_prod;
        u32 i;

        for (i = 0; i < agg_bufs; i++) {
                u16 cons, frag_len;
                struct rx_agg_cmp *agg;
                struct bnxt_sw_rx_agg_bd *cons_rx_buf;
                struct page *page;
                dma_addr_t mapping;

                agg = (struct rx_agg_cmp *)
                        &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
                cons = agg->rx_agg_cmp_opaque;
                frag_len = (le32_to_cpu(agg->rx_agg_cmp_len_flags_type) &
                            RX_AGG_CMP_LEN) >> RX_AGG_CMP_LEN_SHIFT;

                cons_rx_buf = &rxr->rx_agg_ring[cons];
                skb_fill_page_desc(skb, i, cons_rx_buf->page,
                                   cons_rx_buf->offset, frag_len);
                __clear_bit(cons, rxr->rx_agg_bmap);

                /* It is possible for bnxt_alloc_rx_page() to allocate
                 * a sw_prod index that equals the cons index, so we
                 * need to clear the cons entry now.
                 */
                mapping = cons_rx_buf->mapping;
                page = cons_rx_buf->page;
                cons_rx_buf->page = NULL;

                if (bnxt_alloc_rx_page(bp, rxr, prod, GFP_ATOMIC) != 0) {
                        struct skb_shared_info *shinfo;
                        unsigned int nr_frags;

                        shinfo = skb_shinfo(skb);
                        nr_frags = --shinfo->nr_frags;
                        __skb_frag_set_page(&shinfo->frags[nr_frags], NULL);

                        dev_kfree_skb(skb);

                        cons_rx_buf->page = page;

                        /* Update prod since possibly some pages have been
                         * allocated already.
                         */
                        rxr->rx_agg_prod = prod;
                        bnxt_reuse_rx_agg_bufs(cpr, cp_cons, agg_bufs - i);
                        return NULL;
                }

                dma_unmap_page_attrs(&pdev->dev, mapping, BNXT_RX_PAGE_SIZE,
                                     PCI_DMA_FROMDEVICE,
                                     DMA_ATTR_WEAK_ORDERING);

                skb->data_len += frag_len;
                skb->len += frag_len;
                skb->truesize += PAGE_SIZE;

                prod = NEXT_RX_AGG(prod);
                cp_cons = NEXT_CMP(cp_cons);
        }
        rxr->rx_agg_prod = prod;
        return skb;
}

static int bnxt_agg_bufs_valid(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
                               u8 agg_bufs, u32 *raw_cons)
{
        u16 last;
        struct rx_agg_cmp *agg;

        *raw_cons = ADV_RAW_CMP(*raw_cons, agg_bufs);
        last = RING_CMP(*raw_cons);
        agg = (struct rx_agg_cmp *)
                &cpr->cp_desc_ring[CP_RING(last)][CP_IDX(last)];
        return RX_AGG_CMP_VALID(agg, *raw_cons);
}

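/* Copybreak: for short packets (up to bp->rx_copy_thresh) copy the data
 * into a freshly allocated skb so the original RX buffer can be reused in
 * place.
 */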
static inline struct sk_buff *bnxt_copy_skb(struct bnxt_napi *bnapi, u8 *data,
                                            unsigned int len,
                                            dma_addr_t mapping)
{
        struct bnxt *bp = bnapi->bp;
        struct pci_dev *pdev = bp->pdev;
        struct sk_buff *skb;

        skb = napi_alloc_skb(&bnapi->napi, len);
        if (!skb)
                return NULL;

        dma_sync_single_for_cpu(&pdev->dev, mapping, bp->rx_copy_thresh,
                                bp->rx_dir);

        memcpy(skb->data - NET_IP_ALIGN, data - NET_IP_ALIGN,
               len + NET_IP_ALIGN);

        dma_sync_single_for_device(&pdev->dev, mapping, bp->rx_copy_thresh,
                                   bp->rx_dir);

        skb_put(skb, len);
        return skb;
}

static int bnxt_discard_rx(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
                           u32 *raw_cons, void *cmp)
{
        struct rx_cmp *rxcmp = cmp;
        u32 tmp_raw_cons = *raw_cons;
        u8 cmp_type, agg_bufs = 0;

        cmp_type = RX_CMP_TYPE(rxcmp);

        if (cmp_type == CMP_TYPE_RX_L2_CMP) {
                agg_bufs = (le32_to_cpu(rxcmp->rx_cmp_misc_v1) &
                            RX_CMP_AGG_BUFS) >>
                           RX_CMP_AGG_BUFS_SHIFT;
        } else if (cmp_type == CMP_TYPE_RX_L2_TPA_END_CMP) {
                struct rx_tpa_end_cmp *tpa_end = cmp;

                agg_bufs = (le32_to_cpu(tpa_end->rx_tpa_end_cmp_misc_v1) &
                            RX_TPA_END_CMP_AGG_BUFS) >>
                           RX_TPA_END_CMP_AGG_BUFS_SHIFT;
        }

        if (agg_bufs) {
                if (!bnxt_agg_bufs_valid(bp, cpr, agg_bufs, &tmp_raw_cons))
                        return -EBUSY;
        }
        *raw_cons = tmp_raw_cons;
        return 0;
}

static void bnxt_queue_sp_work(struct bnxt *bp)
{
        if (BNXT_PF(bp))
                queue_work(bnxt_pf_wq, &bp->sp_task);
        else
                schedule_work(&bp->sp_task);
}

static void bnxt_cancel_sp_work(struct bnxt *bp)
{
        if (BNXT_PF(bp))
                flush_workqueue(bnxt_pf_wq);
        else
                cancel_work_sync(&bp->sp_task);
}

static void bnxt_sched_reset(struct bnxt *bp, struct bnxt_rx_ring_info *rxr)
{
        if (!rxr->bnapi->in_reset) {
                rxr->bnapi->in_reset = true;
                set_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event);
                bnxt_queue_sp_work(bp);
        }
        rxr->rx_next_cons = 0xffff;
}

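/* TPA (TCP packet aggregation, the hardware GRO/LRO engine) start: the
 * chip has begun coalescing a flow into the buffer at the completion's
 * opaque index.  Park that buffer in rx_tpa[agg_id] until the TPA end
 * completion arrives, and recycle a buffer into the vacated ring slot.
 */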
static void bnxt_tpa_start(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
                           struct rx_tpa_start_cmp *tpa_start,
                           struct rx_tpa_start_cmp_ext *tpa_start1)
{
        u8 agg_id = TPA_START_AGG_ID(tpa_start);
        u16 cons, prod;
        struct bnxt_tpa_info *tpa_info;
        struct bnxt_sw_rx_bd *cons_rx_buf, *prod_rx_buf;
        struct rx_bd *prod_bd;
        dma_addr_t mapping;

        cons = tpa_start->rx_tpa_start_cmp_opaque;
        prod = rxr->rx_prod;
        cons_rx_buf = &rxr->rx_buf_ring[cons];
        prod_rx_buf = &rxr->rx_buf_ring[prod];
        tpa_info = &rxr->rx_tpa[agg_id];

        if (unlikely(cons != rxr->rx_next_cons)) {
                bnxt_sched_reset(bp, rxr);
                return;
        }
        /* Store cfa_code in tpa_info to use in tpa_end
         * completion processing.
         */
        tpa_info->cfa_code = TPA_START_CFA_CODE(tpa_start1);
        prod_rx_buf->data = tpa_info->data;
        prod_rx_buf->data_ptr = tpa_info->data_ptr;

        mapping = tpa_info->mapping;
        prod_rx_buf->mapping = mapping;

        prod_bd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];

        prod_bd->rx_bd_haddr = cpu_to_le64(mapping);

        tpa_info->data = cons_rx_buf->data;
        tpa_info->data_ptr = cons_rx_buf->data_ptr;
        cons_rx_buf->data = NULL;
        tpa_info->mapping = cons_rx_buf->mapping;

        tpa_info->len =
                le32_to_cpu(tpa_start->rx_tpa_start_cmp_len_flags_type) >>
                                RX_TPA_START_CMP_LEN_SHIFT;
        if (likely(TPA_START_HASH_VALID(tpa_start))) {
                u32 hash_type = TPA_START_HASH_TYPE(tpa_start);

                tpa_info->hash_type = PKT_HASH_TYPE_L4;
                tpa_info->gso_type = SKB_GSO_TCPV4;
                /* RSS profiles 1 and 3 with extract code 0 for inner 4-tuple */
                if (hash_type == 3 || TPA_START_IS_IPV6(tpa_start1))
                        tpa_info->gso_type = SKB_GSO_TCPV6;
                tpa_info->rss_hash =
                        le32_to_cpu(tpa_start->rx_tpa_start_cmp_rss_hash);
        } else {
                tpa_info->hash_type = PKT_HASH_TYPE_NONE;
                tpa_info->gso_type = 0;
                if (netif_msg_rx_err(bp))
                        netdev_warn(bp->dev, "TPA packet without valid hash\n");
        }
        tpa_info->flags2 = le32_to_cpu(tpa_start1->rx_tpa_start_cmp_flags2);
        tpa_info->metadata = le32_to_cpu(tpa_start1->rx_tpa_start_cmp_metadata);
        tpa_info->hdr_info = le32_to_cpu(tpa_start1->rx_tpa_start_cmp_hdr_info);

        rxr->rx_prod = NEXT_RX(prod);
        cons = NEXT_RX(cons);
        rxr->rx_next_cons = NEXT_RX(cons);
        cons_rx_buf = &rxr->rx_buf_ring[cons];

        bnxt_reuse_rx_data(rxr, cons, cons_rx_buf->data);
        rxr->rx_prod = NEXT_RX(rxr->rx_prod);
        cons_rx_buf->data = NULL;
}

static void bnxt_abort_tpa(struct bnxt_cp_ring_info *cpr, u16 cp_cons,
                           u32 agg_bufs)
{
        if (agg_bufs)
                bnxt_reuse_rx_agg_bufs(cpr, cp_cons, agg_bufs);
}

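/* GRO header fixups: before an aggregated packet can be passed to
 * tcp_gro_complete(), the network/transport header offsets and the TCP
 * pseudo checksum must be set up.  The 5731x variant derives the offsets
 * from the hdr_info words supplied by the hardware; the 5730x variant
 * below works backwards from the payload offset.
 */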
static struct sk_buff *bnxt_gro_func_5731x(struct bnxt_tpa_info *tpa_info,
                                           int payload_off, int tcp_ts,
                                           struct sk_buff *skb)
{
#ifdef CONFIG_INET
        struct tcphdr *th;
        int len, nw_off;
        u16 outer_ip_off, inner_ip_off, inner_mac_off;
        u32 hdr_info = tpa_info->hdr_info;
        bool loopback = false;

        inner_ip_off = BNXT_TPA_INNER_L3_OFF(hdr_info);
        inner_mac_off = BNXT_TPA_INNER_L2_OFF(hdr_info);
        outer_ip_off = BNXT_TPA_OUTER_L3_OFF(hdr_info);

        /* If the packet is an internal loopback packet, the offsets will
         * have an extra 4 bytes.
         */
        if (inner_mac_off == 4) {
                loopback = true;
        } else if (inner_mac_off > 4) {
                __be16 proto = *((__be16 *)(skb->data + inner_ip_off -
                                            ETH_HLEN - 2));

                /* We only support inner IPv4/IPv6.  If we don't see the
                 * correct protocol ID, it must be a loopback packet where
                 * the offsets are off by 4.
                 */
                if (proto != htons(ETH_P_IP) && proto != htons(ETH_P_IPV6))
                        loopback = true;
        }
        if (loopback) {
                /* internal loopback packet, subtract all offsets by 4 */
                inner_ip_off -= 4;
                inner_mac_off -= 4;
                outer_ip_off -= 4;
        }

        nw_off = inner_ip_off - ETH_HLEN;
        skb_set_network_header(skb, nw_off);
        if (tpa_info->flags2 & RX_TPA_START_CMP_FLAGS2_IP_TYPE) {
                struct ipv6hdr *iph = ipv6_hdr(skb);

                skb_set_transport_header(skb, nw_off + sizeof(struct ipv6hdr));
                len = skb->len - skb_transport_offset(skb);
                th = tcp_hdr(skb);
                th->check = ~tcp_v6_check(len, &iph->saddr, &iph->daddr, 0);
        } else {
                struct iphdr *iph = ip_hdr(skb);

                skb_set_transport_header(skb, nw_off + sizeof(struct iphdr));
                len = skb->len - skb_transport_offset(skb);
                th = tcp_hdr(skb);
                th->check = ~tcp_v4_check(len, iph->saddr, iph->daddr, 0);
        }

        if (inner_mac_off) { /* tunnel */
                struct udphdr *uh = NULL;
                __be16 proto = *((__be16 *)(skb->data + outer_ip_off -
                                            ETH_HLEN - 2));

                if (proto == htons(ETH_P_IP)) {
                        struct iphdr *iph = (struct iphdr *)skb->data;

                        if (iph->protocol == IPPROTO_UDP)
                                uh = (struct udphdr *)(iph + 1);
                } else {
                        struct ipv6hdr *iph = (struct ipv6hdr *)skb->data;

                        if (iph->nexthdr == IPPROTO_UDP)
                                uh = (struct udphdr *)(iph + 1);
                }
                if (uh) {
                        if (uh->check)
                                skb_shinfo(skb)->gso_type |=
                                        SKB_GSO_UDP_TUNNEL_CSUM;
                        else
                                skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_TUNNEL;
                }
        }
#endif
        return skb;
}

#define BNXT_IPV4_HDR_SIZE      (sizeof(struct iphdr) + sizeof(struct tcphdr))
#define BNXT_IPV6_HDR_SIZE      (sizeof(struct ipv6hdr) + sizeof(struct tcphdr))

static struct sk_buff *bnxt_gro_func_5730x(struct bnxt_tpa_info *tpa_info,
                                           int payload_off, int tcp_ts,
                                           struct sk_buff *skb)
{
#ifdef CONFIG_INET
        struct tcphdr *th;
        int len, nw_off, tcp_opt_len = 0;

        if (tcp_ts)
                tcp_opt_len = 12;

        if (tpa_info->gso_type == SKB_GSO_TCPV4) {
                struct iphdr *iph;

                nw_off = payload_off - BNXT_IPV4_HDR_SIZE - tcp_opt_len -
                         ETH_HLEN;
                skb_set_network_header(skb, nw_off);
                iph = ip_hdr(skb);
                skb_set_transport_header(skb, nw_off + sizeof(struct iphdr));
                len = skb->len - skb_transport_offset(skb);
                th = tcp_hdr(skb);
                th->check = ~tcp_v4_check(len, iph->saddr, iph->daddr, 0);
        } else if (tpa_info->gso_type == SKB_GSO_TCPV6) {
                struct ipv6hdr *iph;

                nw_off = payload_off - BNXT_IPV6_HDR_SIZE - tcp_opt_len -
                         ETH_HLEN;
                skb_set_network_header(skb, nw_off);
                iph = ipv6_hdr(skb);
                skb_set_transport_header(skb, nw_off + sizeof(struct ipv6hdr));
                len = skb->len - skb_transport_offset(skb);
                th = tcp_hdr(skb);
                th->check = ~tcp_v6_check(len, &iph->saddr, &iph->daddr, 0);
        } else {
                dev_kfree_skb_any(skb);
                return NULL;
        }

        if (nw_off) { /* tunnel */
                struct udphdr *uh = NULL;

                if (skb->protocol == htons(ETH_P_IP)) {
                        struct iphdr *iph = (struct iphdr *)skb->data;

                        if (iph->protocol == IPPROTO_UDP)
                                uh = (struct udphdr *)(iph + 1);
                } else {
                        struct ipv6hdr *iph = (struct ipv6hdr *)skb->data;

                        if (iph->nexthdr == IPPROTO_UDP)
                                uh = (struct udphdr *)(iph + 1);
                }
                if (uh) {
                        if (uh->check)
                                skb_shinfo(skb)->gso_type |=
                                        SKB_GSO_UDP_TUNNEL_CSUM;
                        else
                                skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_TUNNEL;
                }
        }
#endif
        return skb;
}

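/* Finish a hardware-aggregated packet: set the GRO segment count and gso
 * fields, fix up the headers with the chip-specific gro_func, then hand
 * the skb to tcp_gro_complete().
 */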
1345 static inline struct sk_buff *bnxt_gro_skb(struct bnxt *bp,
1346                                            struct bnxt_tpa_info *tpa_info,
1347                                            struct rx_tpa_end_cmp *tpa_end,
1348                                            struct rx_tpa_end_cmp_ext *tpa_end1,
1349                                            struct sk_buff *skb)
1350 {
1351 #ifdef CONFIG_INET
1352         int payload_off;
1353         u16 segs;
1354
1355         segs = TPA_END_TPA_SEGS(tpa_end);
1356         if (segs == 1)
1357                 return skb;
1358
1359         NAPI_GRO_CB(skb)->count = segs;
1360         skb_shinfo(skb)->gso_size =
1361                 le32_to_cpu(tpa_end1->rx_tpa_end_cmp_seg_len);
1362         skb_shinfo(skb)->gso_type = tpa_info->gso_type;
1363         payload_off = (le32_to_cpu(tpa_end->rx_tpa_end_cmp_misc_v1) &
1364                        RX_TPA_END_CMP_PAYLOAD_OFFSET) >>
1365                       RX_TPA_END_CMP_PAYLOAD_OFFSET_SHIFT;
1366         skb = bp->gro_func(tpa_info, payload_off, TPA_END_GRO_TS(tpa_end), skb);
1367         if (likely(skb))
1368                 tcp_gro_complete(skb);
1369 #endif
1370         return skb;
1371 }
1372
1373 /* Given the cfa_code of a received packet, determine which
1374  * netdev (vf-rep or PF) the packet is destined to.
1375  */
1376 static struct net_device *bnxt_get_pkt_dev(struct bnxt *bp, u16 cfa_code)
1377 {
1378         struct net_device *dev = bnxt_get_vf_rep(bp, cfa_code);
1379
1380         /* if vf-rep dev is NULL, the packet must belong to the PF */
1381         return dev ? dev : bp->dev;
1382 }
1383
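/* Handle a TPA end completion: reclaim the aggregation buffer (copied
 * for small packets, replaced with a fresh buffer otherwise), attach
 * any aggregation pages, and apply hash, VLAN and checksum metadata.
 * Returns ERR_PTR(-EBUSY) if the completion ring does not yet hold all
 * the aggregation buffers, or NULL if the packet is aborted.
 */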
1384 static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp,
1385                                            struct bnxt_cp_ring_info *cpr,
1386                                            u32 *raw_cons,
1387                                            struct rx_tpa_end_cmp *tpa_end,
1388                                            struct rx_tpa_end_cmp_ext *tpa_end1,
1389                                            u8 *event)
1390 {
1391         struct bnxt_napi *bnapi = cpr->bnapi;
1392         struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
1393         u8 agg_id = TPA_END_AGG_ID(tpa_end);
1394         u8 *data_ptr, agg_bufs;
1395         u16 cp_cons = RING_CMP(*raw_cons);
1396         unsigned int len;
1397         struct bnxt_tpa_info *tpa_info;
1398         dma_addr_t mapping;
1399         struct sk_buff *skb;
1400         void *data;
1401
1402         if (unlikely(bnapi->in_reset)) {
1403                 int rc = bnxt_discard_rx(bp, cpr, raw_cons, tpa_end);
1404
1405                 if (rc < 0)
1406                         return ERR_PTR(-EBUSY);
1407                 return NULL;
1408         }
1409
1410         tpa_info = &rxr->rx_tpa[agg_id];
1411         data = tpa_info->data;
1412         data_ptr = tpa_info->data_ptr;
1413         prefetch(data_ptr);
1414         len = tpa_info->len;
1415         mapping = tpa_info->mapping;
1416
1417         agg_bufs = (le32_to_cpu(tpa_end->rx_tpa_end_cmp_misc_v1) &
1418                     RX_TPA_END_CMP_AGG_BUFS) >> RX_TPA_END_CMP_AGG_BUFS_SHIFT;
1419
1420         if (agg_bufs) {
1421                 if (!bnxt_agg_bufs_valid(bp, cpr, agg_bufs, raw_cons))
1422                         return ERR_PTR(-EBUSY);
1423
1424                 *event |= BNXT_AGG_EVENT;
1425                 cp_cons = NEXT_CMP(cp_cons);
1426         }
1427
1428         if (unlikely(agg_bufs > MAX_SKB_FRAGS || TPA_END_ERRORS(tpa_end1))) {
1429                 bnxt_abort_tpa(cpr, cp_cons, agg_bufs);
1430                 if (agg_bufs > MAX_SKB_FRAGS)
1431                         netdev_warn(bp->dev, "TPA frags %d exceeded MAX_SKB_FRAGS %d\n",
1432                                     agg_bufs, (int)MAX_SKB_FRAGS);
1433                 return NULL;
1434         }
1435
1436         if (len <= bp->rx_copy_thresh) {
1437                 skb = bnxt_copy_skb(bnapi, data_ptr, len, mapping);
1438                 if (!skb) {
1439                         bnxt_abort_tpa(cpr, cp_cons, agg_bufs);
1440                         return NULL;
1441                 }
1442         } else {
1443                 u8 *new_data;
1444                 dma_addr_t new_mapping;
1445
1446                 new_data = __bnxt_alloc_rx_data(bp, &new_mapping, GFP_ATOMIC);
1447                 if (!new_data) {
1448                         bnxt_abort_tpa(cpr, cp_cons, agg_bufs);
1449                         return NULL;
1450                 }
1451
1452                 tpa_info->data = new_data;
1453                 tpa_info->data_ptr = new_data + bp->rx_offset;
1454                 tpa_info->mapping = new_mapping;
1455
1456                 skb = build_skb(data, 0);
1457                 dma_unmap_single_attrs(&bp->pdev->dev, mapping,
1458                                        bp->rx_buf_use_size, bp->rx_dir,
1459                                        DMA_ATTR_WEAK_ORDERING);
1460
1461                 if (!skb) {
1462                         kfree(data);
1463                         bnxt_abort_tpa(cpr, cp_cons, agg_bufs);
1464                         return NULL;
1465                 }
1466                 skb_reserve(skb, bp->rx_offset);
1467                 skb_put(skb, len);
1468         }
1469
1470         if (agg_bufs) {
1471                 skb = bnxt_rx_pages(bp, cpr, skb, cp_cons, agg_bufs);
1472                 if (!skb) {
1473                         /* Page reuse already handled by bnxt_rx_pages(). */
1474                         return NULL;
1475                 }
1476         }
1477
1478         skb->protocol =
1479                 eth_type_trans(skb, bnxt_get_pkt_dev(bp, tpa_info->cfa_code));
1480
1481         if (tpa_info->hash_type != PKT_HASH_TYPE_NONE)
1482                 skb_set_hash(skb, tpa_info->rss_hash, tpa_info->hash_type);
1483
1484         if ((tpa_info->flags2 & RX_CMP_FLAGS2_META_FORMAT_VLAN) &&
1485             (skb->dev->features & NETIF_F_HW_VLAN_CTAG_RX)) {
1486                 u16 vlan_proto = tpa_info->metadata >>
1487                         RX_CMP_FLAGS2_METADATA_TPID_SFT;
1488                 u16 vtag = tpa_info->metadata & RX_CMP_FLAGS2_METADATA_TCI_MASK;
1489
1490                 __vlan_hwaccel_put_tag(skb, htons(vlan_proto), vtag);
1491         }
1492
1493         skb_checksum_none_assert(skb);
1494         if (likely(tpa_info->flags2 & RX_TPA_START_CMP_FLAGS2_L4_CS_CALC)) {
1495                 skb->ip_summed = CHECKSUM_UNNECESSARY;
1496                 skb->csum_level =
1497                         (tpa_info->flags2 & RX_CMP_FLAGS2_T_L4_CS_CALC) >> 3;
1498         }
1499
1500         if (TPA_END_GRO(tpa_end))
1501                 skb = bnxt_gro_skb(bp, tpa_info, tpa_end, tpa_end1, skb);
1502
1503         return skb;
1504 }
1505
1506 static void bnxt_deliver_skb(struct bnxt *bp, struct bnxt_napi *bnapi,
1507                              struct sk_buff *skb)
1508 {
1509         if (skb->dev != bp->dev) {
1510                 /* this packet belongs to a vf-rep */
1511                 bnxt_vf_rep_rx(bp, skb);
1512                 return;
1513         }
1514         skb_record_rx_queue(skb, bnapi->index);
1515         napi_gro_receive(&bnapi->napi, skb);
1516 }
1517
1518 /* returns the following:
1519  * 1       - 1 packet successfully received
1520  * 0       - successful TPA_START, packet not completed yet
1521  * -EBUSY  - completion ring does not have all the agg buffers yet
1522  * -ENOMEM - packet aborted due to out of memory
1523  * -EIO    - packet aborted due to hw error indicated in BD
1524  */
1525 static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
1526                        u32 *raw_cons, u8 *event)
1527 {
1528         struct bnxt_napi *bnapi = cpr->bnapi;
1529         struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
1530         struct net_device *dev = bp->dev;
1531         struct rx_cmp *rxcmp;
1532         struct rx_cmp_ext *rxcmp1;
1533         u32 tmp_raw_cons = *raw_cons;
1534         u16 cfa_code, cons, prod, cp_cons = RING_CMP(tmp_raw_cons);
1535         struct bnxt_sw_rx_bd *rx_buf;
1536         unsigned int len;
1537         u8 *data_ptr, agg_bufs, cmp_type;
1538         dma_addr_t dma_addr;
1539         struct sk_buff *skb;
1540         void *data;
1541         int rc = 0;
1542         u32 misc;
1543
1544         rxcmp = (struct rx_cmp *)
1545                         &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
1546
1547         tmp_raw_cons = NEXT_RAW_CMP(tmp_raw_cons);
1548         cp_cons = RING_CMP(tmp_raw_cons);
1549         rxcmp1 = (struct rx_cmp_ext *)
1550                         &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
1551
1552         if (!RX_CMP_VALID(rxcmp1, tmp_raw_cons))
1553                 return -EBUSY;
1554
1555         cmp_type = RX_CMP_TYPE(rxcmp);
1556
1557         prod = rxr->rx_prod;
1558
1559         if (cmp_type == CMP_TYPE_RX_L2_TPA_START_CMP) {
1560                 bnxt_tpa_start(bp, rxr, (struct rx_tpa_start_cmp *)rxcmp,
1561                                (struct rx_tpa_start_cmp_ext *)rxcmp1);
1562
1563                 *event |= BNXT_RX_EVENT;
1564                 goto next_rx_no_prod_no_len;
1565
1566         } else if (cmp_type == CMP_TYPE_RX_L2_TPA_END_CMP) {
1567                 skb = bnxt_tpa_end(bp, cpr, &tmp_raw_cons,
1568                                    (struct rx_tpa_end_cmp *)rxcmp,
1569                                    (struct rx_tpa_end_cmp_ext *)rxcmp1, event);
1570
1571                 if (IS_ERR(skb))
1572                         return -EBUSY;
1573
1574                 rc = -ENOMEM;
1575                 if (likely(skb)) {
1576                         bnxt_deliver_skb(bp, bnapi, skb);
1577                         rc = 1;
1578                 }
1579                 *event |= BNXT_RX_EVENT;
1580                 goto next_rx_no_prod_no_len;
1581         }
1582
1583         cons = rxcmp->rx_cmp_opaque;
1584         rx_buf = &rxr->rx_buf_ring[cons];
1585         data = rx_buf->data;
1586         data_ptr = rx_buf->data_ptr;
1587         if (unlikely(cons != rxr->rx_next_cons)) {
1588                 int rc1 = bnxt_discard_rx(bp, cpr, raw_cons, rxcmp);
1589
1590                 bnxt_sched_reset(bp, rxr);
1591                 return rc1;
1592         }
1593         prefetch(data_ptr);
1594
1595         misc = le32_to_cpu(rxcmp->rx_cmp_misc_v1);
1596         agg_bufs = (misc & RX_CMP_AGG_BUFS) >> RX_CMP_AGG_BUFS_SHIFT;
1597
1598         if (agg_bufs) {
1599                 if (!bnxt_agg_bufs_valid(bp, cpr, agg_bufs, &tmp_raw_cons))
1600                         return -EBUSY;
1601
1602                 cp_cons = NEXT_CMP(cp_cons);
1603                 *event |= BNXT_AGG_EVENT;
1604         }
1605         *event |= BNXT_RX_EVENT;
1606
1607         rx_buf->data = NULL;
1608         if (rxcmp1->rx_cmp_cfa_code_errors_v2 & RX_CMP_L2_ERRORS) {
1609                 bnxt_reuse_rx_data(rxr, cons, data);
1610                 if (agg_bufs)
1611                         bnxt_reuse_rx_agg_bufs(cpr, cp_cons, agg_bufs);
1612
1613                 rc = -EIO;
1614                 goto next_rx;
1615         }
1616
1617         len = le32_to_cpu(rxcmp->rx_cmp_len_flags_type) >> RX_CMP_LEN_SHIFT;
1618         dma_addr = rx_buf->mapping;
1619
1620         if (bnxt_rx_xdp(bp, rxr, cons, data, &data_ptr, &len, event)) {
1621                 rc = 1;
1622                 goto next_rx;
1623         }
1624
1625         if (len <= bp->rx_copy_thresh) {
1626                 skb = bnxt_copy_skb(bnapi, data_ptr, len, dma_addr);
1627                 bnxt_reuse_rx_data(rxr, cons, data);
1628                 if (!skb) {
1629                         rc = -ENOMEM;
1630                         goto next_rx;
1631                 }
1632         } else {
1633                 u32 payload;
1634
1635                 if (rx_buf->data_ptr == data_ptr)
1636                         payload = misc & RX_CMP_PAYLOAD_OFFSET;
1637                 else
1638                         payload = 0;
1639                 skb = bp->rx_skb_func(bp, rxr, cons, data, data_ptr, dma_addr,
1640                                       payload | len);
1641                 if (!skb) {
1642                         rc = -ENOMEM;
1643                         goto next_rx;
1644                 }
1645         }
1646
1647         if (agg_bufs) {
1648                 skb = bnxt_rx_pages(bp, cpr, skb, cp_cons, agg_bufs);
1649                 if (!skb) {
1650                         rc = -ENOMEM;
1651                         goto next_rx;
1652                 }
1653         }
1654
1655         if (RX_CMP_HASH_VALID(rxcmp)) {
1656                 u32 hash_type = RX_CMP_HASH_TYPE(rxcmp);
1657                 enum pkt_hash_types type = PKT_HASH_TYPE_L4;
1658
1659                 /* RSS profiles 1 and 3 with extract code 0 for inner 4-tuple */
1660                 if (hash_type != 1 && hash_type != 3)
1661                         type = PKT_HASH_TYPE_L3;
1662                 skb_set_hash(skb, le32_to_cpu(rxcmp->rx_cmp_rss_hash), type);
1663         }
1664
1665         cfa_code = RX_CMP_CFA_CODE(rxcmp1);
1666         skb->protocol = eth_type_trans(skb, bnxt_get_pkt_dev(bp, cfa_code));
1667
1668         if ((rxcmp1->rx_cmp_flags2 &
1669              cpu_to_le32(RX_CMP_FLAGS2_META_FORMAT_VLAN)) &&
1670             (skb->dev->features & NETIF_F_HW_VLAN_CTAG_RX)) {
1671                 u32 meta_data = le32_to_cpu(rxcmp1->rx_cmp_meta_data);
1672                 u16 vtag = meta_data & RX_CMP_FLAGS2_METADATA_TCI_MASK;
1673                 u16 vlan_proto = meta_data >> RX_CMP_FLAGS2_METADATA_TPID_SFT;
1674
1675                 __vlan_hwaccel_put_tag(skb, htons(vlan_proto), vtag);
1676         }
1677
1678         skb_checksum_none_assert(skb);
1679         if (RX_CMP_L4_CS_OK(rxcmp1)) {
1680                 if (dev->features & NETIF_F_RXCSUM) {
1681                         skb->ip_summed = CHECKSUM_UNNECESSARY;
1682                         skb->csum_level = RX_CMP_ENCAP(rxcmp1);
1683                 }
1684         } else {
1685                 if (rxcmp1->rx_cmp_cfa_code_errors_v2 & RX_CMP_L4_CS_ERR_BITS) {
1686                         if (dev->features & NETIF_F_RXCSUM)
1687                                 bnapi->cp_ring.rx_l4_csum_errors++;
1688                 }
1689         }
1690
1691         bnxt_deliver_skb(bp, bnapi, skb);
1692         rc = 1;
1693
1694 next_rx:
1695         rxr->rx_prod = NEXT_RX(prod);
1696         rxr->rx_next_cons = NEXT_RX(cons);
1697
1698         cpr->rx_packets += 1;
1699         cpr->rx_bytes += len;
1700
1701 next_rx_no_prod_no_len:
1702         *raw_cons = tmp_raw_cons;
1703
1704         return rc;
1705 }
1706
1707 /* In netpoll mode, if we are using a combined completion ring, we need to
1708  * discard the rx packets and recycle the buffers.
1709  */
1710 static int bnxt_force_rx_discard(struct bnxt *bp,
1711                                  struct bnxt_cp_ring_info *cpr,
1712                                  u32 *raw_cons, u8 *event)
1713 {
1714         u32 tmp_raw_cons = *raw_cons;
1715         struct rx_cmp_ext *rxcmp1;
1716         struct rx_cmp *rxcmp;
1717         u16 cp_cons;
1718         u8 cmp_type;
1719
1720         cp_cons = RING_CMP(tmp_raw_cons);
1721         rxcmp = (struct rx_cmp *)
1722                         &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
1723
1724         tmp_raw_cons = NEXT_RAW_CMP(tmp_raw_cons);
1725         cp_cons = RING_CMP(tmp_raw_cons);
1726         rxcmp1 = (struct rx_cmp_ext *)
1727                         &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
1728
1729         if (!RX_CMP_VALID(rxcmp1, tmp_raw_cons))
1730                 return -EBUSY;
1731
1732         cmp_type = RX_CMP_TYPE(rxcmp);
1733         if (cmp_type == CMP_TYPE_RX_L2_CMP) {
1734                 rxcmp1->rx_cmp_cfa_code_errors_v2 |=
1735                         cpu_to_le32(RX_CMPL_ERRORS_CRC_ERROR);
1736         } else if (cmp_type == CMP_TYPE_RX_L2_TPA_END_CMP) {
1737                 struct rx_tpa_end_cmp_ext *tpa_end1;
1738
1739                 tpa_end1 = (struct rx_tpa_end_cmp_ext *)rxcmp1;
1740                 tpa_end1->rx_tpa_end_cmp_errors_v2 |=
1741                         cpu_to_le32(RX_TPA_END_CMP_ERRORS);
1742         }
1743         return bnxt_rx_pkt(bp, cpr, raw_cons, event);
1744 }
1745
1746 #define BNXT_GET_EVENT_PORT(data)       \
1747         ((data) &                       \
1748          ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_PORT_ID_MASK)
1749
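/* Decode a firmware async event completion, set the corresponding
 * slow-path event bit(s) and kick the slow-path workqueue; ULP
 * drivers are always notified on exit.
 */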
1750 static int bnxt_async_event_process(struct bnxt *bp,
1751                                     struct hwrm_async_event_cmpl *cmpl)
1752 {
1753         u16 event_id = le16_to_cpu(cmpl->event_id);
1754
1755         /* TODO CHIMP_FW: Define event IDs for link change, error, etc. */
1756         switch (event_id) {
1757         case ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE: {
1758                 u32 data1 = le32_to_cpu(cmpl->event_data1);
1759                 struct bnxt_link_info *link_info = &bp->link_info;
1760
1761                 if (BNXT_VF(bp))
1762                         goto async_event_process_exit;
1763
1764                 /* print unsupported speed warning in forced speed mode only */
1765                 if (!(link_info->autoneg & BNXT_AUTONEG_SPEED) &&
1766                     (data1 & 0x20000)) {
1767                         u16 fw_speed = link_info->force_link_speed;
1768                         u32 speed = bnxt_fw_to_ethtool_speed(fw_speed);
1769
1770                         if (speed != SPEED_UNKNOWN)
1771                                 netdev_warn(bp->dev, "Link speed %d no longer supported\n",
1772                                             speed);
1773                 }
1774                 set_bit(BNXT_LINK_SPEED_CHNG_SP_EVENT, &bp->sp_event);
1775         }
1776         /* fall through */
1777         case ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE:
1778                 set_bit(BNXT_LINK_CHNG_SP_EVENT, &bp->sp_event);
1779                 break;
1780         case ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD:
1781                 set_bit(BNXT_HWRM_PF_UNLOAD_SP_EVENT, &bp->sp_event);
1782                 break;
1783         case ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED: {
1784                 u32 data1 = le32_to_cpu(cmpl->event_data1);
1785                 u16 port_id = BNXT_GET_EVENT_PORT(data1);
1786
1787                 if (BNXT_VF(bp))
1788                         break;
1789
1790                 if (bp->pf.port_id != port_id)
1791                         break;
1792
1793                 set_bit(BNXT_HWRM_PORT_MODULE_SP_EVENT, &bp->sp_event);
1794                 break;
1795         }
1796         case ASYNC_EVENT_CMPL_EVENT_ID_VF_CFG_CHANGE:
1797                 if (BNXT_PF(bp))
1798                         goto async_event_process_exit;
1799                 set_bit(BNXT_RESET_TASK_SILENT_SP_EVENT, &bp->sp_event);
1800                 break;
1801         default:
1802                 goto async_event_process_exit;
1803         }
1804         bnxt_queue_sp_work(bp);
1805 async_event_process_exit:
1806         bnxt_ulp_async_events(bp, cmpl);
1807         return 0;
1808 }
1809
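/* Dispatch HWRM-related completions: DONE notifications for
 * interrupt-mode firmware commands, requests forwarded from VFs,
 * and async events.
 */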
1810 static int bnxt_hwrm_handler(struct bnxt *bp, struct tx_cmp *txcmp)
1811 {
1812         u16 cmpl_type = TX_CMP_TYPE(txcmp), vf_id, seq_id;
1813         struct hwrm_cmpl *h_cmpl = (struct hwrm_cmpl *)txcmp;
1814         struct hwrm_fwd_req_cmpl *fwd_req_cmpl =
1815                                 (struct hwrm_fwd_req_cmpl *)txcmp;
1816
1817         switch (cmpl_type) {
1818         case CMPL_BASE_TYPE_HWRM_DONE:
1819                 seq_id = le16_to_cpu(h_cmpl->sequence_id);
1820                 if (seq_id == bp->hwrm_intr_seq_id)
1821                         bp->hwrm_intr_seq_id = (u16)~bp->hwrm_intr_seq_id;
1822                 else
1823                         netdev_err(bp->dev, "Invalid hwrm seq id %d\n", seq_id);
1824                 break;
1825
1826         case CMPL_BASE_TYPE_HWRM_FWD_REQ:
1827                 vf_id = le16_to_cpu(fwd_req_cmpl->source_id);
1828
1829                 if ((vf_id < bp->pf.first_vf_id) ||
1830                     (vf_id >= bp->pf.first_vf_id + bp->pf.active_vfs)) {
1831                         netdev_err(bp->dev, "Msg contains invalid VF id %x\n",
1832                                    vf_id);
1833                         return -EINVAL;
1834                 }
1835
1836                 set_bit(vf_id - bp->pf.first_vf_id, bp->pf.vf_event_bmap);
1837                 set_bit(BNXT_HWRM_EXEC_FWD_REQ_SP_EVENT, &bp->sp_event);
1838                 bnxt_queue_sp_work(bp);
1839                 break;
1840
1841         case CMPL_BASE_TYPE_HWRM_ASYNC_EVENT:
1842                 bnxt_async_event_process(bp,
1843                                          (struct hwrm_async_event_cmpl *)txcmp);
                break;
1844
1845         default:
1846                 break;
1847         }
1848
1849         return 0;
1850 }
1851
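/* MSI-X handler: one vector per NAPI instance, so simply count the
 * event, prefetch the current completion entry and schedule NAPI.
 */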
1852 static irqreturn_t bnxt_msix(int irq, void *dev_instance)
1853 {
1854         struct bnxt_napi *bnapi = dev_instance;
1855         struct bnxt *bp = bnapi->bp;
1856         struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
1857         u32 cons = RING_CMP(cpr->cp_raw_cons);
1858
1859         cpr->event_ctr++;
1860         prefetch(&cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)]);
1861         napi_schedule(&bnapi->napi);
1862         return IRQ_HANDLED;
1863 }
1864
1865 static inline int bnxt_has_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr)
1866 {
1867         u32 raw_cons = cpr->cp_raw_cons;
1868         u16 cons = RING_CMP(raw_cons);
1869         struct tx_cmp *txcmp;
1870
1871         txcmp = &cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)];
1872
1873         return TX_CMP_VALID(txcmp, raw_cons);
1874 }
1875
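/* Legacy INTA handler: when no work is pending, check the legacy
 * interrupt status register to filter out interrupts that are not
 * ours, then mask the ring IRQ and schedule NAPI.
 */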
1876 static irqreturn_t bnxt_inta(int irq, void *dev_instance)
1877 {
1878         struct bnxt_napi *bnapi = dev_instance;
1879         struct bnxt *bp = bnapi->bp;
1880         struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
1881         u32 cons = RING_CMP(cpr->cp_raw_cons);
1882         u32 int_status;
1883
1884         prefetch(&cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)]);
1885
1886         if (!bnxt_has_work(bp, cpr)) {
1887                 int_status = readl(bp->bar0 + BNXT_CAG_REG_LEGACY_INT_STATUS);
1888                 /* return if erroneous interrupt */
1889                 if (!(int_status & (0x10000 << cpr->cp_ring_struct.fw_ring_id)))
1890                         return IRQ_NONE;
1891         }
1892
1893         /* disable ring IRQ */
1894         BNXT_CP_DB_IRQ_DIS(cpr->cp_db.doorbell);
1895
1896         /* Return here if interrupt is shared and is disabled. */
1897         if (unlikely(atomic_read(&bp->intr_sem) != 0))
1898                 return IRQ_HANDLED;
1899
1900         napi_schedule(&bnapi->napi);
1901         return IRQ_HANDLED;
1902 }
1903
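/* Core completion ring poll loop shared by all NAPI variants: consume
 * TX, RX and HWRM completions until the budget is exhausted or no
 * valid entry remains.  TX reclaim and RX/agg doorbells are deferred
 * to __bnxt_poll_work_done() via bnapi->tx_pkts and bnapi->events.
 */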
1904 static int __bnxt_poll_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
1905                             int budget)
1906 {
1907         struct bnxt_napi *bnapi = cpr->bnapi;
1908         u32 raw_cons = cpr->cp_raw_cons;
1909         u32 cons;
1910         int tx_pkts = 0;
1911         int rx_pkts = 0;
1912         u8 event = 0;
1913         struct tx_cmp *txcmp;
1914
1915         cpr->has_more_work = 0;
1916         while (1) {
1917                 int rc;
1918
1919                 cons = RING_CMP(raw_cons);
1920                 txcmp = &cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)];
1921
1922                 if (!TX_CMP_VALID(txcmp, raw_cons))
1923                         break;
1924
1925                 /* The entry's valid bit must be checked before reading
1926                  * any further.
1927                  */
1928                 dma_rmb();
1929                 cpr->had_work_done = 1;
1930                 if (TX_CMP_TYPE(txcmp) == CMP_TYPE_TX_L2_CMP) {
1931                         tx_pkts++;
1932                         /* return full budget so NAPI will complete. */
1933                         if (unlikely(tx_pkts > bp->tx_wake_thresh)) {
1934                                 rx_pkts = budget;
1935                                 raw_cons = NEXT_RAW_CMP(raw_cons);
1936                                 if (budget)
1937                                         cpr->has_more_work = 1;
1938                                 break;
1939                         }
1940                 } else if ((TX_CMP_TYPE(txcmp) & 0x30) == 0x10) {
1941                         if (likely(budget))
1942                                 rc = bnxt_rx_pkt(bp, cpr, &raw_cons, &event);
1943                         else
1944                                 rc = bnxt_force_rx_discard(bp, cpr, &raw_cons,
1945                                                            &event);
1946                         if (likely(rc >= 0))
1947                                 rx_pkts += rc;
1948                         /* Increment rx_pkts when rc is -ENOMEM to count towards
1949                          * the NAPI budget.  Otherwise, we may potentially loop
1950                          * here forever if we consistently cannot allocate
1951                          * buffers.
1952                          */
1953                         else if (rc == -ENOMEM && budget)
1954                                 rx_pkts++;
1955                         else if (rc == -EBUSY)  /* partial completion */
1956                                 break;
1957                 } else if (unlikely((TX_CMP_TYPE(txcmp) ==
1958                                      CMPL_BASE_TYPE_HWRM_DONE) ||
1959                                     (TX_CMP_TYPE(txcmp) ==
1960                                      CMPL_BASE_TYPE_HWRM_FWD_REQ) ||
1961                                     (TX_CMP_TYPE(txcmp) ==
1962                                      CMPL_BASE_TYPE_HWRM_ASYNC_EVENT))) {
1963                         bnxt_hwrm_handler(bp, txcmp);
1964                 }
1965                 raw_cons = NEXT_RAW_CMP(raw_cons);
1966
1967                 if (rx_pkts && rx_pkts == budget) {
1968                         cpr->has_more_work = 1;
1969                         break;
1970                 }
1971         }
1972
1973         if (event & BNXT_TX_EVENT) {
1974                 struct bnxt_tx_ring_info *txr = bnapi->tx_ring;
1975                 u16 prod = txr->tx_prod;
1976
1977                 /* Sync BD data before updating doorbell */
1978                 wmb();
1979
1980                 bnxt_db_write_relaxed(bp, &txr->tx_db, prod);
1981         }
1982
1983         cpr->cp_raw_cons = raw_cons;
1984         bnapi->tx_pkts += tx_pkts;
1985         bnapi->events |= event;
1986         return rx_pkts;
1987 }
1988
1989 static void __bnxt_poll_work_done(struct bnxt *bp, struct bnxt_napi *bnapi)
1990 {
1991         if (bnapi->tx_pkts) {
1992                 bnapi->tx_int(bp, bnapi, bnapi->tx_pkts);
1993                 bnapi->tx_pkts = 0;
1994         }
1995
1996         if (bnapi->events & BNXT_RX_EVENT) {
1997                 struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
1998
1999                 bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
2000                 if (bnapi->events & BNXT_AGG_EVENT)
2001                         bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod);
2002         }
2003         bnapi->events = 0;
2004 }
2005
2006 static int bnxt_poll_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
2007                           int budget)
2008 {
2009         struct bnxt_napi *bnapi = cpr->bnapi;
2010         int rx_pkts;
2011
2012         rx_pkts = __bnxt_poll_work(bp, cpr, budget);
2013
2014         /* ACK completion ring before freeing tx ring and producing new
2015          * buffers in rx/agg rings to prevent overflowing the completion
2016          * ring.
2017          */
2018         bnxt_db_cq(bp, &cpr->cp_db, cpr->cp_raw_cons);
2019
2020         __bnxt_poll_work_done(bp, bnapi);
2021         return rx_pkts;
2022 }
2023
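/* NAPI poll for the Nitro A0 special completion ring: RX packets on
 * this ring are not consumed, so mark each one with a forced error
 * and let bnxt_rx_pkt() recycle the buffers.
 */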
2024 static int bnxt_poll_nitroa0(struct napi_struct *napi, int budget)
2025 {
2026         struct bnxt_napi *bnapi = container_of(napi, struct bnxt_napi, napi);
2027         struct bnxt *bp = bnapi->bp;
2028         struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
2029         struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
2030         struct tx_cmp *txcmp;
2031         struct rx_cmp_ext *rxcmp1;
2032         u32 cp_cons, tmp_raw_cons;
2033         u32 raw_cons = cpr->cp_raw_cons;
2034         u32 rx_pkts = 0;
2035         u8 event = 0;
2036
2037         while (1) {
2038                 int rc;
2039
2040                 cp_cons = RING_CMP(raw_cons);
2041                 txcmp = &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
2042
2043                 if (!TX_CMP_VALID(txcmp, raw_cons))
2044                         break;
2045
2046                 if ((TX_CMP_TYPE(txcmp) & 0x30) == 0x10) {
2047                         tmp_raw_cons = NEXT_RAW_CMP(raw_cons);
2048                         cp_cons = RING_CMP(tmp_raw_cons);
2049                         rxcmp1 = (struct rx_cmp_ext *)
2050                           &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
2051
2052                         if (!RX_CMP_VALID(rxcmp1, tmp_raw_cons))
2053                                 break;
2054
2055                         /* force an error to recycle the buffer */
2056                         rxcmp1->rx_cmp_cfa_code_errors_v2 |=
2057                                 cpu_to_le32(RX_CMPL_ERRORS_CRC_ERROR);
2058
2059                         rc = bnxt_rx_pkt(bp, cpr, &raw_cons, &event);
2060                         if (likely(rc == -EIO) && budget)
2061                                 rx_pkts++;
2062                         else if (rc == -EBUSY)  /* partial completion */
2063                                 break;
2064                 } else if (unlikely(TX_CMP_TYPE(txcmp) ==
2065                                     CMPL_BASE_TYPE_HWRM_DONE)) {
2066                         bnxt_hwrm_handler(bp, txcmp);
2067                 } else {
2068                         netdev_err(bp->dev,
2069                                    "Invalid completion received on special ring\n");
2070                 }
2071                 raw_cons = NEXT_RAW_CMP(raw_cons);
2072
2073                 if (rx_pkts == budget)
2074                         break;
2075         }
2076
2077         cpr->cp_raw_cons = raw_cons;
2078         BNXT_DB_CQ(&cpr->cp_db, cpr->cp_raw_cons);
2079         bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
2080
2081         if (event & BNXT_AGG_EVENT)
2082                 bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod);
2083
2084         if (!bnxt_has_work(bp, cpr) && rx_pkts < budget) {
2085                 napi_complete_done(napi, rx_pkts);
2086                 BNXT_DB_CQ_ARM(&cpr->cp_db, cpr->cp_raw_cons);
2087         }
2088         return rx_pkts;
2089 }
2090
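/* Main NAPI poll used on pre-P5 chips: loop until the budget is
 * consumed or no work is left, then update the dynamic interrupt
 * moderation sample if DIM is enabled.
 */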
2091 static int bnxt_poll(struct napi_struct *napi, int budget)
2092 {
2093         struct bnxt_napi *bnapi = container_of(napi, struct bnxt_napi, napi);
2094         struct bnxt *bp = bnapi->bp;
2095         struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
2096         int work_done = 0;
2097
2098         while (1) {
2099                 work_done += bnxt_poll_work(bp, cpr, budget - work_done);
2100
2101                 if (work_done >= budget) {
2102                         if (!budget)
2103                                 BNXT_DB_CQ_ARM(&cpr->cp_db, cpr->cp_raw_cons);
2104                         break;
2105                 }
2106
2107                 if (!bnxt_has_work(bp, cpr)) {
2108                         if (napi_complete_done(napi, work_done))
2109                                 BNXT_DB_CQ_ARM(&cpr->cp_db, cpr->cp_raw_cons);
2110                         break;
2111                 }
2112         }
2113         if (bp->flags & BNXT_FLAG_DIM) {
2114                 struct net_dim_sample dim_sample;
2115
2116                 net_dim_sample(cpr->event_ctr,
2117                                cpr->rx_packets,
2118                                cpr->rx_bytes,
2119                                &dim_sample);
2120                 net_dim(&cpr->dim, dim_sample);
2121         }
2122         mmiowb();
2123         return work_done;
2124 }
2125
2126 static int __bnxt_poll_cqs(struct bnxt *bp, struct bnxt_napi *bnapi, int budget)
2127 {
2128         struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
2129         int i, work_done = 0;
2130
2131         for (i = 0; i < 2; i++) {
2132                 struct bnxt_cp_ring_info *cpr2 = cpr->cp_ring_arr[i];
2133
2134                 if (cpr2) {
2135                         work_done += __bnxt_poll_work(bp, cpr2,
2136                                                       budget - work_done);
2137                         cpr->has_more_work |= cpr2->has_more_work;
2138                 }
2139         }
2140         return work_done;
2141 }
2142
2143 static void __bnxt_poll_cqs_done(struct bnxt *bp, struct bnxt_napi *bnapi,
2144                                  u64 dbr_type, bool all)
2145 {
2146         struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
2147         int i;
2148
2149         for (i = 0; i < 2; i++) {
2150                 struct bnxt_cp_ring_info *cpr2 = cpr->cp_ring_arr[i];
2151                 struct bnxt_db_info *db;
2152
2153                 if (cpr2 && (all || cpr2->had_work_done)) {
2154                         db = &cpr2->cp_db;
2155                         writeq(db->db_key64 | dbr_type |
2156                                RING_CMP(cpr2->cp_raw_cons), db->doorbell);
2157                         cpr2->had_work_done = 0;
2158                 }
2159         }
2160         __bnxt_poll_work_done(bp, bnapi);
2161 }
2162
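/* NAPI poll for P5 chips: the NQ carries notifications only, so walk
 * the NQ and service the completion ring referenced by each
 * CQ_NOTIFICATION entry, arming the CQ and NQ doorbells according to
 * whether more work remains.
 */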
2163 static int bnxt_poll_p5(struct napi_struct *napi, int budget)
2164 {
2165         struct bnxt_napi *bnapi = container_of(napi, struct bnxt_napi, napi);
2166         struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
2167         u32 raw_cons = cpr->cp_raw_cons;
2168         struct bnxt *bp = bnapi->bp;
2169         struct nqe_cn *nqcmp;
2170         int work_done = 0;
2171         u32 cons;
2172
2173         if (cpr->has_more_work) {
2174                 cpr->has_more_work = 0;
2175                 work_done = __bnxt_poll_cqs(bp, bnapi, budget);
2176                 if (cpr->has_more_work) {
2177                         __bnxt_poll_cqs_done(bp, bnapi, DBR_TYPE_CQ, false);
2178                         return work_done;
2179                 }
2180                 __bnxt_poll_cqs_done(bp, bnapi, DBR_TYPE_CQ_ARMALL, true);
2181                 if (napi_complete_done(napi, work_done))
2182                         BNXT_DB_NQ_ARM_P5(&cpr->cp_db, cpr->cp_raw_cons);
2183                 return work_done;
2184         }
2185         while (1) {
2186                 cons = RING_CMP(raw_cons);
2187                 nqcmp = &cpr->nq_desc_ring[CP_RING(cons)][CP_IDX(cons)];
2188
2189                 if (!NQ_CMP_VALID(nqcmp, raw_cons)) {
2190                         __bnxt_poll_cqs_done(bp, bnapi, DBR_TYPE_CQ_ARMALL,
2191                                              false);
2192                         cpr->cp_raw_cons = raw_cons;
2193                         if (napi_complete_done(napi, work_done))
2194                                 BNXT_DB_NQ_ARM_P5(&cpr->cp_db,
2195                                                   cpr->cp_raw_cons);
2196                         return work_done;
2197                 }
2198
2199                 /* The entry's valid bit must be checked before reading
2200                  * any further.
2201                  */
2202                 dma_rmb();
2203
2204                 if (nqcmp->type == cpu_to_le16(NQ_CN_TYPE_CQ_NOTIFICATION)) {
2205                         u32 idx = le32_to_cpu(nqcmp->cq_handle_low);
2206                         struct bnxt_cp_ring_info *cpr2;
2207
2208                         cpr2 = cpr->cp_ring_arr[idx];
2209                         work_done += __bnxt_poll_work(bp, cpr2,
2210                                                       budget - work_done);
2211                         cpr->has_more_work = cpr2->has_more_work;
2212                 } else {
2213                         bnxt_hwrm_handler(bp, (struct tx_cmp *)nqcmp);
2214                 }
2215                 raw_cons = NEXT_RAW_CMP(raw_cons);
2216                 if (cpr->has_more_work)
2217                         break;
2218         }
2219         __bnxt_poll_cqs_done(bp, bnapi, DBR_TYPE_CQ, true);
2220         cpr->cp_raw_cons = raw_cons;
2221         return work_done;
2222 }
2223
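/* Unmap and free all pending TX skbs.  Push packets have no DMA
 * mapping; other packets unmap the linear part and then each
 * fragment before the skb is freed.
 */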
2224 static void bnxt_free_tx_skbs(struct bnxt *bp)
2225 {
2226         int i, max_idx;
2227         struct pci_dev *pdev = bp->pdev;
2228
2229         if (!bp->tx_ring)
2230                 return;
2231
2232         max_idx = bp->tx_nr_pages * TX_DESC_CNT;
2233         for (i = 0; i < bp->tx_nr_rings; i++) {
2234                 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
2235                 int j;
2236
2237                 for (j = 0; j < max_idx;) {
2238                         struct bnxt_sw_tx_bd *tx_buf = &txr->tx_buf_ring[j];
2239                         struct sk_buff *skb = tx_buf->skb;
2240                         int k, last;
2241
2242                         if (!skb) {
2243                                 j++;
2244                                 continue;
2245                         }
2246
2247                         tx_buf->skb = NULL;
2248
2249                         if (tx_buf->is_push) {
2250                                 dev_kfree_skb(skb);
2251                                 j += 2;
2252                                 continue;
2253                         }
2254
2255                         dma_unmap_single(&pdev->dev,
2256                                          dma_unmap_addr(tx_buf, mapping),
2257                                          skb_headlen(skb),
2258                                          PCI_DMA_TODEVICE);
2259
2260                         last = tx_buf->nr_frags;
2261                         j += 2;
2262                         for (k = 0; k < last; k++, j++) {
2263                                 int ring_idx = j & bp->tx_ring_mask;
2264                                 skb_frag_t *frag = &skb_shinfo(skb)->frags[k];
2265
2266                                 tx_buf = &txr->tx_buf_ring[ring_idx];
2267                                 dma_unmap_page(
2268                                         &pdev->dev,
2269                                         dma_unmap_addr(tx_buf, mapping),
2270                                         skb_frag_size(frag), PCI_DMA_TODEVICE);
2271                         }
2272                         dev_kfree_skb(skb);
2273                 }
2274                 netdev_tx_reset_queue(netdev_get_tx_queue(bp->dev, i));
2275         }
2276 }
2277
2278 static void bnxt_free_rx_skbs(struct bnxt *bp)
2279 {
2280         int i, max_idx, max_agg_idx;
2281         struct pci_dev *pdev = bp->pdev;
2282
2283         if (!bp->rx_ring)
2284                 return;
2285
2286         max_idx = bp->rx_nr_pages * RX_DESC_CNT;
2287         max_agg_idx = bp->rx_agg_nr_pages * RX_DESC_CNT;
2288         for (i = 0; i < bp->rx_nr_rings; i++) {
2289                 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
2290                 int j;
2291
2292                 if (rxr->rx_tpa) {
2293                         for (j = 0; j < MAX_TPA; j++) {
2294                                 struct bnxt_tpa_info *tpa_info =
2295                                                         &rxr->rx_tpa[j];
2296                                 u8 *data = tpa_info->data;
2297
2298                                 if (!data)
2299                                         continue;
2300
2301                                 dma_unmap_single_attrs(&pdev->dev,
2302                                                        tpa_info->mapping,
2303                                                        bp->rx_buf_use_size,
2304                                                        bp->rx_dir,
2305                                                        DMA_ATTR_WEAK_ORDERING);
2306
2307                                 tpa_info->data = NULL;
2308
2309                                 kfree(data);
2310                         }
2311                 }
2312
2313                 for (j = 0; j < max_idx; j++) {
2314                         struct bnxt_sw_rx_bd *rx_buf = &rxr->rx_buf_ring[j];
2315                         dma_addr_t mapping = rx_buf->mapping;
2316                         void *data = rx_buf->data;
2317
2318                         if (!data)
2319                                 continue;
2320
2321                         rx_buf->data = NULL;
2322
2323                         if (BNXT_RX_PAGE_MODE(bp)) {
2324                                 mapping -= bp->rx_dma_offset;
2325                                 dma_unmap_page_attrs(&pdev->dev, mapping,
2326                                                      PAGE_SIZE, bp->rx_dir,
2327                                                      DMA_ATTR_WEAK_ORDERING);
2328                                 __free_page(data);
2329                         } else {
2330                                 dma_unmap_single_attrs(&pdev->dev, mapping,
2331                                                        bp->rx_buf_use_size,
2332                                                        bp->rx_dir,
2333                                                        DMA_ATTR_WEAK_ORDERING);
2334                                 kfree(data);
2335                         }
2336                 }
2337
2338                 for (j = 0; j < max_agg_idx; j++) {
2339                         struct bnxt_sw_rx_agg_bd *rx_agg_buf =
2340                                 &rxr->rx_agg_ring[j];
2341                         struct page *page = rx_agg_buf->page;
2342
2343                         if (!page)
2344                                 continue;
2345
2346                         dma_unmap_page_attrs(&pdev->dev, rx_agg_buf->mapping,
2347                                              BNXT_RX_PAGE_SIZE,
2348                                              PCI_DMA_FROMDEVICE,
2349                                              DMA_ATTR_WEAK_ORDERING);
2350
2351                         rx_agg_buf->page = NULL;
2352                         __clear_bit(j, rxr->rx_agg_bmap);
2353
2354                         __free_page(page);
2355                 }
2356                 if (rxr->rx_page) {
2357                         __free_page(rxr->rx_page);
2358                         rxr->rx_page = NULL;
2359                 }
2360         }
2361 }
2362
2363 static void bnxt_free_skbs(struct bnxt *bp)
2364 {
2365         bnxt_free_tx_skbs(bp);
2366         bnxt_free_rx_skbs(bp);
2367 }
2368
2369 static void bnxt_free_ring(struct bnxt *bp, struct bnxt_ring_mem_info *rmem)
2370 {
2371         struct pci_dev *pdev = bp->pdev;
2372         int i;
2373
2374         for (i = 0; i < rmem->nr_pages; i++) {
2375                 if (!rmem->pg_arr[i])
2376                         continue;
2377
2378                 dma_free_coherent(&pdev->dev, rmem->page_size,
2379                                   rmem->pg_arr[i], rmem->dma_arr[i]);
2380
2381                 rmem->pg_arr[i] = NULL;
2382         }
2383         if (rmem->pg_tbl) {
2384                 size_t pg_tbl_size = rmem->nr_pages * 8;
2385
2386                 if (rmem->flags & BNXT_RMEM_USE_FULL_PAGE_FLAG)
2387                         pg_tbl_size = rmem->page_size;
2388                 dma_free_coherent(&pdev->dev, pg_tbl_size,
2389                                   rmem->pg_tbl, rmem->pg_tbl_map);
2390                 rmem->pg_tbl = NULL;
2391         }
2392         if (rmem->vmem_size && *rmem->vmem) {
2393                 vfree(*rmem->vmem);
2394                 *rmem->vmem = NULL;
2395         }
2396 }
2397
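/* Allocate the coherent DMA pages backing a ring and, for multi-page
 * or indirect rings, the page table pointing at them, setting the
 * PTE valid/last bits where the chip requires them.
 */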
2398 static int bnxt_alloc_ring(struct bnxt *bp, struct bnxt_ring_mem_info *rmem)
2399 {
2400         struct pci_dev *pdev = bp->pdev;
2401         u64 valid_bit = 0;
2402         int i;
2403
2404         if (rmem->flags & (BNXT_RMEM_VALID_PTE_FLAG | BNXT_RMEM_RING_PTE_FLAG))
2405                 valid_bit = PTU_PTE_VALID;
2406         if ((rmem->nr_pages > 1 || rmem->depth > 0) && !rmem->pg_tbl) {
2407                 size_t pg_tbl_size = rmem->nr_pages * 8;
2408
2409                 if (rmem->flags & BNXT_RMEM_USE_FULL_PAGE_FLAG)
2410                         pg_tbl_size = rmem->page_size;
2411                 rmem->pg_tbl = dma_alloc_coherent(&pdev->dev, pg_tbl_size,
2412                                                   &rmem->pg_tbl_map,
2413                                                   GFP_KERNEL);
2414                 if (!rmem->pg_tbl)
2415                         return -ENOMEM;
2416         }
2417
2418         for (i = 0; i < rmem->nr_pages; i++) {
2419                 u64 extra_bits = valid_bit;
2420
2421                 rmem->pg_arr[i] = dma_alloc_coherent(&pdev->dev,
2422                                                      rmem->page_size,
2423                                                      &rmem->dma_arr[i],
2424                                                      GFP_KERNEL);
2425                 if (!rmem->pg_arr[i])
2426                         return -ENOMEM;
2427
2428                 if (rmem->nr_pages > 1 || rmem->depth > 0) {
2429                         if (i == rmem->nr_pages - 2 &&
2430                             (rmem->flags & BNXT_RMEM_RING_PTE_FLAG))
2431                                 extra_bits |= PTU_PTE_NEXT_TO_LAST;
2432                         else if (i == rmem->nr_pages - 1 &&
2433                                  (rmem->flags & BNXT_RMEM_RING_PTE_FLAG))
2434                                 extra_bits |= PTU_PTE_LAST;
2435                         rmem->pg_tbl[i] =
2436                                 cpu_to_le64(rmem->dma_arr[i] | extra_bits);
2437                 }
2438         }
2439
2440         if (rmem->vmem_size) {
2441                 *rmem->vmem = vzalloc(rmem->vmem_size);
2442                 if (!(*rmem->vmem))
2443                         return -ENOMEM;
2444         }
2445         return 0;
2446 }
2447
2448 static void bnxt_free_rx_rings(struct bnxt *bp)
2449 {
2450         int i;
2451
2452         if (!bp->rx_ring)
2453                 return;
2454
2455         for (i = 0; i < bp->rx_nr_rings; i++) {
2456                 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
2457                 struct bnxt_ring_struct *ring;
2458
2459                 if (rxr->xdp_prog)
2460                         bpf_prog_put(rxr->xdp_prog);
2461
2462                 if (xdp_rxq_info_is_reg(&rxr->xdp_rxq))
2463                         xdp_rxq_info_unreg(&rxr->xdp_rxq);
2464
2465                 kfree(rxr->rx_tpa);
2466                 rxr->rx_tpa = NULL;
2467
2468                 kfree(rxr->rx_agg_bmap);
2469                 rxr->rx_agg_bmap = NULL;
2470
2471                 ring = &rxr->rx_ring_struct;
2472                 bnxt_free_ring(bp, &ring->ring_mem);
2473
2474                 ring = &rxr->rx_agg_ring_struct;
2475                 bnxt_free_ring(bp, &ring->ring_mem);
2476         }
2477 }
2478
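/* Allocate the RX rings, including XDP rxq registration and, when
 * aggregation rings are enabled, the aggregation ring, its buffer
 * bitmap and the TPA info array.
 */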
2479 static int bnxt_alloc_rx_rings(struct bnxt *bp)
2480 {
2481         int i, rc, agg_rings = 0, tpa_rings = 0;
2482
2483         if (!bp->rx_ring)
2484                 return -ENOMEM;
2485
2486         if (bp->flags & BNXT_FLAG_AGG_RINGS)
2487                 agg_rings = 1;
2488
2489         if (bp->flags & BNXT_FLAG_TPA)
2490                 tpa_rings = 1;
2491
2492         for (i = 0; i < bp->rx_nr_rings; i++) {
2493                 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
2494                 struct bnxt_ring_struct *ring;
2495
2496                 ring = &rxr->rx_ring_struct;
2497
2498                 rc = xdp_rxq_info_reg(&rxr->xdp_rxq, bp->dev, i);
2499                 if (rc < 0)
2500                         return rc;
2501
2502                 rc = bnxt_alloc_ring(bp, &ring->ring_mem);
2503                 if (rc)
2504                         return rc;
2505
2506                 ring->grp_idx = i;
2507                 if (agg_rings) {
2508                         u16 mem_size;
2509
2510                         ring = &rxr->rx_agg_ring_struct;
2511                         rc = bnxt_alloc_ring(bp, &ring->ring_mem);
2512                         if (rc)
2513                                 return rc;
2514
2515                         ring->grp_idx = i;
2516                         rxr->rx_agg_bmap_size = bp->rx_agg_ring_mask + 1;
2517                         mem_size = rxr->rx_agg_bmap_size / 8;
2518                         rxr->rx_agg_bmap = kzalloc(mem_size, GFP_KERNEL);
2519                         if (!rxr->rx_agg_bmap)
2520                                 return -ENOMEM;
2521
2522                         if (tpa_rings) {
2523                                 rxr->rx_tpa = kcalloc(MAX_TPA,
2524                                                 sizeof(struct bnxt_tpa_info),
2525                                                 GFP_KERNEL);
2526                                 if (!rxr->rx_tpa)
2527                                         return -ENOMEM;
2528                         }
2529                 }
2530         }
2531         return 0;
2532 }
2533
2534 static void bnxt_free_tx_rings(struct bnxt *bp)
2535 {
2536         int i;
2537         struct pci_dev *pdev = bp->pdev;
2538
2539         if (!bp->tx_ring)
2540                 return;
2541
2542         for (i = 0; i < bp->tx_nr_rings; i++) {
2543                 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
2544                 struct bnxt_ring_struct *ring;
2545
2546                 if (txr->tx_push) {
2547                         dma_free_coherent(&pdev->dev, bp->tx_push_size,
2548                                           txr->tx_push, txr->tx_push_mapping);
2549                         txr->tx_push = NULL;
2550                 }
2551
2552                 ring = &txr->tx_ring_struct;
2553
2554                 bnxt_free_ring(bp, &ring->ring_mem);
2555         }
2556 }
2557
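/* Allocate the TX rings and, if TX push is enabled, a small coherent
 * buffer per ring for assembling push-mode packets; each ring is also
 * bound to its hardware queue ID here.
 */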
2558 static int bnxt_alloc_tx_rings(struct bnxt *bp)
2559 {
2560         int i, j, rc;
2561         struct pci_dev *pdev = bp->pdev;
2562
2563         bp->tx_push_size = 0;
2564         if (bp->tx_push_thresh) {
2565                 int push_size;
2566
2567                 push_size  = L1_CACHE_ALIGN(sizeof(struct tx_push_bd) +
2568                                         bp->tx_push_thresh);
2569
2570                 if (push_size > 256) {
2571                         push_size = 0;
2572                         bp->tx_push_thresh = 0;
2573                 }
2574
2575                 bp->tx_push_size = push_size;
2576         }
2577
2578         for (i = 0, j = 0; i < bp->tx_nr_rings; i++) {
2579                 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
2580                 struct bnxt_ring_struct *ring;
2581                 u8 qidx;
2582
2583                 ring = &txr->tx_ring_struct;
2584
2585                 rc = bnxt_alloc_ring(bp, &ring->ring_mem);
2586                 if (rc)
2587                         return rc;
2588
2589                 ring->grp_idx = txr->bnapi->index;
2590                 if (bp->tx_push_size) {
2591                         dma_addr_t mapping;
2592
2593                         /* One pre-allocated DMA buffer to back up
2594                          * TX push operations
2595                          */
2596                         txr->tx_push = dma_alloc_coherent(&pdev->dev,
2597                                                 bp->tx_push_size,
2598                                                 &txr->tx_push_mapping,
2599                                                 GFP_KERNEL);
2600
2601                         if (!txr->tx_push)
2602                                 return -ENOMEM;
2603
2604                         mapping = txr->tx_push_mapping +
2605                                 sizeof(struct tx_push_bd);
2606                         txr->data_mapping = cpu_to_le64(mapping);
2607
2608                         memset(txr->tx_push, 0, sizeof(struct tx_push_bd));
2609                 }
2610                 qidx = bp->tc_to_qidx[j];
2611                 ring->queue_id = bp->q_info[qidx].queue_id;
2612                 if (i < bp->tx_nr_rings_xdp)
2613                         continue;
2614                 if (i % bp->tx_nr_rings_per_tc == (bp->tx_nr_rings_per_tc - 1))
2615                         j++;
2616         }
2617         return 0;
2618 }
2619
2620 static void bnxt_free_cp_rings(struct bnxt *bp)
2621 {
2622         int i;
2623
2624         if (!bp->bnapi)
2625                 return;
2626
2627         for (i = 0; i < bp->cp_nr_rings; i++) {
2628                 struct bnxt_napi *bnapi = bp->bnapi[i];
2629                 struct bnxt_cp_ring_info *cpr;
2630                 struct bnxt_ring_struct *ring;
2631                 int j;
2632
2633                 if (!bnapi)
2634                         continue;
2635
2636                 cpr = &bnapi->cp_ring;
2637                 ring = &cpr->cp_ring_struct;
2638
2639                 bnxt_free_ring(bp, &ring->ring_mem);
2640
2641                 for (j = 0; j < 2; j++) {
2642                         struct bnxt_cp_ring_info *cpr2 = cpr->cp_ring_arr[j];
2643
2644                         if (cpr2) {
2645                                 ring = &cpr2->cp_ring_struct;
2646                                 bnxt_free_ring(bp, &ring->ring_mem);
2647                                 kfree(cpr2);
2648                                 cpr->cp_ring_arr[j] = NULL;
2649                         }
2650                 }
2651         }
2652 }
2653
2654 static struct bnxt_cp_ring_info *bnxt_alloc_cp_sub_ring(struct bnxt *bp)
2655 {
2656         struct bnxt_ring_mem_info *rmem;
2657         struct bnxt_ring_struct *ring;
2658         struct bnxt_cp_ring_info *cpr;
2659         int rc;
2660
2661         cpr = kzalloc(sizeof(*cpr), GFP_KERNEL);
2662         if (!cpr)
2663                 return NULL;
2664
2665         ring = &cpr->cp_ring_struct;
2666         rmem = &ring->ring_mem;
2667         rmem->nr_pages = bp->cp_nr_pages;
2668         rmem->page_size = HW_CMPD_RING_SIZE;
2669         rmem->pg_arr = (void **)cpr->cp_desc_ring;
2670         rmem->dma_arr = cpr->cp_desc_mapping;
2671         rmem->flags = BNXT_RMEM_RING_PTE_FLAG;
2672         rc = bnxt_alloc_ring(bp, rmem);
2673         if (rc) {
2674                 bnxt_free_ring(bp, rmem);
2675                 kfree(cpr);
2676                 cpr = NULL;
2677         }
2678         return cpr;
2679 }
2680
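/* Allocate the per-NAPI completion rings, skipping MSI-X vectors
 * reserved for ULP.  On P5 chips these are notification queues, and
 * dedicated RX and/or TX completion sub-rings are allocated for each.
 */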
2681 static int bnxt_alloc_cp_rings(struct bnxt *bp)
2682 {
2683         bool sh = !!(bp->flags & BNXT_FLAG_SHARED_RINGS);
2684         int i, rc, ulp_base_vec, ulp_msix;
2685
2686         ulp_msix = bnxt_get_ulp_msix_num(bp);
2687         ulp_base_vec = bnxt_get_ulp_msix_base(bp);
2688         for (i = 0; i < bp->cp_nr_rings; i++) {
2689                 struct bnxt_napi *bnapi = bp->bnapi[i];
2690                 struct bnxt_cp_ring_info *cpr;
2691                 struct bnxt_ring_struct *ring;
2692
2693                 if (!bnapi)
2694                         continue;
2695
2696                 cpr = &bnapi->cp_ring;
2697                 cpr->bnapi = bnapi;
2698                 ring = &cpr->cp_ring_struct;
2699
2700                 rc = bnxt_alloc_ring(bp, &ring->ring_mem);
2701                 if (rc)
2702                         return rc;
2703
2704                 if (ulp_msix && i >= ulp_base_vec)
2705                         ring->map_idx = i + ulp_msix;
2706                 else
2707                         ring->map_idx = i;
2708
2709                 if (!(bp->flags & BNXT_FLAG_CHIP_P5))
2710                         continue;
2711
2712                 if (i < bp->rx_nr_rings) {
2713                         struct bnxt_cp_ring_info *cpr2 =
2714                                 bnxt_alloc_cp_sub_ring(bp);
2715
2716                         cpr->cp_ring_arr[BNXT_RX_HDL] = cpr2;
2717                         if (!cpr2)
2718                                 return -ENOMEM;
2719                         cpr2->bnapi = bnapi;
2720                 }
2721                 if ((sh && i < bp->tx_nr_rings) ||
2722                     (!sh && i >= bp->rx_nr_rings)) {
2723                         struct bnxt_cp_ring_info *cpr2 =
2724                                 bnxt_alloc_cp_sub_ring(bp);
2725
2726                         cpr->cp_ring_arr[BNXT_TX_HDL] = cpr2;
2727                         if (!cpr2)
2728                                 return -ENOMEM;
2729                         cpr2->bnapi = bnapi;
2730                 }
2731         }
2732         return 0;
2733 }
2734
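/* Populate the ring_mem descriptors (page counts, page sizes,
 * descriptor and shadow ring arrays) for every completion, RX,
 * aggregation and TX ring before the rings are allocated.
 */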
2735 static void bnxt_init_ring_struct(struct bnxt *bp)
2736 {
2737         int i;
2738
2739         for (i = 0; i < bp->cp_nr_rings; i++) {
2740                 struct bnxt_napi *bnapi = bp->bnapi[i];
2741                 struct bnxt_ring_mem_info *rmem;
2742                 struct bnxt_cp_ring_info *cpr;
2743                 struct bnxt_rx_ring_info *rxr;
2744                 struct bnxt_tx_ring_info *txr;
2745                 struct bnxt_ring_struct *ring;
2746
2747                 if (!bnapi)
2748                         continue;
2749
2750                 cpr = &bnapi->cp_ring;
2751                 ring = &cpr->cp_ring_struct;
2752                 rmem = &ring->ring_mem;
2753                 rmem->nr_pages = bp->cp_nr_pages;
2754                 rmem->page_size = HW_CMPD_RING_SIZE;
2755                 rmem->pg_arr = (void **)cpr->cp_desc_ring;
2756                 rmem->dma_arr = cpr->cp_desc_mapping;
2757                 rmem->vmem_size = 0;
2758
2759                 rxr = bnapi->rx_ring;
2760                 if (!rxr)
2761                         goto skip_rx;
2762
2763                 ring = &rxr->rx_ring_struct;
2764                 rmem = &ring->ring_mem;
2765                 rmem->nr_pages = bp->rx_nr_pages;
2766                 rmem->page_size = HW_RXBD_RING_SIZE;
2767                 rmem->pg_arr = (void **)rxr->rx_desc_ring;
2768                 rmem->dma_arr = rxr->rx_desc_mapping;
2769                 rmem->vmem_size = SW_RXBD_RING_SIZE * bp->rx_nr_pages;
2770                 rmem->vmem = (void **)&rxr->rx_buf_ring;
2771
2772                 ring = &rxr->rx_agg_ring_struct;
2773                 rmem = &ring->ring_mem;
2774                 rmem->nr_pages = bp->rx_agg_nr_pages;
2775                 rmem->page_size = HW_RXBD_RING_SIZE;
2776                 rmem->pg_arr = (void **)rxr->rx_agg_desc_ring;
2777                 rmem->dma_arr = rxr->rx_agg_desc_mapping;
2778                 rmem->vmem_size = SW_RXBD_AGG_RING_SIZE * bp->rx_agg_nr_pages;
2779                 rmem->vmem = (void **)&rxr->rx_agg_ring;
2780
2781 skip_rx:
2782                 txr = bnapi->tx_ring;
2783                 if (!txr)
2784                         continue;
2785
2786                 ring = &txr->tx_ring_struct;
2787                 rmem = &ring->ring_mem;
2788                 rmem->nr_pages = bp->tx_nr_pages;
2789                 rmem->page_size = HW_TXBD_RING_SIZE;
2790                 rmem->pg_arr = (void **)txr->tx_desc_ring;
2791                 rmem->dma_arr = txr->tx_desc_mapping;
2792                 rmem->vmem_size = SW_TXBD_RING_SIZE * bp->tx_nr_pages;
2793                 rmem->vmem = (void **)&txr->tx_buf_ring;
2794         }
2795 }
2796
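/* Stamp every RX buffer descriptor in the ring with the given type/flags
 * word and a sequential opaque index.  The opaque value is echoed back in
 * RX completions and is used to locate the corresponding software buffer.
 */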
2797 static void bnxt_init_rxbd_pages(struct bnxt_ring_struct *ring, u32 type)
2798 {
2799         int i;
2800         u32 prod;
2801         struct rx_bd **rx_buf_ring;
2802
2803         rx_buf_ring = (struct rx_bd **)ring->ring_mem.pg_arr;
2804         for (i = 0, prod = 0; i < ring->ring_mem.nr_pages; i++) {
2805                 int j;
2806                 struct rx_bd *rxbd;
2807
2808                 rxbd = rx_buf_ring[i];
2809                 if (!rxbd)
2810                         continue;
2811
2812                 for (j = 0; j < RX_DESC_CNT; j++, rxbd++, prod++) {
2813                         rxbd->rx_bd_len_flags_type = cpu_to_le32(type);
2814                         rxbd->rx_bd_opaque = prod;
2815                 }
2816         }
2817 }
2818
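/* Initialize one RX ring: stamp the buffer descriptors, take a reference
 * on the XDP program in page mode, then pre-fill the ring (and the
 * aggregation ring, if enabled) with buffers.  A partial fill is logged
 * but not fatal; failing to fill the TPA array is.
 */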
2819 static int bnxt_init_one_rx_ring(struct bnxt *bp, int ring_nr)
2820 {
2821         struct net_device *dev = bp->dev;
2822         struct bnxt_rx_ring_info *rxr;
2823         struct bnxt_ring_struct *ring;
2824         u32 prod, type;
2825         int i;
2826
2827         type = (bp->rx_buf_use_size << RX_BD_LEN_SHIFT) |
2828                 RX_BD_TYPE_RX_PACKET_BD | RX_BD_FLAGS_EOP;
2829
2830         if (NET_IP_ALIGN == 2)
2831                 type |= RX_BD_FLAGS_SOP;
2832
2833         rxr = &bp->rx_ring[ring_nr];
2834         ring = &rxr->rx_ring_struct;
2835         bnxt_init_rxbd_pages(ring, type);
2836
2837         if (BNXT_RX_PAGE_MODE(bp) && bp->xdp_prog) {
2838                 rxr->xdp_prog = bpf_prog_add(bp->xdp_prog, 1);
2839                 if (IS_ERR(rxr->xdp_prog)) {
2840                         int rc = PTR_ERR(rxr->xdp_prog);
2841
2842                         rxr->xdp_prog = NULL;
2843                         return rc;
2844                 }
2845         }
2846         prod = rxr->rx_prod;
2847         for (i = 0; i < bp->rx_ring_size; i++) {
2848                 if (bnxt_alloc_rx_data(bp, rxr, prod, GFP_KERNEL) != 0) {
2849                         netdev_warn(dev, "init'ed rx ring %d with %d/%d skbs only\n",
2850                                     ring_nr, i, bp->rx_ring_size);
2851                         break;
2852                 }
2853                 prod = NEXT_RX(prod);
2854         }
2855         rxr->rx_prod = prod;
2856         ring->fw_ring_id = INVALID_HW_RING_ID;
2857
2858         ring = &rxr->rx_agg_ring_struct;
2859         ring->fw_ring_id = INVALID_HW_RING_ID;
2860
2861         if (!(bp->flags & BNXT_FLAG_AGG_RINGS))
2862                 return 0;
2863
2864         type = ((u32)BNXT_RX_PAGE_SIZE << RX_BD_LEN_SHIFT) |
2865                 RX_BD_TYPE_RX_AGG_BD | RX_BD_FLAGS_SOP;
2866
2867         bnxt_init_rxbd_pages(ring, type);
2868
2869         prod = rxr->rx_agg_prod;
2870         for (i = 0; i < bp->rx_agg_ring_size; i++) {
2871                 if (bnxt_alloc_rx_page(bp, rxr, prod, GFP_KERNEL) != 0) {
2872                         netdev_warn(dev, "init'ed rx ring %d with %d/%d pages only\n",
2873                                     ring_nr, i, bp->rx_agg_ring_size);
2874                         break;
2875                 }
2876                 prod = NEXT_RX_AGG(prod);
2877         }
2878         rxr->rx_agg_prod = prod;
2879
2880         if (bp->flags & BNXT_FLAG_TPA) {
2881                 if (rxr->rx_tpa) {
2882                         u8 *data;
2883                         dma_addr_t mapping;
2884
2885                         for (i = 0; i < MAX_TPA; i++) {
2886                                 data = __bnxt_alloc_rx_data(bp, &mapping,
2887                                                             GFP_KERNEL);
2888                                 if (!data)
2889                                         return -ENOMEM;
2890
2891                                 rxr->rx_tpa[i].data = data;
2892                                 rxr->rx_tpa[i].data_ptr = data + bp->rx_offset;
2893                                 rxr->rx_tpa[i].mapping = mapping;
2894                         }
2895                 } else {
2896                         netdev_err(bp->dev, "No resource allocated for LRO/GRO\n");
2897                         return -ENOMEM;
2898                 }
2899         }
2900
2901         return 0;
2902 }
2903
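/* Reset firmware ring IDs and seed each ring's interrupt coalescing
 * parameters from the global RX defaults, including the P5 RX/TX
 * completion sub-rings in cp_ring_arr[].
 */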
2904 static void bnxt_init_cp_rings(struct bnxt *bp)
2905 {
2906         int i, j;
2907
2908         for (i = 0; i < bp->cp_nr_rings; i++) {
2909                 struct bnxt_cp_ring_info *cpr = &bp->bnapi[i]->cp_ring;
2910                 struct bnxt_ring_struct *ring = &cpr->cp_ring_struct;
2911
2912                 ring->fw_ring_id = INVALID_HW_RING_ID;
2913                 cpr->rx_ring_coal.coal_ticks = bp->rx_coal.coal_ticks;
2914                 cpr->rx_ring_coal.coal_bufs = bp->rx_coal.coal_bufs;
2915                 for (j = 0; j < 2; j++) {
2916                         struct bnxt_cp_ring_info *cpr2 = cpr->cp_ring_arr[j];
2917
2918                         if (!cpr2)
2919                                 continue;
2920
2921                         ring = &cpr2->cp_ring_struct;
2922                         ring->fw_ring_id = INVALID_HW_RING_ID;
2923                         cpr2->rx_ring_coal.coal_ticks = bp->rx_coal.coal_ticks;
2924                         cpr2->rx_ring_coal.coal_bufs = bp->rx_coal.coal_bufs;
2925                 }
2926         }
2927 }
2928
2929 static int bnxt_init_rx_rings(struct bnxt *bp)
2930 {
2931         int i, rc = 0;
2932
2933         if (BNXT_RX_PAGE_MODE(bp)) {
2934                 bp->rx_offset = NET_IP_ALIGN + XDP_PACKET_HEADROOM;
2935                 bp->rx_dma_offset = XDP_PACKET_HEADROOM;
2936         } else {
2937                 bp->rx_offset = BNXT_RX_OFFSET;
2938                 bp->rx_dma_offset = BNXT_RX_DMA_OFFSET;
2939         }
2940
2941         for (i = 0; i < bp->rx_nr_rings; i++) {
2942                 rc = bnxt_init_one_rx_ring(bp, i);
2943                 if (rc)
2944                         break;
2945         }
2946
2947         return rc;
2948 }
2949
2950 static int bnxt_init_tx_rings(struct bnxt *bp)
2951 {
2952         u16 i;
2953
2954         bp->tx_wake_thresh = max_t(int, bp->tx_ring_size / 2,
2955                                    MAX_SKB_FRAGS + 1);
2956
2957         for (i = 0; i < bp->tx_nr_rings; i++) {
2958                 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
2959                 struct bnxt_ring_struct *ring = &txr->tx_ring_struct;
2960
2961                 ring->fw_ring_id = INVALID_HW_RING_ID;
2962         }
2963
2964         return 0;
2965 }
2966
2967 static void bnxt_free_ring_grps(struct bnxt *bp)
2968 {
2969         kfree(bp->grp_info);
2970         bp->grp_info = NULL;
2971 }
2972
2973 static int bnxt_init_ring_grps(struct bnxt *bp, bool irq_re_init)
2974 {
2975         int i;
2976
2977         if (irq_re_init) {
2978                 bp->grp_info = kcalloc(bp->cp_nr_rings,
2979                                        sizeof(struct bnxt_ring_grp_info),
2980                                        GFP_KERNEL);
2981                 if (!bp->grp_info)
2982                         return -ENOMEM;
2983         }
2984         for (i = 0; i < bp->cp_nr_rings; i++) {
2985                 if (irq_re_init)
2986                         bp->grp_info[i].fw_stats_ctx = INVALID_HW_RING_ID;
2987                 bp->grp_info[i].fw_grp_id = INVALID_HW_RING_ID;
2988                 bp->grp_info[i].rx_fw_ring_id = INVALID_HW_RING_ID;
2989                 bp->grp_info[i].agg_fw_ring_id = INVALID_HW_RING_ID;
2990                 bp->grp_info[i].cp_fw_ring_id = INVALID_HW_RING_ID;
2991         }
2992         return 0;
2993 }
2994
2995 static void bnxt_free_vnics(struct bnxt *bp)
2996 {
2997         kfree(bp->vnic_info);
2998         bp->vnic_info = NULL;
2999         bp->nr_vnics = 0;
3000 }
3001
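/* One VNIC is always allocated as the default.  With RFS enabled, one
 * extra VNIC per RX ring lets ntuple filters steer flows to individual
 * rings, and Nitro A0 chips need one more on top of that.
 */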
3002 static int bnxt_alloc_vnics(struct bnxt *bp)
3003 {
3004         int num_vnics = 1;
3005
3006 #ifdef CONFIG_RFS_ACCEL
3007         if (bp->flags & BNXT_FLAG_RFS)
3008                 num_vnics += bp->rx_nr_rings;
3009 #endif
3010
3011         if (BNXT_CHIP_TYPE_NITRO_A0(bp))
3012                 num_vnics++;
3013
3014         bp->vnic_info = kcalloc(num_vnics, sizeof(struct bnxt_vnic_info),
3015                                 GFP_KERNEL);
3016         if (!bp->vnic_info)
3017                 return -ENOMEM;
3018
3019         bp->nr_vnics = num_vnics;
3020         return 0;
3021 }
3022
3023 static void bnxt_init_vnics(struct bnxt *bp)
3024 {
3025         int i;
3026
3027         for (i = 0; i < bp->nr_vnics; i++) {
3028                 struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
3029                 int j;
3030
3031                 vnic->fw_vnic_id = INVALID_HW_RING_ID;
3032                 for (j = 0; j < BNXT_MAX_CTX_PER_VNIC; j++)
3033                         vnic->fw_rss_cos_lb_ctx[j] = INVALID_HW_RING_ID;
3034
3035                 vnic->fw_l2_ctx_id = INVALID_HW_RING_ID;
3036
3037                 if (bp->vnic_info[i].rss_hash_key) {
3038                         if (i == 0)
3039                                 prandom_bytes(vnic->rss_hash_key,
3040                                               HW_HASH_KEY_SIZE);
3041                         else
3042                                 memcpy(vnic->rss_hash_key,
3043                                        bp->vnic_info[0].rss_hash_key,
3044                                        HW_HASH_KEY_SIZE);
3045                 }
3046         }
3047 }
3048
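/* Return the number of ring pages needed for ring_size descriptors,
 * rounded up to a power of two so producer/consumer indices can wrap
 * with a simple mask.  The unconditional increment guarantees the ring
 * holds strictly more than ring_size entries: e.g. ring_size = 200 with
 * 64 descriptors per page gives 200/64 = 3, +1 = 4 pages, while
 * ring_size = 256 gives 4, +1 = 5, rounded up to 8 pages.
 */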
3049 static int bnxt_calc_nr_ring_pages(u32 ring_size, int desc_per_pg)
3050 {
3051         int pages;
3052
3053         pages = ring_size / desc_per_pg;
3054
3055         if (!pages)
3056                 return 1;
3057
3058         pages++;
3059
3060         while (pages & (pages - 1))
3061                 pages++;
3062
3063         return pages;
3064 }
3065
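/* Rebuild the TPA (LRO/GRO_HW aggregation) flags from the current netdev
 * features.  TPA needs aggregation rings, so it stays off when
 * BNXT_FLAG_NO_AGG_RINGS is set (e.g. in XDP page mode).
 */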
3066 void bnxt_set_tpa_flags(struct bnxt *bp)
3067 {
3068         bp->flags &= ~BNXT_FLAG_TPA;
3069         if (bp->flags & BNXT_FLAG_NO_AGG_RINGS)
3070                 return;
3071         if (bp->dev->features & NETIF_F_LRO)
3072                 bp->flags |= BNXT_FLAG_LRO;
3073         else if (bp->dev->features & NETIF_F_GRO_HW)
3074                 bp->flags |= BNXT_FLAG_GRO;
3075 }
3076
3077 /* bp->rx_ring_size, bp->tx_ring_size, dev->mtu, BNXT_FLAG_{G|L}RO flags must
3078  * be set on entry.
3079  */
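/* Derives all per-ring sizing from the configured ring sizes and MTU:
 * RX buffer size and DMA space, aggregation ring size (scaled by an
 * agg_factor for TPA and jumbo frames), page counts, index masks, and a
 * completion ring large enough for the worst-case number of RX, agg and
 * TX events.
 */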
3080 void bnxt_set_ring_params(struct bnxt *bp)
3081 {
3082         u32 ring_size, rx_size, rx_space;
3083         u32 agg_factor = 0, agg_ring_size = 0;
3084
3085         /* 8 for CRC and VLAN */
3086         rx_size = SKB_DATA_ALIGN(bp->dev->mtu + ETH_HLEN + NET_IP_ALIGN + 8);
3087
3088         rx_space = rx_size + NET_SKB_PAD +
3089                 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
3090
3091         bp->rx_copy_thresh = BNXT_RX_COPY_THRESH;
3092         ring_size = bp->rx_ring_size;
3093         bp->rx_agg_ring_size = 0;
3094         bp->rx_agg_nr_pages = 0;
3095
3096         if (bp->flags & BNXT_FLAG_TPA)
3097                 agg_factor = min_t(u32, 4, 65536 / BNXT_RX_PAGE_SIZE);
3098
3099         bp->flags &= ~BNXT_FLAG_JUMBO;
3100         if (rx_space > PAGE_SIZE && !(bp->flags & BNXT_FLAG_NO_AGG_RINGS)) {
3101                 u32 jumbo_factor;
3102
3103                 bp->flags |= BNXT_FLAG_JUMBO;
3104                 jumbo_factor = PAGE_ALIGN(bp->dev->mtu - 40) >> PAGE_SHIFT;
3105                 if (jumbo_factor > agg_factor)
3106                         agg_factor = jumbo_factor;
3107         }
3108         agg_ring_size = ring_size * agg_factor;
3109
3110         if (agg_ring_size) {
3111                 bp->rx_agg_nr_pages = bnxt_calc_nr_ring_pages(agg_ring_size,
3112                                                         RX_DESC_CNT);
3113                 if (bp->rx_agg_nr_pages > MAX_RX_AGG_PAGES) {
3114                         u32 tmp = agg_ring_size;
3115
3116                         bp->rx_agg_nr_pages = MAX_RX_AGG_PAGES;
3117                         agg_ring_size = MAX_RX_AGG_PAGES * RX_DESC_CNT - 1;
3118                         netdev_warn(bp->dev, "rx agg ring size %d reduced to %d.\n",
3119                                     tmp, agg_ring_size);
3120                 }
3121                 bp->rx_agg_ring_size = agg_ring_size;
3122                 bp->rx_agg_ring_mask = (bp->rx_agg_nr_pages * RX_DESC_CNT) - 1;
3123                 rx_size = SKB_DATA_ALIGN(BNXT_RX_COPY_THRESH + NET_IP_ALIGN);
3124                 rx_space = rx_size + NET_SKB_PAD +
3125                         SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
3126         }
3127
3128         bp->rx_buf_use_size = rx_size;
3129         bp->rx_buf_size = rx_space;
3130
3131         bp->rx_nr_pages = bnxt_calc_nr_ring_pages(ring_size, RX_DESC_CNT);
3132         bp->rx_ring_mask = (bp->rx_nr_pages * RX_DESC_CNT) - 1;
3133
3134         ring_size = bp->tx_ring_size;
3135         bp->tx_nr_pages = bnxt_calc_nr_ring_pages(ring_size, TX_DESC_CNT);
3136         bp->tx_ring_mask = (bp->tx_nr_pages * TX_DESC_CNT) - 1;
3137
3138         ring_size = bp->rx_ring_size * (2 + agg_factor) + bp->tx_ring_size;
3139         bp->cp_ring_size = ring_size;
3140
3141         bp->cp_nr_pages = bnxt_calc_nr_ring_pages(ring_size, CP_DESC_CNT);
3142         if (bp->cp_nr_pages > MAX_CP_PAGES) {
3143                 bp->cp_nr_pages = MAX_CP_PAGES;
3144                 bp->cp_ring_size = MAX_CP_PAGES * CP_DESC_CNT - 1;
3145                 netdev_warn(bp->dev, "completion ring size %d reduced to %d.\n",
3146                             ring_size, bp->cp_ring_size);
3147         }
3148         bp->cp_bit = bp->cp_nr_pages * CP_DESC_CNT;
3149         bp->cp_ring_mask = bp->cp_bit - 1;
3150 }
3151
3152 /* Changing allocation mode of RX rings.
3153  * TODO: Update when extending xdp_rxq_info to support allocation modes.
3154  */
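/* In page mode (used by XDP) each RX buffer is a full page mapped
 * bidirectionally, aggregation rings are disabled, and the MTU is capped
 * at BNXT_MAX_PAGE_MODE_MTU so every packet fits in a single page.
 */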
3155 int bnxt_set_rx_skb_mode(struct bnxt *bp, bool page_mode)
3156 {
3157         if (page_mode) {
3158                 if (bp->dev->mtu > BNXT_MAX_PAGE_MODE_MTU)
3159                         return -EOPNOTSUPP;
3160                 bp->dev->max_mtu =
3161                         min_t(u16, bp->max_mtu, BNXT_MAX_PAGE_MODE_MTU);
3162                 bp->flags &= ~BNXT_FLAG_AGG_RINGS;
3163                 bp->flags |= BNXT_FLAG_NO_AGG_RINGS | BNXT_FLAG_RX_PAGE_MODE;
3164                 bp->rx_dir = DMA_BIDIRECTIONAL;
3165                 bp->rx_skb_func = bnxt_rx_page_skb;
3166                 /* Disable LRO or GRO_HW */
3167                 netdev_update_features(bp->dev);
3168         } else {
3169                 bp->dev->max_mtu = bp->max_mtu;
3170                 bp->flags &= ~BNXT_FLAG_RX_PAGE_MODE;
3171                 bp->rx_dir = DMA_FROM_DEVICE;
3172                 bp->rx_skb_func = bnxt_rx_skb;
3173         }
3174         return 0;
3175 }
3176
3177 static void bnxt_free_vnic_attributes(struct bnxt *bp)
3178 {
3179         int i;
3180         struct bnxt_vnic_info *vnic;
3181         struct pci_dev *pdev = bp->pdev;
3182
3183         if (!bp->vnic_info)
3184                 return;
3185
3186         for (i = 0; i < bp->nr_vnics; i++) {
3187                 vnic = &bp->vnic_info[i];
3188
3189                 kfree(vnic->fw_grp_ids);
3190                 vnic->fw_grp_ids = NULL;
3191
3192                 kfree(vnic->uc_list);
3193                 vnic->uc_list = NULL;
3194
3195                 if (vnic->mc_list) {
3196                         dma_free_coherent(&pdev->dev, vnic->mc_list_size,
3197                                           vnic->mc_list, vnic->mc_list_mapping);
3198                         vnic->mc_list = NULL;
3199                 }
3200
3201                 if (vnic->rss_table) {
3202                         dma_free_coherent(&pdev->dev, PAGE_SIZE,
3203                                           vnic->rss_table,
3204                                           vnic->rss_table_dma_addr);
3205                         vnic->rss_table = NULL;
3206                 }
3207
3208                 vnic->rss_hash_key = NULL;
3209                 vnic->flags = 0;
3210         }
3211 }
3212
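/* Allocate the per-VNIC address lists and RSS state: a unicast MAC list,
 * a DMA-coherent multicast list, the firmware ring group ID table (not
 * used on P5 chips), and one DMA page holding both the RSS indirection
 * table and the hash key.
 */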
3213 static int bnxt_alloc_vnic_attributes(struct bnxt *bp)
3214 {
3215         int i, rc = 0, size;
3216         struct bnxt_vnic_info *vnic;
3217         struct pci_dev *pdev = bp->pdev;
3218         int max_rings;
3219
3220         for (i = 0; i < bp->nr_vnics; i++) {
3221                 vnic = &bp->vnic_info[i];
3222
3223                 if (vnic->flags & BNXT_VNIC_UCAST_FLAG) {
3224                         int mem_size = (BNXT_MAX_UC_ADDRS - 1) * ETH_ALEN;
3225
3226                         if (mem_size > 0) {
3227                                 vnic->uc_list = kmalloc(mem_size, GFP_KERNEL);
3228                                 if (!vnic->uc_list) {
3229                                         rc = -ENOMEM;
3230                                         goto out;
3231                                 }
3232                         }
3233                 }
3234
3235                 if (vnic->flags & BNXT_VNIC_MCAST_FLAG) {
3236                         vnic->mc_list_size = BNXT_MAX_MC_ADDRS * ETH_ALEN;
3237                         vnic->mc_list =
3238                                 dma_alloc_coherent(&pdev->dev,
3239                                                    vnic->mc_list_size,
3240                                                    &vnic->mc_list_mapping,
3241                                                    GFP_KERNEL);
3242                         if (!vnic->mc_list) {
3243                                 rc = -ENOMEM;
3244                                 goto out;
3245                         }
3246                 }
3247
3248                 if (bp->flags & BNXT_FLAG_CHIP_P5)
3249                         goto vnic_skip_grps;
3250
3251                 if (vnic->flags & BNXT_VNIC_RSS_FLAG)
3252                         max_rings = bp->rx_nr_rings;
3253                 else
3254                         max_rings = 1;
3255
3256                 vnic->fw_grp_ids = kcalloc(max_rings, sizeof(u16), GFP_KERNEL);
3257                 if (!vnic->fw_grp_ids) {
3258                         rc = -ENOMEM;
3259                         goto out;
3260                 }
3261 vnic_skip_grps:
3262                 if ((bp->flags & BNXT_FLAG_NEW_RSS_CAP) &&
3263                     !(vnic->flags & BNXT_VNIC_RSS_FLAG))
3264                         continue;
3265
3266                 /* Allocate rss table and hash key */
3267                 vnic->rss_table = dma_alloc_coherent(&pdev->dev, PAGE_SIZE,
3268                                                      &vnic->rss_table_dma_addr,
3269                                                      GFP_KERNEL);
3270                 if (!vnic->rss_table) {
3271                         rc = -ENOMEM;
3272                         goto out;
3273                 }
3274
3275                 size = L1_CACHE_ALIGN(HW_HASH_INDEX_SIZE * sizeof(u16));
3276
3277                 vnic->rss_hash_key = ((void *)vnic->rss_table) + size;
3278                 vnic->rss_hash_key_dma_addr = vnic->rss_table_dma_addr + size;
3279         }
3280         return 0;
3281
3282 out:
3283         return rc;
3284 }
3285
3286 static void bnxt_free_hwrm_resources(struct bnxt *bp)
3287 {
3288         struct pci_dev *pdev = bp->pdev;
3289
3290         if (bp->hwrm_cmd_resp_addr) {
3291                 dma_free_coherent(&pdev->dev, PAGE_SIZE, bp->hwrm_cmd_resp_addr,
3292                                   bp->hwrm_cmd_resp_dma_addr);
3293                 bp->hwrm_cmd_resp_addr = NULL;
3294         }
3295
3296         if (bp->hwrm_cmd_kong_resp_addr) {
3297                 dma_free_coherent(&pdev->dev, PAGE_SIZE,
3298                                   bp->hwrm_cmd_kong_resp_addr,
3299                                   bp->hwrm_cmd_kong_resp_dma_addr);
3300                 bp->hwrm_cmd_kong_resp_addr = NULL;
3301         }
3302 }
3303
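/* HWRM commands are answered through DMA-coherent response buffers: one
 * page for the primary (ChiMP) channel and, on chips that have it, one
 * for the auxiliary Kong channel.
 */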
3304 static int bnxt_alloc_kong_hwrm_resources(struct bnxt *bp)
3305 {
3306         struct pci_dev *pdev = bp->pdev;
3307
3308         bp->hwrm_cmd_kong_resp_addr =
3309                 dma_alloc_coherent(&pdev->dev, PAGE_SIZE,
3310                                    &bp->hwrm_cmd_kong_resp_dma_addr,
3311                                    GFP_KERNEL);
3312         if (!bp->hwrm_cmd_kong_resp_addr)
3313                 return -ENOMEM;
3314
3315         return 0;
3316 }
3317
3318 static int bnxt_alloc_hwrm_resources(struct bnxt *bp)
3319 {
3320         struct pci_dev *pdev = bp->pdev;
3321
3322         bp->hwrm_cmd_resp_addr = dma_alloc_coherent(&pdev->dev, PAGE_SIZE,
3323                                                    &bp->hwrm_cmd_resp_dma_addr,
3324                                                    GFP_KERNEL);
3325         if (!bp->hwrm_cmd_resp_addr)
3326                 return -ENOMEM;
3327
3328         return 0;
3329 }
3330
3331 static void bnxt_free_hwrm_short_cmd_req(struct bnxt *bp)
3332 {
3333         if (bp->hwrm_short_cmd_req_addr) {
3334                 struct pci_dev *pdev = bp->pdev;
3335
3336                 dma_free_coherent(&pdev->dev, bp->hwrm_max_ext_req_len,
3337                                   bp->hwrm_short_cmd_req_addr,
3338                                   bp->hwrm_short_cmd_req_dma_addr);
3339                 bp->hwrm_short_cmd_req_addr = NULL;
3340         }
3341 }
3342
3343 static int bnxt_alloc_hwrm_short_cmd_req(struct bnxt *bp)
3344 {
3345         struct pci_dev *pdev = bp->pdev;
3346
3347         bp->hwrm_short_cmd_req_addr =
3348                 dma_alloc_coherent(&pdev->dev, bp->hwrm_max_ext_req_len,
3349                                    &bp->hwrm_short_cmd_req_dma_addr,
3350                                    GFP_KERNEL);
3351         if (!bp->hwrm_short_cmd_req_addr)
3352                 return -ENOMEM;
3353
3354         return 0;
3355 }
3356
3357 static void bnxt_free_port_stats(struct bnxt *bp)
3358 {
3359         struct pci_dev *pdev = bp->pdev;
3360
3361         bp->flags &= ~BNXT_FLAG_PORT_STATS;
3362         bp->flags &= ~BNXT_FLAG_PORT_STATS_EXT;
3363
3364         if (bp->hw_rx_port_stats) {
3365                 dma_free_coherent(&pdev->dev, bp->hw_port_stats_size,
3366                                   bp->hw_rx_port_stats,
3367                                   bp->hw_rx_port_stats_map);
3368                 bp->hw_rx_port_stats = NULL;
3369         }
3370
3371         if (bp->hw_tx_port_stats_ext) {
3372                 dma_free_coherent(&pdev->dev, sizeof(struct tx_port_stats_ext),
3373                                   bp->hw_tx_port_stats_ext,
3374                                   bp->hw_tx_port_stats_ext_map);
3375                 bp->hw_tx_port_stats_ext = NULL;
3376         }
3377
3378         if (bp->hw_rx_port_stats_ext) {
3379                 dma_free_coherent(&pdev->dev, sizeof(struct rx_port_stats_ext),
3380                                   bp->hw_rx_port_stats_ext,
3381                                   bp->hw_rx_port_stats_ext_map);
3382                 bp->hw_rx_port_stats_ext = NULL;
3383         }
3384 }
3385
3386 static void bnxt_free_ring_stats(struct bnxt *bp)
3387 {
3388         struct pci_dev *pdev = bp->pdev;
3389         int size, i;
3390
3391         if (!bp->bnapi)
3392                 return;
3393
3394         size = sizeof(struct ctx_hw_stats);
3395
3396         for (i = 0; i < bp->cp_nr_rings; i++) {
3397                 struct bnxt_napi *bnapi = bp->bnapi[i];
3398                 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
3399
3400                 if (cpr->hw_stats) {
3401                         dma_free_coherent(&pdev->dev, size, cpr->hw_stats,
3402                                           cpr->hw_stats_map);
3403                         cpr->hw_stats = NULL;
3404                 }
3405         }
3406 }
3407
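/* Allocate the DMA-coherent statistics areas: one ctx_hw_stats block per
 * completion ring, plus PF-only port statistics and, when the firmware
 * is new enough (see the hwrm_spec_code checks), extended RX and TX port
 * statistics.
 */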
3408 static int bnxt_alloc_stats(struct bnxt *bp)
3409 {
3410         u32 size, i;
3411         struct pci_dev *pdev = bp->pdev;
3412
3413         size = sizeof(struct ctx_hw_stats);
3414
3415         for (i = 0; i < bp->cp_nr_rings; i++) {
3416                 struct bnxt_napi *bnapi = bp->bnapi[i];
3417                 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
3418
3419                 cpr->hw_stats = dma_alloc_coherent(&pdev->dev, size,
3420                                                    &cpr->hw_stats_map,
3421                                                    GFP_KERNEL);
3422                 if (!cpr->hw_stats)
3423                         return -ENOMEM;
3424
3425                 cpr->hw_stats_ctx_id = INVALID_STATS_CTX_ID;
3426         }
3427
3428         if (BNXT_PF(bp) && bp->chip_num != CHIP_NUM_58700) {
3429                 if (bp->hw_rx_port_stats)
3430                         goto alloc_ext_stats;
3431
3432                 bp->hw_port_stats_size = sizeof(struct rx_port_stats) +
3433                                          sizeof(struct tx_port_stats) + 1024;
3434
3435                 bp->hw_rx_port_stats =
3436                         dma_alloc_coherent(&pdev->dev, bp->hw_port_stats_size,
3437                                            &bp->hw_rx_port_stats_map,
3438                                            GFP_KERNEL);
3439                 if (!bp->hw_rx_port_stats)
3440                         return -ENOMEM;
3441
3442                 bp->hw_tx_port_stats = (void *)(bp->hw_rx_port_stats + 1) +
3443                                        512;
3444                 bp->hw_tx_port_stats_map = bp->hw_rx_port_stats_map +
3445                                            sizeof(struct rx_port_stats) + 512;
3446                 bp->flags |= BNXT_FLAG_PORT_STATS;
3447
3448 alloc_ext_stats:
3449                 /* Display extended statistics only if FW supports it */
3450                 if (bp->hwrm_spec_code < 0x10804 ||
3451                     bp->hwrm_spec_code == 0x10900)
3452                         return 0;
3453
3454                 if (bp->hw_rx_port_stats_ext)
3455                         goto alloc_tx_ext_stats;
3456
3457                 bp->hw_rx_port_stats_ext =
3458                         dma_alloc_coherent(&pdev->dev,
3459                                            sizeof(struct rx_port_stats_ext),
3460                                            &bp->hw_rx_port_stats_ext_map,
3461                                            GFP_KERNEL);
3462                 if (!bp->hw_rx_port_stats_ext)
3463                         return 0;
3464
3465 alloc_tx_ext_stats:
3466                 if (bp->hw_tx_port_stats_ext)
3467                         return 0;
3468
3469                 if (bp->hwrm_spec_code >= 0x10902) {
3470                         bp->hw_tx_port_stats_ext =
3471                                 dma_alloc_coherent(&pdev->dev,
3472                                                    sizeof(struct tx_port_stats_ext),
3473                                                    &bp->hw_tx_port_stats_ext_map,
3474                                                    GFP_KERNEL);
3475                 }
3476                 bp->flags |= BNXT_FLAG_PORT_STATS_EXT;
3477         }
3478         return 0;
3479 }
3480
3481 static void bnxt_clear_ring_indices(struct bnxt *bp)
3482 {
3483         int i;
3484
3485         if (!bp->bnapi)
3486                 return;
3487
3488         for (i = 0; i < bp->cp_nr_rings; i++) {
3489                 struct bnxt_napi *bnapi = bp->bnapi[i];
3490                 struct bnxt_cp_ring_info *cpr;
3491                 struct bnxt_rx_ring_info *rxr;
3492                 struct bnxt_tx_ring_info *txr;
3493
3494                 if (!bnapi)
3495                         continue;
3496
3497                 cpr = &bnapi->cp_ring;
3498                 cpr->cp_raw_cons = 0;
3499
3500                 txr = bnapi->tx_ring;
3501                 if (txr) {
3502                         txr->tx_prod = 0;
3503                         txr->tx_cons = 0;
3504                 }
3505
3506                 rxr = bnapi->rx_ring;
3507                 if (rxr) {
3508                         rxr->rx_prod = 0;
3509                         rxr->rx_agg_prod = 0;
3510                         rxr->rx_sw_agg_prod = 0;
3511                         rxr->rx_next_cons = 0;
3512                 }
3513         }
3514 }
3515
3516 static void bnxt_free_ntp_fltrs(struct bnxt *bp, bool irq_reinit)
3517 {
3518 #ifdef CONFIG_RFS_ACCEL
3519         int i;
3520
3521         /* We are under rtnl_lock and all our NAPIs have been disabled,
3522          * so it is safe to delete the hash table.
3523          */
3524         for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++) {
3525                 struct hlist_head *head;
3526                 struct hlist_node *tmp;
3527                 struct bnxt_ntuple_filter *fltr;
3528
3529                 head = &bp->ntp_fltr_hash_tbl[i];
3530                 hlist_for_each_entry_safe(fltr, tmp, head, hash) {
3531                         hlist_del(&fltr->hash);
3532                         kfree(fltr);
3533                 }
3534         }
3535         if (irq_reinit) {
3536                 kfree(bp->ntp_fltr_bmap);
3537                 bp->ntp_fltr_bmap = NULL;
3538         }
3539         bp->ntp_fltr_count = 0;
3540 #endif
3541 }
3542
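/* For aRFS: initialize the ntuple filter hash table and allocate the
 * bitmap used to assign filter IDs.
 */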
3543 static int bnxt_alloc_ntp_fltrs(struct bnxt *bp)
3544 {
3545 #ifdef CONFIG_RFS_ACCEL
3546         int i, rc = 0;
3547
3548         if (!(bp->flags & BNXT_FLAG_RFS))
3549                 return 0;
3550
3551         for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++)
3552                 INIT_HLIST_HEAD(&bp->ntp_fltr_hash_tbl[i]);
3553
3554         bp->ntp_fltr_count = 0;
3555         bp->ntp_fltr_bmap = kcalloc(BITS_TO_LONGS(BNXT_NTP_FLTR_MAX_FLTR),
3556                                     sizeof(long),
3557                                     GFP_KERNEL);
3558
3559         if (!bp->ntp_fltr_bmap)
3560                 rc = -ENOMEM;
3561
3562         return rc;
3563 #else
3564         return 0;
3565 #endif
3566 }
3567
3568 static void bnxt_free_mem(struct bnxt *bp, bool irq_re_init)
3569 {
3570         bnxt_free_vnic_attributes(bp);
3571         bnxt_free_tx_rings(bp);
3572         bnxt_free_rx_rings(bp);
3573         bnxt_free_cp_rings(bp);
3574         bnxt_free_ntp_fltrs(bp, irq_re_init);
3575         if (irq_re_init) {
3576                 bnxt_free_ring_stats(bp);
3577                 bnxt_free_ring_grps(bp);
3578                 bnxt_free_vnics(bp);
3579                 kfree(bp->tx_ring_map);
3580                 bp->tx_ring_map = NULL;
3581                 kfree(bp->tx_ring);
3582                 bp->tx_ring = NULL;
3583                 kfree(bp->rx_ring);
3584                 bp->rx_ring = NULL;
3585                 kfree(bp->bnapi);
3586                 bp->bnapi = NULL;
3587         } else {
3588                 bnxt_clear_ring_indices(bp);
3589         }
3590 }
3591
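/* Allocate (or re-initialize) all driver memory.  With irq_re_init set
 * this is a full allocation: the bnxt_napi array and per-ring structs
 * are created, TX rings are mapped to NAPIs (XDP TX rings first, starting
 * at NAPI 0 or after the RX rings depending on BNXT_FLAG_SHARED_RINGS),
 * then stats, filters and VNICs.  Without it, only the ring descriptor
 * memory is rebuilt on top of the existing structures.
 */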
3592 static int bnxt_alloc_mem(struct bnxt *bp, bool irq_re_init)
3593 {
3594         int i, j, rc, size, arr_size;
3595         void *bnapi;
3596
3597         if (irq_re_init) {
3598                 /* Allocate bnapi mem pointer array and mem block for
3599                  * all queues
3600                  */
3601                 arr_size = L1_CACHE_ALIGN(sizeof(struct bnxt_napi *) *
3602                                 bp->cp_nr_rings);
3603                 size = L1_CACHE_ALIGN(sizeof(struct bnxt_napi));
3604                 bnapi = kzalloc(arr_size + size * bp->cp_nr_rings, GFP_KERNEL);
3605                 if (!bnapi)
3606                         return -ENOMEM;
3607
3608                 bp->bnapi = bnapi;
3609                 bnapi += arr_size;
3610                 for (i = 0; i < bp->cp_nr_rings; i++, bnapi += size) {
3611                         bp->bnapi[i] = bnapi;
3612                         bp->bnapi[i]->index = i;
3613                         bp->bnapi[i]->bp = bp;
3614                         if (bp->flags & BNXT_FLAG_CHIP_P5) {
3615                                 struct bnxt_cp_ring_info *cpr =
3616                                         &bp->bnapi[i]->cp_ring;
3617
3618                                 cpr->cp_ring_struct.ring_mem.flags =
3619                                         BNXT_RMEM_RING_PTE_FLAG;
3620                         }
3621                 }
3622
3623                 bp->rx_ring = kcalloc(bp->rx_nr_rings,
3624                                       sizeof(struct bnxt_rx_ring_info),
3625                                       GFP_KERNEL);
3626                 if (!bp->rx_ring)
3627                         return -ENOMEM;
3628
3629                 for (i = 0; i < bp->rx_nr_rings; i++) {
3630                         struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
3631
3632                         if (bp->flags & BNXT_FLAG_CHIP_P5) {
3633                                 rxr->rx_ring_struct.ring_mem.flags =
3634                                         BNXT_RMEM_RING_PTE_FLAG;
3635                                 rxr->rx_agg_ring_struct.ring_mem.flags =
3636                                         BNXT_RMEM_RING_PTE_FLAG;
3637                         }
3638                         rxr->bnapi = bp->bnapi[i];
3639                         bp->bnapi[i]->rx_ring = &bp->rx_ring[i];
3640                 }
3641
3642                 bp->tx_ring = kcalloc(bp->tx_nr_rings,
3643                                       sizeof(struct bnxt_tx_ring_info),
3644                                       GFP_KERNEL);
3645                 if (!bp->tx_ring)
3646                         return -ENOMEM;
3647
3648                 bp->tx_ring_map = kcalloc(bp->tx_nr_rings, sizeof(u16),
3649                                           GFP_KERNEL);
3650
3651                 if (!bp->tx_ring_map)
3652                         return -ENOMEM;
3653
3654                 if (bp->flags & BNXT_FLAG_SHARED_RINGS)
3655                         j = 0;
3656                 else
3657                         j = bp->rx_nr_rings;
3658
3659                 for (i = 0; i < bp->tx_nr_rings; i++, j++) {
3660                         struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
3661
3662                         if (bp->flags & BNXT_FLAG_CHIP_P5)
3663                                 txr->tx_ring_struct.ring_mem.flags =
3664                                         BNXT_RMEM_RING_PTE_FLAG;
3665                         txr->bnapi = bp->bnapi[j];
3666                         bp->bnapi[j]->tx_ring = txr;
3667                         bp->tx_ring_map[i] = bp->tx_nr_rings_xdp + i;
3668                         if (i >= bp->tx_nr_rings_xdp) {
3669                                 txr->txq_index = i - bp->tx_nr_rings_xdp;
3670                                 bp->bnapi[j]->tx_int = bnxt_tx_int;
3671                         } else {
3672                                 bp->bnapi[j]->flags |= BNXT_NAPI_FLAG_XDP;
3673                                 bp->bnapi[j]->tx_int = bnxt_tx_int_xdp;
3674                         }
3675                 }
3676
3677                 rc = bnxt_alloc_stats(bp);
3678                 if (rc)
3679                         goto alloc_mem_err;
3680
3681                 rc = bnxt_alloc_ntp_fltrs(bp);
3682                 if (rc)
3683                         goto alloc_mem_err;
3684
3685                 rc = bnxt_alloc_vnics(bp);
3686                 if (rc)
3687                         goto alloc_mem_err;
3688         }
3689
3690         bnxt_init_ring_struct(bp);
3691
3692         rc = bnxt_alloc_rx_rings(bp);
3693         if (rc)
3694                 goto alloc_mem_err;
3695
3696         rc = bnxt_alloc_tx_rings(bp);
3697         if (rc)
3698                 goto alloc_mem_err;
3699
3700         rc = bnxt_alloc_cp_rings(bp);
3701         if (rc)
3702                 goto alloc_mem_err;
3703
3704         bp->vnic_info[0].flags |= BNXT_VNIC_RSS_FLAG | BNXT_VNIC_MCAST_FLAG |
3705                                   BNXT_VNIC_UCAST_FLAG;
3706         rc = bnxt_alloc_vnic_attributes(bp);
3707         if (rc)
3708                 goto alloc_mem_err;
3709         return 0;
3710
3711 alloc_mem_err:
3712         bnxt_free_mem(bp, true);
3713         return rc;
3714 }
3715
3716 static void bnxt_disable_int(struct bnxt *bp)
3717 {
3718         int i;
3719
3720         if (!bp->bnapi)
3721                 return;
3722
3723         for (i = 0; i < bp->cp_nr_rings; i++) {
3724                 struct bnxt_napi *bnapi = bp->bnapi[i];
3725                 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
3726                 struct bnxt_ring_struct *ring = &cpr->cp_ring_struct;
3727
3728                 if (ring->fw_ring_id != INVALID_HW_RING_ID)
3729                         bnxt_db_nq(bp, &cpr->cp_db, cpr->cp_raw_cons);
3730         }
3731 }
3732
3733 static int bnxt_cp_num_to_irq_num(struct bnxt *bp, int n)
3734 {
3735         struct bnxt_napi *bnapi = bp->bnapi[n];
3736         struct bnxt_cp_ring_info *cpr;
3737
3738         cpr = &bnapi->cp_ring;
3739         return cpr->cp_ring_struct.map_idx;
3740 }
3741
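/* Bump intr_sem so the interrupt/NAPI path backs off, then
 * synchronize_irq() each vector to wait out handlers already running on
 * other CPUs.
 */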
3742 static void bnxt_disable_int_sync(struct bnxt *bp)
3743 {
3744         int i;
3745
3746         atomic_inc(&bp->intr_sem);
3747
3748         bnxt_disable_int(bp);
3749         for (i = 0; i < bp->cp_nr_rings; i++) {
3750                 int map_idx = bnxt_cp_num_to_irq_num(bp, i);
3751
3752                 synchronize_irq(bp->irq_tbl[map_idx].vector);
3753         }
3754 }
3755
3756 static void bnxt_enable_int(struct bnxt *bp)
3757 {
3758         int i;
3759
3760         atomic_set(&bp->intr_sem, 0);
3761         for (i = 0; i < bp->cp_nr_rings; i++) {
3762                 struct bnxt_napi *bnapi = bp->bnapi[i];
3763                 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
3764
3765                 bnxt_db_nq_arm(bp, &cpr->cp_db, cpr->cp_raw_cons);
3766         }
3767 }
3768
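/* Fill in the common HWRM request header.  The response address is
 * selected per command: requests routed to the Kong channel get the Kong
 * response buffer, everything else uses the default one.
 */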
3769 void bnxt_hwrm_cmd_hdr_init(struct bnxt *bp, void *request, u16 req_type,
3770                             u16 cmpl_ring, u16 target_id)
3771 {
3772         struct input *req = request;
3773
3774         req->req_type = cpu_to_le16(req_type);
3775         req->cmpl_ring = cpu_to_le16(cmpl_ring);
3776         req->target_id = cpu_to_le16(target_id);
3777         if (bnxt_kong_hwrm_message(bp, req))
3778                 req->resp_addr = cpu_to_le64(bp->hwrm_cmd_kong_resp_dma_addr);
3779         else
3780                 req->resp_addr = cpu_to_le64(bp->hwrm_cmd_resp_dma_addr);
3781 }
3782
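/* Send one HWRM command and wait for its response.  The request is
 * copied into the channel's BAR0 communication window (or, for the short
 * command format, into a DMA buffer described by a hwrm_short_input
 * header), the rest of the window is zeroed, and the channel doorbell is
 * rung.  Completion is detected either via the response completion
 * interrupt (when cmpl_ring is valid) or by polling the DMA'ed response
 * for a non-zero length followed by the valid bit in its last byte.
 * Callers must hold hwrm_cmd_lock; only one command may be outstanding
 * at a time.
 */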
3783 static int bnxt_hwrm_do_send_msg(struct bnxt *bp, void *msg, u32 msg_len,
3784                                  int timeout, bool silent)
3785 {
3786         int i, intr_process, rc, tmo_count;
3787         struct input *req = msg;
3788         u32 *data = msg;
3789         __le32 *resp_len;
3790         u8 *valid;
3791         u16 cp_ring_id, len = 0;
3792         struct hwrm_err_output *resp = bp->hwrm_cmd_resp_addr;
3793         u16 max_req_len = BNXT_HWRM_MAX_REQ_LEN;
3794         struct hwrm_short_input short_input = {0};
3795         u32 doorbell_offset = BNXT_GRCPF_REG_CHIMP_COMM_TRIGGER;
3796         u8 *resp_addr = (u8 *)bp->hwrm_cmd_resp_addr;
3797         u32 bar_offset = BNXT_GRCPF_REG_CHIMP_COMM;
3798         u16 dst = BNXT_HWRM_CHNL_CHIMP;
3799
3800         if (msg_len > BNXT_HWRM_MAX_REQ_LEN) {
3801                 if (msg_len > bp->hwrm_max_ext_req_len ||
3802                     !bp->hwrm_short_cmd_req_addr)
3803                         return -EINVAL;
3804         }
3805
3806         if (bnxt_hwrm_kong_chnl(bp, req)) {
3807                 dst = BNXT_HWRM_CHNL_KONG;
3808                 bar_offset = BNXT_GRCPF_REG_KONG_COMM;
3809                 doorbell_offset = BNXT_GRCPF_REG_KONG_COMM_TRIGGER;
3810                 resp = bp->hwrm_cmd_kong_resp_addr;
3811                 resp_addr = (u8 *)bp->hwrm_cmd_kong_resp_addr;
3812         }
3813
3814         memset(resp, 0, PAGE_SIZE);
3815         cp_ring_id = le16_to_cpu(req->cmpl_ring);
3816         intr_process = (cp_ring_id == INVALID_HW_RING_ID) ? 0 : 1;
3817
3818         req->seq_id = cpu_to_le16(bnxt_get_hwrm_seq_id(bp, dst));
3819         /* currently supports only one outstanding message */
3820         if (intr_process)
3821                 bp->hwrm_intr_seq_id = le16_to_cpu(req->seq_id);
3822
3823         if ((bp->fw_cap & BNXT_FW_CAP_SHORT_CMD) ||
3824             msg_len > BNXT_HWRM_MAX_REQ_LEN) {
3825                 void *short_cmd_req = bp->hwrm_short_cmd_req_addr;
3826                 u16 max_msg_len;
3827
3828                 /* Zero-pad the request up to the maximum extended request
3829                  * length for the short cmd format; this is the limit
3830                  * reported by the device, or the internal default.
3831                  */
3832                 max_msg_len = bp->hwrm_max_ext_req_len;
3833
3834                 memcpy(short_cmd_req, req, msg_len);
3835                 if (msg_len < max_msg_len)
3836                         memset(short_cmd_req + msg_len, 0,
3837                                max_msg_len - msg_len);
3838
3839                 short_input.req_type = req->req_type;
3840                 short_input.signature =
3841                                 cpu_to_le16(SHORT_REQ_SIGNATURE_SHORT_CMD);
3842                 short_input.size = cpu_to_le16(msg_len);
3843                 short_input.req_addr =
3844                         cpu_to_le64(bp->hwrm_short_cmd_req_dma_addr);
3845
3846                 data = (u32 *)&short_input;
3847                 msg_len = sizeof(short_input);
3848
3849                 /* Sync memory write before updating doorbell */
3850                 wmb();
3851
3852                 max_req_len = BNXT_HWRM_SHORT_REQ_LEN;
3853         }
3854
3855         /* Write request msg to hwrm channel */
3856         __iowrite32_copy(bp->bar0 + bar_offset, data, msg_len / 4);
3857
3858         for (i = msg_len; i < max_req_len; i += 4)
3859                 writel(0, bp->bar0 + bar_offset + i);
3860
3861         /* Ring channel doorbell */
3862         writel(1, bp->bar0 + doorbell_offset);
3863
3864         if (!timeout)
3865                 timeout = DFLT_HWRM_CMD_TIMEOUT;
3866         /* convert timeout to usec */
3867         timeout *= 1000;
3868
3869         i = 0;
3870         /* Short timeout for the first few iterations:
3871          * number of loops = number of loops for short timeout +
3872          * number of loops for standard timeout.
3873          */
3874         tmo_count = HWRM_SHORT_TIMEOUT_COUNTER;
3875         timeout = timeout - HWRM_SHORT_MIN_TIMEOUT * HWRM_SHORT_TIMEOUT_COUNTER;
3876         tmo_count += DIV_ROUND_UP(timeout, HWRM_MIN_TIMEOUT);
3877         resp_len = (__le32 *)(resp_addr + HWRM_RESP_LEN_OFFSET);
3878
3879         if (intr_process) {
3880                 u16 seq_id = bp->hwrm_intr_seq_id;
3881
3882                 /* Wait until hwrm response cmpl interrupt is processed */
3883                 while (bp->hwrm_intr_seq_id != (u16)~seq_id &&
3884                        i++ < tmo_count) {
3885                         /* on first few passes, just barely sleep */
3886                         if (i < HWRM_SHORT_TIMEOUT_COUNTER)
3887                                 usleep_range(HWRM_SHORT_MIN_TIMEOUT,
3888                                              HWRM_SHORT_MAX_TIMEOUT);
3889                         else
3890                                 usleep_range(HWRM_MIN_TIMEOUT,
3891                                              HWRM_MAX_TIMEOUT);
3892                 }
3893
3894                 if (bp->hwrm_intr_seq_id != (u16)~seq_id) {
3895                         netdev_err(bp->dev, "Resp cmpl intr err msg: 0x%x\n",
3896                                    le16_to_cpu(req->req_type));
3897                         return -EBUSY;
3898                 }
3899                 len = (le32_to_cpu(*resp_len) & HWRM_RESP_LEN_MASK) >>
3900                       HWRM_RESP_LEN_SFT;
3901                 valid = resp_addr + len - 1;
3902         } else {
3903                 int j;
3904
3905                 /* Check if response len is updated */
3906                 for (i = 0; i < tmo_count; i++) {
3907                         len = (le32_to_cpu(*resp_len) & HWRM_RESP_LEN_MASK) >>
3908                               HWRM_RESP_LEN_SFT;
3909                         if (len)
3910                                 break;
3911                         /* on first few passes, just barely sleep */
3912                         if (i < HWRM_SHORT_TIMEOUT_COUNTER)
3913                                 usleep_range(HWRM_SHORT_MIN_TIMEOUT,
3914                                              HWRM_SHORT_MAX_TIMEOUT);
3915                         else
3916                                 usleep_range(HWRM_MIN_TIMEOUT,
3917                                              HWRM_MAX_TIMEOUT);
3918                 }
3919
3920                 if (i >= tmo_count) {
3921                         netdev_err(bp->dev, "Error (timeout: %d) msg {0x%x 0x%x} len:%d\n",
3922                                    HWRM_TOTAL_TIMEOUT(i),
3923                                    le16_to_cpu(req->req_type),
3924                                    le16_to_cpu(req->seq_id), len);
3925                         return -EBUSY;
3926                 }
3927
3928                 /* Last byte of resp contains valid bit */
3929                 valid = resp_addr + len - 1;
3930                 for (j = 0; j < HWRM_VALID_BIT_DELAY_USEC; j++) {
3931                         /* make sure we read from updated DMA memory */
3932                         dma_rmb();
3933                         if (*valid)
3934                                 break;
3935                         usleep_range(1, 5);
3936                 }
3937
3938                 if (j >= HWRM_VALID_BIT_DELAY_USEC) {
3939                         netdev_err(bp->dev, "Error (timeout: %d) msg {0x%x 0x%x} len:%d v:%d\n",
3940                                    HWRM_TOTAL_TIMEOUT(i),
3941                                    le16_to_cpu(req->req_type),
3942                                    le16_to_cpu(req->seq_id), len, *valid);
3943                         return -EBUSY;
3944                 }
3945         }
3946
3947         /* Zero the valid bit for compatibility.  The valid bit in an older
3948          * spec may become a new field in a newer spec.  We must make sure
3949          * that a field not implemented by the old spec reads back as zero.
3950          */
3951         *valid = 0;
3952         rc = le16_to_cpu(resp->error_code);
3953         if (rc && !silent)
3954                 netdev_err(bp->dev, "hwrm req_type 0x%x seq id 0x%x error 0x%x\n",
3955                            le16_to_cpu(resp->req_type),
3956                            le16_to_cpu(resp->seq_id), rc);
3957         return rc;
3958 }
3959
3960 int _hwrm_send_message(struct bnxt *bp, void *msg, u32 msg_len, int timeout)
3961 {
3962         return bnxt_hwrm_do_send_msg(bp, msg, msg_len, timeout, false);
3963 }
3964
3965 int _hwrm_send_message_silent(struct bnxt *bp, void *msg, u32 msg_len,
3966                               int timeout)
3967 {
3968         return bnxt_hwrm_do_send_msg(bp, msg, msg_len, timeout, true);
3969 }
3970
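/* Locked wrappers: hwrm_send_message() and hwrm_send_message_silent()
 * take hwrm_cmd_lock around the send.  The _hwrm_send_message() variants
 * above expect the caller to hold the lock, typically so it can read the
 * response buffer before releasing it.
 */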
3971 int hwrm_send_message(struct bnxt *bp, void *msg, u32 msg_len, int timeout)
3972 {
3973         int rc;
3974
3975         mutex_lock(&bp->hwrm_cmd_lock);
3976         rc = _hwrm_send_message(bp, msg, msg_len, timeout);
3977         mutex_unlock(&bp->hwrm_cmd_lock);
3978         return rc;
3979 }
3980
3981 int hwrm_send_message_silent(struct bnxt *bp, void *msg, u32 msg_len,
3982                              int timeout)
3983 {
3984         int rc;
3985
3986         mutex_lock(&bp->hwrm_cmd_lock);
3987         rc = bnxt_hwrm_do_send_msg(bp, msg, msg_len, timeout, true);
3988         mutex_unlock(&bp->hwrm_cmd_lock);
3989         return rc;
3990 }
3991
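/* Register the set of async firmware events this driver wants forwarded:
 * the static bnxt_async_events_arr[] list plus any extra bits the caller
 * passes in bmap.
 */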
3992 int bnxt_hwrm_func_rgtr_async_events(struct bnxt *bp, unsigned long *bmap,
3993                                      int bmap_size)
3994 {
3995         struct hwrm_func_drv_rgtr_input req = {0};
3996         DECLARE_BITMAP(async_events_bmap, 256);
3997         u32 *events = (u32 *)async_events_bmap;
3998         int i;
3999
4000         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_DRV_RGTR, -1, -1);
4001
4002         req.enables =
4003                 cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_ASYNC_EVENT_FWD);
4004
4005         memset(async_events_bmap, 0, sizeof(async_events_bmap));
4006         for (i = 0; i < ARRAY_SIZE(bnxt_async_events_arr); i++)
4007                 __set_bit(bnxt_async_events_arr[i], async_events_bmap);
4008
4009         if (bmap && bmap_size) {
4010                 for (i = 0; i < bmap_size; i++) {
4011                         if (test_bit(i, bmap))
4012                                 __set_bit(i, async_events_bmap);
4013                 }
4014         }
4015
4016         for (i = 0; i < 8; i++)
4017                 req.async_event_fwd[i] |= cpu_to_le32(events[i]);
4018
4019         return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4020 }
4021
4022 static int bnxt_hwrm_func_drv_rgtr(struct bnxt *bp)
4023 {
4024         struct hwrm_func_drv_rgtr_output *resp = bp->hwrm_cmd_resp_addr;
4025         struct hwrm_func_drv_rgtr_input req = {0};
4026         int rc;
4027
4028         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_DRV_RGTR, -1, -1);
4029
4030         req.enables =
4031                 cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_OS_TYPE |
4032                             FUNC_DRV_RGTR_REQ_ENABLES_VER);
4033
4034         req.os_type = cpu_to_le16(FUNC_DRV_RGTR_REQ_OS_TYPE_LINUX);
4035         req.flags = cpu_to_le32(FUNC_DRV_RGTR_REQ_FLAGS_16BIT_VER_MODE);
4036         req.ver_maj_8b = DRV_VER_MAJ;
4037         req.ver_min_8b = DRV_VER_MIN;
4038         req.ver_upd_8b = DRV_VER_UPD;
4039         req.ver_maj = cpu_to_le16(DRV_VER_MAJ);
4040         req.ver_min = cpu_to_le16(DRV_VER_MIN);
4041         req.ver_upd = cpu_to_le16(DRV_VER_UPD);
4042
4043         if (BNXT_PF(bp)) {
4044                 u32 data[8];
4045                 int i;
4046
4047                 memset(data, 0, sizeof(data));
4048                 for (i = 0; i < ARRAY_SIZE(bnxt_vf_req_snif); i++) {
4049                         u16 cmd = bnxt_vf_req_snif[i];
4050                         unsigned int bit, idx;
4051
4052                         idx = cmd / 32;
4053                         bit = cmd % 32;
4054                         data[idx] |= 1 << bit;
4055                 }
4056
4057                 for (i = 0; i < 8; i++)
4058                         req.vf_req_fwd[i] = cpu_to_le32(data[i]);
4059
4060                 req.enables |=
4061                         cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_VF_REQ_FWD);
4062         }
4063
4064         if (bp->fw_cap & BNXT_FW_CAP_OVS_64BIT_HANDLE)
4065                 req.flags |= cpu_to_le32(
4066                         FUNC_DRV_RGTR_REQ_FLAGS_FLOW_HANDLE_64BIT_MODE);
4067
4068         mutex_lock(&bp->hwrm_cmd_lock);
4069         rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4070         if (rc)
4071                 rc = -EIO;
4072         else if (resp->flags &
4073                  cpu_to_le32(FUNC_DRV_RGTR_RESP_FLAGS_IF_CHANGE_SUPPORTED))
4074                 bp->fw_cap |= BNXT_FW_CAP_IF_CHANGE;
4075         mutex_unlock(&bp->hwrm_cmd_lock);
4076         return rc;
4077 }
4078
4079 static int bnxt_hwrm_func_drv_unrgtr(struct bnxt *bp)
4080 {
4081         struct hwrm_func_drv_unrgtr_input req = {0};
4082
4083         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_DRV_UNRGTR, -1, -1);
4084         return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4085 }
4086
4087 static int bnxt_hwrm_tunnel_dst_port_free(struct bnxt *bp, u8 tunnel_type)
4088 {
4089         int rc = 0;
4090         struct hwrm_tunnel_dst_port_free_input req = {0};
4091
4092         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_TUNNEL_DST_PORT_FREE, -1, -1);
4093         req.tunnel_type = tunnel_type;
4094
4095         switch (tunnel_type) {
4096         case TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN:
4097                 req.tunnel_dst_port_id = bp->vxlan_fw_dst_port_id;
4098                 break;
4099         case TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE:
4100                 req.tunnel_dst_port_id = bp->nge_fw_dst_port_id;
4101                 break;
4102         default:
4103                 break;
4104         }
4105
4106         rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4107         if (rc)
4108                 netdev_err(bp->dev, "hwrm_tunnel_dst_port_free failed. rc:%d\n",
4109                            rc);
4110         return rc;
4111 }
4112
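/* Program a UDP tunnel (VXLAN or GENEVE) destination port into the
 * firmware and cache the returned port ID for the later free call.
 */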
4113 static int bnxt_hwrm_tunnel_dst_port_alloc(struct bnxt *bp, __be16 port,
4114                                            u8 tunnel_type)
4115 {
4116         int rc = 0;
4117         struct hwrm_tunnel_dst_port_alloc_input req = {0};
4118         struct hwrm_tunnel_dst_port_alloc_output *resp = bp->hwrm_cmd_resp_addr;
4119
4120         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_TUNNEL_DST_PORT_ALLOC, -1, -1);
4121
4122         req.tunnel_type = tunnel_type;
4123         req.tunnel_dst_port_val = port;
4124
4125         mutex_lock(&bp->hwrm_cmd_lock);
4126         rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4127         if (rc) {
4128                 netdev_err(bp->dev, "hwrm_tunnel_dst_port_alloc failed. rc:%d\n",
4129                            rc);
4130                 goto err_out;
4131         }
4132
4133         switch (tunnel_type) {
4134         case TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN:
4135                 bp->vxlan_fw_dst_port_id = resp->tunnel_dst_port_id;
4136                 break;
4137         case TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_GENEVE:
4138                 bp->nge_fw_dst_port_id = resp->tunnel_dst_port_id;
4139                 break;
4140         default:
4141                 break;
4142         }
4143
4144 err_out:
4145         mutex_unlock(&bp->hwrm_cmd_lock);
4146         return rc;
4147 }
4148
4149 static int bnxt_hwrm_cfa_l2_set_rx_mask(struct bnxt *bp, u16 vnic_id)
4150 {
4151         struct hwrm_cfa_l2_set_rx_mask_input req = {0};
4152         struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
4153
4154         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_L2_SET_RX_MASK, -1, -1);
4155         req.vnic_id = cpu_to_le32(vnic->fw_vnic_id);
4156
4157         req.num_mc_entries = cpu_to_le32(vnic->mc_list_count);
4158         req.mc_tbl_addr = cpu_to_le64(vnic->mc_list_mapping);
4159         req.mask = cpu_to_le32(vnic->rx_mask);
4160         return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4161 }
4162
4163 #ifdef CONFIG_RFS_ACCEL
4164 static int bnxt_hwrm_cfa_ntuple_filter_free(struct bnxt *bp,
4165                                             struct bnxt_ntuple_filter *fltr)
4166 {
4167         struct hwrm_cfa_ntuple_filter_free_input req = {0};
4168
4169         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_NTUPLE_FILTER_FREE, -1, -1);
4170         req.ntuple_filter_id = fltr->filter_id;
4171         return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4172 }
4173
4174 #define BNXT_NTP_FLTR_FLAGS                                     \
4175         (CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_L2_FILTER_ID |     \
4176          CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_ETHERTYPE |        \
4177          CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_MACADDR |      \
4178          CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_IPADDR_TYPE |      \
4179          CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_IPADDR |       \
4180          CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_IPADDR_MASK |  \
4181          CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_IPADDR |       \
4182          CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_IPADDR_MASK |  \
4183          CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_IP_PROTOCOL |      \
4184          CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_PORT |         \
4185          CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_PORT_MASK |    \
4186          CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_PORT |         \
4187          CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_PORT_MASK |    \
4188          CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_ID)
4189
4190 #define BNXT_NTP_TUNNEL_FLTR_FLAG                               \
4191                 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_TUNNEL_TYPE
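/* These "enables" masks tell the firmware which fields of the request are
 * valid: BNXT_NTP_FLTR_FLAGS covers the standard n-tuple match fields,
 * and BNXT_NTP_TUNNEL_FLTR_FLAG is OR'ed in only for encapsulated flows.
 */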
4192
4193 static int bnxt_hwrm_cfa_ntuple_filter_alloc(struct bnxt *bp,
4194                                              struct bnxt_ntuple_filter *fltr)
4195 {
4196         struct bnxt_vnic_info *vnic = &bp->vnic_info[fltr->rxq + 1];
4197         struct hwrm_cfa_ntuple_filter_alloc_input req = {0};
4198         struct hwrm_cfa_ntuple_filter_alloc_output *resp;
4199         struct flow_keys *keys = &fltr->fkeys;
4200         int rc = 0;
4201
4202         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_NTUPLE_FILTER_ALLOC, -1, -1);
4203         req.l2_filter_id = bp->vnic_info[0].fw_l2_filter_id[fltr->l2_fltr_idx];
4204
4205         req.enables = cpu_to_le32(BNXT_NTP_FLTR_FLAGS);
4206
4207         req.ethertype = htons(ETH_P_IP);
4208         memcpy(req.src_macaddr, fltr->src_mac_addr, ETH_ALEN);
4209         req.ip_addr_type = CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV4;
4210         req.ip_protocol = keys->basic.ip_proto;
4211
4212         if (keys->basic.n_proto == htons(ETH_P_IPV6)) {
4213                 int i;
4214
4215                 req.ethertype = htons(ETH_P_IPV6);
4216                 req.ip_addr_type =
4217                         CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV6;
4218                 *(struct in6_addr *)&req.src_ipaddr[0] =
4219                         keys->addrs.v6addrs.src;
4220                 *(struct in6_addr *)&req.dst_ipaddr[0] =
4221                         keys->addrs.v6addrs.dst;
4222                 for (i = 0; i < 4; i++) {
4223                         req.src_ipaddr_mask[i] = cpu_to_be32(0xffffffff);
4224                         req.dst_ipaddr_mask[i] = cpu_to_be32(0xffffffff);
4225                 }
4226         } else {
4227                 req.src_ipaddr[0] = keys->addrs.v4addrs.src;
4228                 req.src_ipaddr_mask[0] = cpu_to_be32(0xffffffff);
4229                 req.dst_ipaddr[0] = keys->addrs.v4addrs.dst;
4230                 req.dst_ipaddr_mask[0] = cpu_to_be32(0xffffffff);
4231         }
4232         if (keys->control.flags & FLOW_DIS_ENCAPSULATION) {
4233                 req.enables |= cpu_to_le32(BNXT_NTP_TUNNEL_FLTR_FLAG);
4234                 req.tunnel_type =
4235                         CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL;
4236         }
4237
4238         req.src_port = keys->ports.src;
4239         req.src_port_mask = cpu_to_be16(0xffff);
4240         req.dst_port = keys->ports.dst;
4241         req.dst_port_mask = cpu_to_be16(0xffff);
4242
4243         req.dst_id = cpu_to_le16(vnic->fw_vnic_id);
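        /* dst_id steers matching flows to the VNIC backing the target
         * ring; VNIC 0 is the default VNIC, so ring rxq is served by
         * bp->vnic_info[rxq + 1] (see the vnic lookup above).
         */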
4244         mutex_lock(&bp->hwrm_cmd_lock);
4245         rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4246         if (!rc) {
4247                 resp = bnxt_get_hwrm_resp_addr(bp, &req);
4248                 fltr->filter_id = resp->ntuple_filter_id;
4249         }
4250         mutex_unlock(&bp->hwrm_cmd_lock);
4251         return rc;
4252 }
4253 #endif
4254
4255 static int bnxt_hwrm_set_vnic_filter(struct bnxt *bp, u16 vnic_id, u16 idx,
4256                                      u8 *mac_addr)
4257 {
4258         int rc = 0;
4259         struct hwrm_cfa_l2_filter_alloc_input req = {0};
4260         struct hwrm_cfa_l2_filter_alloc_output *resp = bp->hwrm_cmd_resp_addr;
4261
4262         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_L2_FILTER_ALLOC, -1, -1);
4263         req.flags = cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH_RX);
4264         if (!BNXT_CHIP_TYPE_NITRO_A0(bp))
4265                 req.flags |=
4266                         cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_FLAGS_OUTERMOST);
4267         req.dst_id = cpu_to_le16(bp->vnic_info[vnic_id].fw_vnic_id);
4268         req.enables =
4269                 cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR |
4270                             CFA_L2_FILTER_ALLOC_REQ_ENABLES_DST_ID |
4271                             CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR_MASK);
4272         memcpy(req.l2_addr, mac_addr, ETH_ALEN);
4273         req.l2_addr_mask[0] = 0xff;
4274         req.l2_addr_mask[1] = 0xff;
4275         req.l2_addr_mask[2] = 0xff;
4276         req.l2_addr_mask[3] = 0xff;
4277         req.l2_addr_mask[4] = 0xff;
4278         req.l2_addr_mask[5] = 0xff;
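        /* An all-ones 6-byte mask requests an exact match on l2_addr. */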
4279
4280         mutex_lock(&bp->hwrm_cmd_lock);
4281         rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4282         if (!rc)
4283                 bp->vnic_info[vnic_id].fw_l2_filter_id[idx] =
4284                                                         resp->l2_filter_id;
4285         mutex_unlock(&bp->hwrm_cmd_lock);
4286         return rc;
4287 }
4288
4289 static int bnxt_hwrm_clear_vnic_filter(struct bnxt *bp)
4290 {
4291         u16 i, j, num_of_vnics = 1; /* only vnic 0 supported */
4292         int rc = 0;
4293
4294         /* Any associated ntuple filters will also be cleared by firmware. */
4295         mutex_lock(&bp->hwrm_cmd_lock);
4296         for (i = 0; i < num_of_vnics; i++) {
4297                 struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
4298
4299                 for (j = 0; j < vnic->uc_filter_count; j++) {
4300                         struct hwrm_cfa_l2_filter_free_input req = {0};
4301
4302                         bnxt_hwrm_cmd_hdr_init(bp, &req,
4303                                                HWRM_CFA_L2_FILTER_FREE, -1, -1);
4304
4305                         req.l2_filter_id = vnic->fw_l2_filter_id[j];
4306
4307                         rc = _hwrm_send_message(bp, &req, sizeof(req),
4308                                                 HWRM_CMD_TIMEOUT);
4309                 }
4310                 vnic->uc_filter_count = 0;
4311         }
4312         mutex_unlock(&bp->hwrm_cmd_lock);
4313
4314         return rc;
4315 }
4316
4317 static int bnxt_hwrm_vnic_set_tpa(struct bnxt *bp, u16 vnic_id, u32 tpa_flags)
4318 {
4319         struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
4320         struct hwrm_vnic_tpa_cfg_input req = {0};
4321
4322         if (vnic->fw_vnic_id == INVALID_HW_RING_ID)
4323                 return 0;
4324
4325         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_TPA_CFG, -1, -1);
4326
4327         if (tpa_flags) {
4328                 u16 mss = bp->dev->mtu - 40;
4329                 u32 nsegs, n, segs = 0, flags;
4330
4331                 flags = VNIC_TPA_CFG_REQ_FLAGS_TPA |
4332                         VNIC_TPA_CFG_REQ_FLAGS_ENCAP_TPA |
4333                         VNIC_TPA_CFG_REQ_FLAGS_RSC_WND_UPDATE |
4334                         VNIC_TPA_CFG_REQ_FLAGS_AGG_WITH_ECN |
4335                         VNIC_TPA_CFG_REQ_FLAGS_AGG_WITH_SAME_GRE_SEQ;
4336                 if (tpa_flags & BNXT_FLAG_GRO)
4337                         flags |= VNIC_TPA_CFG_REQ_FLAGS_GRO;
4338
4339                 req.flags = cpu_to_le32(flags);
4340
4341                 req.enables =
4342                         cpu_to_le32(VNIC_TPA_CFG_REQ_ENABLES_MAX_AGG_SEGS |
4343                                     VNIC_TPA_CFG_REQ_ENABLES_MAX_AGGS |
4344                                     VNIC_TPA_CFG_REQ_ENABLES_MIN_AGG_LEN);
4345
4346                 /* The number of segs is in log2 units, and the first
4347                  * packet is not counted as part of these units.
4348                  */
4349                 if (mss <= BNXT_RX_PAGE_SIZE) {
4350                         n = BNXT_RX_PAGE_SIZE / mss;
4351                         nsegs = (MAX_SKB_FRAGS - 1) * n;
4352                 } else {
4353                         n = mss / BNXT_RX_PAGE_SIZE;
4354                         if (mss & (BNXT_RX_PAGE_SIZE - 1))
4355                                 n++;
4356                         nsegs = (MAX_SKB_FRAGS - n) / n;
4357                 }
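                /* Worked example (assuming a 1500-byte MTU, 4K rx pages
                 * and MAX_SKB_FRAGS == 17): mss = 1460 <= 4096, so
                 * n = 4096 / 1460 = 2 and nsegs = 16 * 2 = 32, giving
                 * max_agg_segs = ilog2(32) = 5 below.
                 */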
4358
4359                 segs = ilog2(nsegs);
4360                 req.max_agg_segs = cpu_to_le16(segs);
4361                 req.max_aggs = cpu_to_le16(VNIC_TPA_CFG_REQ_MAX_AGGS_MAX);
4362
4363                 req.min_agg_len = cpu_to_le32(512);
4364         }
4365         req.vnic_id = cpu_to_le16(vnic->fw_vnic_id);
4366
4367         return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4368 }
4369
4370 static u16 bnxt_cp_ring_from_grp(struct bnxt *bp, struct bnxt_ring_struct *ring)
4371 {
4372         struct bnxt_ring_grp_info *grp_info;
4373
4374         grp_info = &bp->grp_info[ring->grp_idx];
4375         return grp_info->cp_fw_ring_id;
4376 }
4377
4378 static u16 bnxt_cp_ring_for_rx(struct bnxt *bp, struct bnxt_rx_ring_info *rxr)
4379 {
4380         if (bp->flags & BNXT_FLAG_CHIP_P5) {
4381                 struct bnxt_napi *bnapi = rxr->bnapi;
4382                 struct bnxt_cp_ring_info *cpr;
4383
4384                 cpr = bnapi->cp_ring.cp_ring_arr[BNXT_RX_HDL];
4385                 return cpr->cp_ring_struct.fw_ring_id;
4386         } else {
4387                 return bnxt_cp_ring_from_grp(bp, &rxr->rx_ring_struct);
4388         }
4389 }
4390
4391 static u16 bnxt_cp_ring_for_tx(struct bnxt *bp, struct bnxt_tx_ring_info *txr)
4392 {
4393         if (bp->flags & BNXT_FLAG_CHIP_P5) {
4394                 struct bnxt_napi *bnapi = txr->bnapi;
4395                 struct bnxt_cp_ring_info *cpr;
4396
4397                 cpr = bnapi->cp_ring.cp_ring_arr[BNXT_TX_HDL];
4398                 return cpr->cp_ring_struct.fw_ring_id;
4399         } else {
4400                 return bnxt_cp_ring_from_grp(bp, &txr->tx_ring_struct);
4401         }
4402 }
4403
4404 static int bnxt_hwrm_vnic_set_rss(struct bnxt *bp, u16 vnic_id, bool set_rss)
4405 {
4406         u32 i, j, max_rings;
4407         struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
4408         struct hwrm_vnic_rss_cfg_input req = {0};
4409
4410         if ((bp->flags & BNXT_FLAG_CHIP_P5) ||
4411             vnic->fw_rss_cos_lb_ctx[0] == INVALID_HW_RING_ID)
4412                 return 0;
4413
4414         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_RSS_CFG, -1, -1);
4415         if (set_rss) {
4416                 req.hash_type = cpu_to_le32(bp->rss_hash_cfg);
4417                 req.hash_mode_flags = VNIC_RSS_CFG_REQ_HASH_MODE_FLAGS_DEFAULT;
4418                 if (vnic->flags & BNXT_VNIC_RSS_FLAG) {
4419                         if (BNXT_CHIP_TYPE_NITRO_A0(bp))
4420                                 max_rings = bp->rx_nr_rings - 1;
4421                         else
4422                                 max_rings = bp->rx_nr_rings;
4423                 } else {
4424                         max_rings = 1;
4425                 }
4426
4427                 /* Fill the RSS indirection table with ring group ids */
4428                 for (i = 0, j = 0; i < HW_HASH_INDEX_SIZE; i++, j++) {
4429                         if (j == max_rings)
4430                                 j = 0;
4431                         vnic->rss_table[i] = cpu_to_le16(vnic->fw_grp_ids[j]);
4432                 }
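                /* e.g. with max_rings == 4 the table repeats
                 * fw_grp_ids[0..3] in round-robin order, spreading
                 * hashed RX flows evenly across the four rings.
                 */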
4433
4434                 req.ring_grp_tbl_addr = cpu_to_le64(vnic->rss_table_dma_addr);
4435                 req.hash_key_tbl_addr =
4436                         cpu_to_le64(vnic->rss_hash_key_dma_addr);
4437         }
4438         req.rss_ctx_idx = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[0]);
4439         return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4440 }
4441
4442 static int bnxt_hwrm_vnic_set_rss_p5(struct bnxt *bp, u16 vnic_id, bool set_rss)
4443 {
4444         struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
4445         u32 i, j, k, nr_ctxs, max_rings = bp->rx_nr_rings;
4446         struct bnxt_rx_ring_info *rxr = &bp->rx_ring[0];
4447         struct hwrm_vnic_rss_cfg_input req = {0};
4448
4449         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_RSS_CFG, -1, -1);
4450         req.vnic_id = cpu_to_le16(vnic->fw_vnic_id);
4451         if (!set_rss) {
4452                 return hwrm_send_message(bp, &req, sizeof(req),
4453                                          HWRM_CMD_TIMEOUT);
4454         }
4455         req.hash_type = cpu_to_le32(bp->rss_hash_cfg);
4456         req.hash_mode_flags = VNIC_RSS_CFG_REQ_HASH_MODE_FLAGS_DEFAULT;
4457         req.ring_grp_tbl_addr = cpu_to_le64(vnic->rss_table_dma_addr);
4458         req.hash_key_tbl_addr = cpu_to_le64(vnic->rss_hash_key_dma_addr);
4459         nr_ctxs = DIV_ROUND_UP(bp->rx_nr_rings, 64);
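        /* Each P5 RSS context holds 64 (rx ring, cp ring) table pairs,
         * so e.g. 8 rx rings need one context and 100 rx rings need two.
         */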
4460         for (i = 0, k = 0; i < nr_ctxs; i++) {
4461                 __le16 *ring_tbl = vnic->rss_table;
4462                 int rc;
4463
4464                 req.ring_table_pair_index = i;
4465                 req.rss_ctx_idx = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[i]);
4466                 for (j = 0; j < 64; j++) {
4467                         u16 ring_id;
4468
4469                         ring_id = rxr->rx_ring_struct.fw_ring_id;
4470                         *ring_tbl++ = cpu_to_le16(ring_id);
4471                         ring_id = bnxt_cp_ring_for_rx(bp, rxr);
4472                         *ring_tbl++ = cpu_to_le16(ring_id);
4473                         rxr++;
4474                         k++;
4475                         if (k == max_rings) {
4476                                 k = 0;
4477                                 rxr = &bp->rx_ring[0];
4478                         }
4479                 }
4480                 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4481                 if (rc)
4482                         return -EIO;
4483         }
4484         return 0;
4485 }
4486
4487 static int bnxt_hwrm_vnic_set_hds(struct bnxt *bp, u16 vnic_id)
4488 {
4489         struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
4490         struct hwrm_vnic_plcmodes_cfg_input req = {0};
4491
4492         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_PLCMODES_CFG, -1, -1);
4493         req.flags = cpu_to_le32(VNIC_PLCMODES_CFG_REQ_FLAGS_JUMBO_PLACEMENT |
4494                                 VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV4 |
4495                                 VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV6);
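        /* HDS (header/data split) places packet headers in a separate
         * buffer from the payload; jumbo placement lets packets larger
         * than the threshold spill into the aggregation buffers.
         */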
4496         req.enables =
4497                 cpu_to_le32(VNIC_PLCMODES_CFG_REQ_ENABLES_JUMBO_THRESH_VALID |
4498                             VNIC_PLCMODES_CFG_REQ_ENABLES_HDS_THRESHOLD_VALID);
4499         /* thresholds not implemented in firmware yet */
4500         req.jumbo_thresh = cpu_to_le16(bp->rx_copy_thresh);
4501         req.hds_threshold = cpu_to_le16(bp->rx_copy_thresh);
4502         req.vnic_id = cpu_to_le32(vnic->fw_vnic_id);
4503         return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4504 }
4505
4506 static void bnxt_hwrm_vnic_ctx_free_one(struct bnxt *bp, u16 vnic_id,
4507                                         u16 ctx_idx)
4508 {
4509         struct hwrm_vnic_rss_cos_lb_ctx_free_input req = {0};
4510
4511         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_RSS_COS_LB_CTX_FREE, -1, -1);
4512         req.rss_cos_lb_ctx_id =
4513                 cpu_to_le16(bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx[ctx_idx]);
4514
4515         hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4516         bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx[ctx_idx] = INVALID_HW_RING_ID;
4517 }
4518
4519 static void bnxt_hwrm_vnic_ctx_free(struct bnxt *bp)
4520 {
4521         int i, j;
4522
4523         for (i = 0; i < bp->nr_vnics; i++) {
4524                 struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
4525
4526                 for (j = 0; j < BNXT_MAX_CTX_PER_VNIC; j++) {
4527                         if (vnic->fw_rss_cos_lb_ctx[j] != INVALID_HW_RING_ID)
4528                                 bnxt_hwrm_vnic_ctx_free_one(bp, i, j);
4529                 }
4530         }
4531         bp->rsscos_nr_ctxs = 0;
4532 }
4533
4534 static int bnxt_hwrm_vnic_ctx_alloc(struct bnxt *bp, u16 vnic_id, u16 ctx_idx)
4535 {
4536         int rc;
4537         struct hwrm_vnic_rss_cos_lb_ctx_alloc_input req = {0};
4538         struct hwrm_vnic_rss_cos_lb_ctx_alloc_output *resp =
4539                                                 bp->hwrm_cmd_resp_addr;
4540
4541         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_RSS_COS_LB_CTX_ALLOC, -1,
4542                                -1);
4543
4544         mutex_lock(&bp->hwrm_cmd_lock);
4545         rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4546         if (!rc)
4547                 bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx[ctx_idx] =
4548                         le16_to_cpu(resp->rss_cos_lb_ctx_id);
4549         mutex_unlock(&bp->hwrm_cmd_lock);
4550
4551         return rc;
4552 }
4553
4554 static u32 bnxt_get_roce_vnic_mode(struct bnxt *bp)
4555 {
4556         if (bp->flags & BNXT_FLAG_ROCE_MIRROR_CAP)
4557                 return VNIC_CFG_REQ_FLAGS_ROCE_MIRRORING_CAPABLE_VNIC_MODE;
4558         return VNIC_CFG_REQ_FLAGS_ROCE_DUAL_VNIC_MODE;
4559 }
4560
4561 int bnxt_hwrm_vnic_cfg(struct bnxt *bp, u16 vnic_id)
4562 {
4563         unsigned int ring = 0, grp_idx;
4564         struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
4565         struct hwrm_vnic_cfg_input req = {0};
4566         u16 def_vlan = 0;
4567
4568         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_CFG, -1, -1);
4569
4570         if (bp->flags & BNXT_FLAG_CHIP_P5) {
4571                 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[0];
4572
4573                 req.default_rx_ring_id =
4574                         cpu_to_le16(rxr->rx_ring_struct.fw_ring_id);
4575                 req.default_cmpl_ring_id =
4576                         cpu_to_le16(bnxt_cp_ring_for_rx(bp, rxr));
4577                 req.enables =
4578                         cpu_to_le32(VNIC_CFG_REQ_ENABLES_DEFAULT_RX_RING_ID |
4579                                     VNIC_CFG_REQ_ENABLES_DEFAULT_CMPL_RING_ID);
4580                 goto vnic_mru;
4581         }
4582         req.enables = cpu_to_le32(VNIC_CFG_REQ_ENABLES_DFLT_RING_GRP);
4583         /* Only RSS is supported for now.  TBD: COS & LB */
4584         if (vnic->fw_rss_cos_lb_ctx[0] != INVALID_HW_RING_ID) {
4585                 req.rss_rule = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[0]);
4586                 req.enables |= cpu_to_le32(VNIC_CFG_REQ_ENABLES_RSS_RULE |
4587                                            VNIC_CFG_REQ_ENABLES_MRU);
4588         } else if (vnic->flags & BNXT_VNIC_RFS_NEW_RSS_FLAG) {
4589                 req.rss_rule =
4590                         cpu_to_le16(bp->vnic_info[0].fw_rss_cos_lb_ctx[0]);
4591                 req.enables |= cpu_to_le32(VNIC_CFG_REQ_ENABLES_RSS_RULE |
4592                                            VNIC_CFG_REQ_ENABLES_MRU);
4593                 req.flags |= cpu_to_le32(VNIC_CFG_REQ_FLAGS_RSS_DFLT_CR_MODE);
4594         } else {
4595                 req.rss_rule = cpu_to_le16(0xffff);
4596         }
4597
4598         if (BNXT_CHIP_TYPE_NITRO_A0(bp) &&
4599             (vnic->fw_rss_cos_lb_ctx[0] != INVALID_HW_RING_ID)) {
4600                 req.cos_rule = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[1]);
4601                 req.enables |= cpu_to_le32(VNIC_CFG_REQ_ENABLES_COS_RULE);
4602         } else {
4603                 req.cos_rule = cpu_to_le16(0xffff);
4604         }
4605
4606         if (vnic->flags & BNXT_VNIC_RSS_FLAG)
4607                 ring = 0;
4608         else if (vnic->flags & BNXT_VNIC_RFS_FLAG)
4609                 ring = vnic_id - 1;
4610         else if ((vnic_id == 1) && BNXT_CHIP_TYPE_NITRO_A0(bp))
4611                 ring = bp->rx_nr_rings - 1;
4612
4613         grp_idx = bp->rx_ring[ring].bnapi->index;
4614         req.dflt_ring_grp = cpu_to_le16(bp->grp_info[grp_idx].fw_grp_id);
4615         req.lb_rule = cpu_to_le16(0xffff);
4616 vnic_mru:
4617         req.mru = cpu_to_le16(bp->dev->mtu + ETH_HLEN + ETH_FCS_LEN +
4618                               VLAN_HLEN);
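        /* e.g. a 1500-byte MTU gives an MRU of 1500 + 14 (Ethernet
         * header) + 4 (FCS) + 4 (one VLAN tag) = 1522 bytes.
         */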
4619
4620         req.vnic_id = cpu_to_le16(vnic->fw_vnic_id);
4621 #ifdef CONFIG_BNXT_SRIOV
4622         if (BNXT_VF(bp))
4623                 def_vlan = bp->vf.vlan;
4624 #endif
4625         if ((bp->flags & BNXT_FLAG_STRIP_VLAN) || def_vlan)
4626                 req.flags |= cpu_to_le32(VNIC_CFG_REQ_FLAGS_VLAN_STRIP_MODE);
4627         if (!vnic_id && bnxt_ulp_registered(bp->edev, BNXT_ROCE_ULP))
4628                 req.flags |= cpu_to_le32(bnxt_get_roce_vnic_mode(bp));
4629
4630         return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4631 }
4632
4633 static int bnxt_hwrm_vnic_free_one(struct bnxt *bp, u16 vnic_id)
4634 {
4635         int rc = 0;
4636
4637         if (bp->vnic_info[vnic_id].fw_vnic_id != INVALID_HW_RING_ID) {
4638                 struct hwrm_vnic_free_input req = {0};
4639
4640                 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_FREE, -1, -1);
4641                 req.vnic_id =
4642                         cpu_to_le32(bp->vnic_info[vnic_id].fw_vnic_id);
4643
4644                 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4645                 if (rc)
4646                         return rc;
4647                 bp->vnic_info[vnic_id].fw_vnic_id = INVALID_HW_RING_ID;
4648         }
4649         return rc;
4650 }
4651
4652 static void bnxt_hwrm_vnic_free(struct bnxt *bp)
4653 {
4654         u16 i;
4655
4656         for (i = 0; i < bp->nr_vnics; i++)
4657                 bnxt_hwrm_vnic_free_one(bp, i);
4658 }
4659
4660 static int bnxt_hwrm_vnic_alloc(struct bnxt *bp, u16 vnic_id,
4661                                 unsigned int start_rx_ring_idx,
4662                                 unsigned int nr_rings)
4663 {
4664         int rc = 0;
4665         unsigned int i, j, grp_idx, end_idx = start_rx_ring_idx + nr_rings;
4666         struct hwrm_vnic_alloc_input req = {0};
4667         struct hwrm_vnic_alloc_output *resp = bp->hwrm_cmd_resp_addr;
4668         struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
4669
4670         if (bp->flags & BNXT_FLAG_CHIP_P5)
4671                 goto vnic_no_ring_grps;
4672
4673         /* map ring groups to this vnic */
4674         for (i = start_rx_ring_idx, j = 0; i < end_idx; i++, j++) {
4675                 grp_idx = bp->rx_ring[i].bnapi->index;
4676                 if (bp->grp_info[grp_idx].fw_grp_id == INVALID_HW_RING_ID) {
4677                         netdev_err(bp->dev, "Not enough ring groups available: %d (requested: %d)\n",
4678                                    j, nr_rings);
4679                         break;
4680                 }
4681                 vnic->fw_grp_ids[j] = bp->grp_info[grp_idx].fw_grp_id;
4682         }
4683
4684 vnic_no_ring_grps:
4685         for (i = 0; i < BNXT_MAX_CTX_PER_VNIC; i++)
4686                 vnic->fw_rss_cos_lb_ctx[i] = INVALID_HW_RING_ID;
4687         if (vnic_id == 0)
4688                 req.flags = cpu_to_le32(VNIC_ALLOC_REQ_FLAGS_DEFAULT);
4689
4690         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_ALLOC, -1, -1);
4691
4692         mutex_lock(&bp->hwrm_cmd_lock);
4693         rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4694         if (!rc)
4695                 vnic->fw_vnic_id = le32_to_cpu(resp->vnic_id);
4696         mutex_unlock(&bp->hwrm_cmd_lock);
4697         return rc;
4698 }
4699
4700 static int bnxt_hwrm_vnic_qcaps(struct bnxt *bp)
4701 {
4702         struct hwrm_vnic_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
4703         struct hwrm_vnic_qcaps_input req = {0};
4704         int rc;
4705
4706         if (bp->hwrm_spec_code < 0x10600)
4707                 return 0;
4708
4709         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_QCAPS, -1, -1);
4710         mutex_lock(&bp->hwrm_cmd_lock);
4711         rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4712         if (!rc) {
4713                 u32 flags = le32_to_cpu(resp->flags);
4714
4715                 if (!(bp->flags & BNXT_FLAG_CHIP_P5) &&
4716                     (flags & VNIC_QCAPS_RESP_FLAGS_RSS_DFLT_CR_CAP))
4717                         bp->flags |= BNXT_FLAG_NEW_RSS_CAP;
4718                 if (flags &
4719                     VNIC_QCAPS_RESP_FLAGS_ROCE_MIRRORING_CAPABLE_VNIC_CAP)
4720                         bp->flags |= BNXT_FLAG_ROCE_MIRROR_CAP;
4721         }
4722         mutex_unlock(&bp->hwrm_cmd_lock);
4723         return rc;
4724 }
4725
4726 static int bnxt_hwrm_ring_grp_alloc(struct bnxt *bp)
4727 {
4728         u16 i;
4729         int rc = 0;
4730
4731         if (bp->flags & BNXT_FLAG_CHIP_P5)
4732                 return 0;
4733
4734         mutex_lock(&bp->hwrm_cmd_lock);
4735         for (i = 0; i < bp->rx_nr_rings; i++) {
4736                 struct hwrm_ring_grp_alloc_input req = {0};
4737                 struct hwrm_ring_grp_alloc_output *resp =
4738                                         bp->hwrm_cmd_resp_addr;
4739                 unsigned int grp_idx = bp->rx_ring[i].bnapi->index;
4740
4741                 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_GRP_ALLOC, -1, -1);
4742
4743                 req.cr = cpu_to_le16(bp->grp_info[grp_idx].cp_fw_ring_id);
4744                 req.rr = cpu_to_le16(bp->grp_info[grp_idx].rx_fw_ring_id);
4745                 req.ar = cpu_to_le16(bp->grp_info[grp_idx].agg_fw_ring_id);
4746                 req.sc = cpu_to_le16(bp->grp_info[grp_idx].fw_stats_ctx);
4747
4748                 rc = _hwrm_send_message(bp, &req, sizeof(req),
4749                                         HWRM_CMD_TIMEOUT);
4750                 if (rc)
4751                         break;
4752
4753                 bp->grp_info[grp_idx].fw_grp_id =
4754                         le32_to_cpu(resp->ring_group_id);
4755         }
4756         mutex_unlock(&bp->hwrm_cmd_lock);
4757         return rc;
4758 }
4759
4760 static int bnxt_hwrm_ring_grp_free(struct bnxt *bp)
4761 {
4762         u16 i;
4763         int rc = 0;
4764         struct hwrm_ring_grp_free_input req = {0};
4765
4766         if (!bp->grp_info || (bp->flags & BNXT_FLAG_CHIP_P5))
4767                 return 0;
4768
4769         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_GRP_FREE, -1, -1);
4770
4771         mutex_lock(&bp->hwrm_cmd_lock);
4772         for (i = 0; i < bp->cp_nr_rings; i++) {
4773                 if (bp->grp_info[i].fw_grp_id == INVALID_HW_RING_ID)
4774                         continue;
4775                 req.ring_group_id =
4776                         cpu_to_le32(bp->grp_info[i].fw_grp_id);
4777
4778                 rc = _hwrm_send_message(bp, &req, sizeof(req),
4779                                         HWRM_CMD_TIMEOUT);
4780                 if (rc)
4781                         break;
4782                 bp->grp_info[i].fw_grp_id = INVALID_HW_RING_ID;
4783         }
4784         mutex_unlock(&bp->hwrm_cmd_lock);
4785         return rc;
4786 }
4787
4788 static int hwrm_ring_alloc_send_msg(struct bnxt *bp,
4789                                     struct bnxt_ring_struct *ring,
4790                                     u32 ring_type, u32 map_index)
4791 {
4792         int rc = 0, err = 0;
4793         struct hwrm_ring_alloc_input req = {0};
4794         struct hwrm_ring_alloc_output *resp = bp->hwrm_cmd_resp_addr;
4795         struct bnxt_ring_mem_info *rmem = &ring->ring_mem;
4796         struct bnxt_ring_grp_info *grp_info;
4797         u16 ring_id;
4798
4799         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_ALLOC, -1, -1);
4800
4801         req.enables = 0;
4802         if (rmem->nr_pages > 1) {
4803                 req.page_tbl_addr = cpu_to_le64(rmem->pg_tbl_map);
4804                 /* Page size is in log2 units */
4805                 req.page_size = BNXT_PAGE_SHIFT;
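                /* e.g. BNXT_PAGE_SHIFT == 12 for 4K ring pages */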
4806                 req.page_tbl_depth = 1;
4807         } else {
4808                 req.page_tbl_addr =  cpu_to_le64(rmem->dma_arr[0]);
4809         }
4810         req.fbo = 0;
4811         /* Association of ring index with doorbell index and MSIX number */
4812         req.logical_id = cpu_to_le16(map_index);
4813
4814         switch (ring_type) {
4815         case HWRM_RING_ALLOC_TX: {
4816                 struct bnxt_tx_ring_info *txr;
4817
4818                 txr = container_of(ring, struct bnxt_tx_ring_info,
4819                                    tx_ring_struct);
4820                 req.ring_type = RING_ALLOC_REQ_RING_TYPE_TX;
4821                 /* Association of transmit ring with completion ring */
4822                 grp_info = &bp->grp_info[ring->grp_idx];
4823                 req.cmpl_ring_id = cpu_to_le16(bnxt_cp_ring_for_tx(bp, txr));
4824                 req.length = cpu_to_le32(bp->tx_ring_mask + 1);
4825                 req.stat_ctx_id = cpu_to_le32(grp_info->fw_stats_ctx);
4826                 req.queue_id = cpu_to_le16(ring->queue_id);
4827                 break;
4828         }
4829         case HWRM_RING_ALLOC_RX:
4830                 req.ring_type = RING_ALLOC_REQ_RING_TYPE_RX;
4831                 req.length = cpu_to_le32(bp->rx_ring_mask + 1);
4832                 if (bp->flags & BNXT_FLAG_CHIP_P5) {
4833                         u16 flags = 0;
4834
4835                         /* Association of rx ring with stats context */
4836                         grp_info = &bp->grp_info[ring->grp_idx];
4837                         req.rx_buf_size = cpu_to_le16(bp->rx_buf_use_size);
4838                         req.stat_ctx_id = cpu_to_le32(grp_info->fw_stats_ctx);
4839                         req.enables |= cpu_to_le32(
4840                                 RING_ALLOC_REQ_ENABLES_RX_BUF_SIZE_VALID);
4841                         if (NET_IP_ALIGN == 2)
4842                                 flags = RING_ALLOC_REQ_FLAGS_RX_SOP_PAD;
4843                         req.flags = cpu_to_le16(flags);
4844                 }
4845                 break;
4846         case HWRM_RING_ALLOC_AGG:
4847                 if (bp->flags & BNXT_FLAG_CHIP_P5) {
4848                         req.ring_type = RING_ALLOC_REQ_RING_TYPE_RX_AGG;
4849                         /* Association of agg ring with rx ring */
4850                         grp_info = &bp->grp_info[ring->grp_idx];
4851                         req.rx_ring_id = cpu_to_le16(grp_info->rx_fw_ring_id);
4852                         req.rx_buf_size = cpu_to_le16(BNXT_RX_PAGE_SIZE);
4853                         req.stat_ctx_id = cpu_to_le32(grp_info->fw_stats_ctx);
4854                         req.enables |= cpu_to_le32(
4855                                 RING_ALLOC_REQ_ENABLES_RX_RING_ID_VALID |
4856                                 RING_ALLOC_REQ_ENABLES_RX_BUF_SIZE_VALID);
4857                 } else {
4858                         req.ring_type = RING_ALLOC_REQ_RING_TYPE_RX;
4859                 }
4860                 req.length = cpu_to_le32(bp->rx_agg_ring_mask + 1);
4861                 break;
4862         case HWRM_RING_ALLOC_CMPL:
4863                 req.ring_type = RING_ALLOC_REQ_RING_TYPE_L2_CMPL;
4864                 req.length = cpu_to_le32(bp->cp_ring_mask + 1);
4865                 if (bp->flags & BNXT_FLAG_CHIP_P5) {
4866                         /* Association of cp ring with nq */
4867                         grp_info = &bp->grp_info[map_index];
4868                         req.nq_ring_id = cpu_to_le16(grp_info->cp_fw_ring_id);
4869                         req.cq_handle = cpu_to_le64(ring->handle);
4870                         req.enables |= cpu_to_le32(
4871                                 RING_ALLOC_REQ_ENABLES_NQ_RING_ID_VALID);
4872                 } else if (bp->flags & BNXT_FLAG_USING_MSIX) {
4873                         req.int_mode = RING_ALLOC_REQ_INT_MODE_MSIX;
4874                 }
4875                 break;
4876         case HWRM_RING_ALLOC_NQ:
4877                 req.ring_type = RING_ALLOC_REQ_RING_TYPE_NQ;
4878                 req.length = cpu_to_le32(bp->cp_ring_mask + 1);
4879                 if (bp->flags & BNXT_FLAG_USING_MSIX)
4880                         req.int_mode = RING_ALLOC_REQ_INT_MODE_MSIX;
4881                 break;
4882         default:
4883                 netdev_err(bp->dev, "hwrm alloc invalid ring type %d\n",
4884                            ring_type);
4885                 return -EINVAL;
4886         }
4887
4888         mutex_lock(&bp->hwrm_cmd_lock);
4889         rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4890         err = le16_to_cpu(resp->error_code);
4891         ring_id = le16_to_cpu(resp->ring_id);
4892         mutex_unlock(&bp->hwrm_cmd_lock);
4893
4894         if (rc || err) {
4895                 netdev_err(bp->dev, "hwrm_ring_alloc type %d failed. rc:%x err:%x\n",
4896                            ring_type, rc, err);
4897                 return -EIO;
4898         }
4899         ring->fw_ring_id = ring_id;
4900         return rc;
4901 }
4902
4903 static int bnxt_hwrm_set_async_event_cr(struct bnxt *bp, int idx)
4904 {
4905         int rc;
4906
4907         if (BNXT_PF(bp)) {
4908                 struct hwrm_func_cfg_input req = {0};
4909
4910                 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
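                /* Per HWRM convention, fid 0xffff means the calling
                 * function itself.
                 */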
4911                 req.fid = cpu_to_le16(0xffff);
4912                 req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_ASYNC_EVENT_CR);
4913                 req.async_event_cr = cpu_to_le16(idx);
4914                 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4915         } else {
4916                 struct hwrm_func_vf_cfg_input req = {0};
4917
4918                 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_VF_CFG, -1, -1);
4919                 req.enables =
4920                         cpu_to_le32(FUNC_VF_CFG_REQ_ENABLES_ASYNC_EVENT_CR);
4921                 req.async_event_cr = cpu_to_le16(idx);
4922                 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4923         }
4924         return rc;
4925 }
4926
4927 static void bnxt_set_db(struct bnxt *bp, struct bnxt_db_info *db, u32 ring_type,
4928                         u32 map_idx, u32 xid)
4929 {
4930         if (bp->flags & BNXT_FLAG_CHIP_P5) {
4931                 if (BNXT_PF(bp))
4932                         db->doorbell = bp->bar1 + 0x10000;
4933                 else
4934                         db->doorbell = bp->bar1 + 0x4000;
4935                 switch (ring_type) {
4936                 case HWRM_RING_ALLOC_TX:
4937                         db->db_key64 = DBR_PATH_L2 | DBR_TYPE_SQ;
4938                         break;
4939                 case HWRM_RING_ALLOC_RX:
4940                 case HWRM_RING_ALLOC_AGG:
4941                         db->db_key64 = DBR_PATH_L2 | DBR_TYPE_SRQ;
4942                         break;
4943                 case HWRM_RING_ALLOC_CMPL:
4944                         db->db_key64 = DBR_PATH_L2;
4945                         break;
4946                 case HWRM_RING_ALLOC_NQ:
4947                         db->db_key64 = DBR_PATH_L2;
4948                         break;
4949                 }
4950                 db->db_key64 |= (u64)xid << DBR_XID_SFT;
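                /* The 64-bit doorbell key carries the path/type in its
                 * high bits and the fw ring id (xid) at DBR_XID_SFT; the
                 * producer index is OR'ed in when the bell is rung.
                 */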
4951         } else {
4952                 db->doorbell = bp->bar1 + map_idx * 0x80;
4953                 switch (ring_type) {
4954                 case HWRM_RING_ALLOC_TX:
4955                         db->db_key32 = DB_KEY_TX;
4956                         break;
4957                 case HWRM_RING_ALLOC_RX:
4958                 case HWRM_RING_ALLOC_AGG:
4959                         db->db_key32 = DB_KEY_RX;
4960                         break;
4961                 case HWRM_RING_ALLOC_CMPL:
4962                         db->db_key32 = DB_KEY_CP;
4963                         break;
4964                 }
4965         }
4966 }
4967
4968 static int bnxt_hwrm_ring_alloc(struct bnxt *bp)
4969 {
4970         int i, rc = 0;
4971         u32 type;
4972
4973         if (bp->flags & BNXT_FLAG_CHIP_P5)
4974                 type = HWRM_RING_ALLOC_NQ;
4975         else
4976                 type = HWRM_RING_ALLOC_CMPL;
4977         for (i = 0; i < bp->cp_nr_rings; i++) {
4978                 struct bnxt_napi *bnapi = bp->bnapi[i];
4979                 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
4980                 struct bnxt_ring_struct *ring = &cpr->cp_ring_struct;
4981                 u32 map_idx = ring->map_idx;
4982                 unsigned int vector;
4983
4984                 vector = bp->irq_tbl[map_idx].vector;
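                /* Keep the vector masked while the ring is allocated and
                 * its doorbell armed, so no interrupt fires against a
                 * half-initialized ring.
                 */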
4985                 disable_irq_nosync(vector);
4986                 rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx);
4987                 if (rc) {
4988                         enable_irq(vector);
4989                         goto err_out;
4990                 }
4991                 bnxt_set_db(bp, &cpr->cp_db, type, map_idx, ring->fw_ring_id);
4992                 bnxt_db_nq(bp, &cpr->cp_db, cpr->cp_raw_cons);
4993                 enable_irq(vector);
4994                 bp->grp_info[i].cp_fw_ring_id = ring->fw_ring_id;
4995
4996                 if (!i) {
4997                         rc = bnxt_hwrm_set_async_event_cr(bp, ring->fw_ring_id);
4998                         if (rc)
4999                                 netdev_warn(bp->dev, "Failed to set async event completion ring.\n");
5000                 }
5001         }
5002
5003         type = HWRM_RING_ALLOC_TX;
5004         for (i = 0; i < bp->tx_nr_rings; i++) {
5005                 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
5006                 struct bnxt_ring_struct *ring;
5007                 u32 map_idx;
5008
5009                 if (bp->flags & BNXT_FLAG_CHIP_P5) {
5010                         struct bnxt_napi *bnapi = txr->bnapi;
5011                         struct bnxt_cp_ring_info *cpr, *cpr2;
5012                         u32 type2 = HWRM_RING_ALLOC_CMPL;
5013
5014                         cpr = &bnapi->cp_ring;
5015                         cpr2 = cpr->cp_ring_arr[BNXT_TX_HDL];
5016                         ring = &cpr2->cp_ring_struct;
5017                         ring->handle = BNXT_TX_HDL;
5018                         map_idx = bnapi->index;
5019                         rc = hwrm_ring_alloc_send_msg(bp, ring, type2, map_idx);
5020                         if (rc)
5021                                 goto err_out;
5022                         bnxt_set_db(bp, &cpr2->cp_db, type2, map_idx,
5023                                     ring->fw_ring_id);
5024                         bnxt_db_cq(bp, &cpr2->cp_db, cpr2->cp_raw_cons);
5025                 }
5026                 ring = &txr->tx_ring_struct;
5027                 map_idx = i;
5028                 rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx);
5029                 if (rc)
5030                         goto err_out;
5031                 bnxt_set_db(bp, &txr->tx_db, type, map_idx, ring->fw_ring_id);
5032         }
5033
5034         type = HWRM_RING_ALLOC_RX;
5035         for (i = 0; i < bp->rx_nr_rings; i++) {
5036                 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
5037                 struct bnxt_ring_struct *ring = &rxr->rx_ring_struct;
5038                 struct bnxt_napi *bnapi = rxr->bnapi;
5039                 u32 map_idx = bnapi->index;
5040
5041                 rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx);
5042                 if (rc)
5043                         goto err_out;
5044                 bnxt_set_db(bp, &rxr->rx_db, type, map_idx, ring->fw_ring_id);
5045                 bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
5046                 bp->grp_info[map_idx].rx_fw_ring_id = ring->fw_ring_id;
5047                 if (bp->flags & BNXT_FLAG_CHIP_P5) {
5048                         struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
5049                         u32 type2 = HWRM_RING_ALLOC_CMPL;
5050                         struct bnxt_cp_ring_info *cpr2;
5051
5052                         cpr2 = cpr->cp_ring_arr[BNXT_RX_HDL];
5053                         ring = &cpr2->cp_ring_struct;
5054                         ring->handle = BNXT_RX_HDL;
5055                         rc = hwrm_ring_alloc_send_msg(bp, ring, type2, map_idx);
5056                         if (rc)
5057                                 goto err_out;
5058                         bnxt_set_db(bp, &cpr2->cp_db, type2, map_idx,
5059                                     ring->fw_ring_id);
5060                         bnxt_db_cq(bp, &cpr2->cp_db, cpr2->cp_raw_cons);
5061                 }
5062         }
5063
5064         if (bp->flags & BNXT_FLAG_AGG_RINGS) {
5065                 type = HWRM_RING_ALLOC_AGG;
5066                 for (i = 0; i < bp->rx_nr_rings; i++) {
5067                         struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
5068                         struct bnxt_ring_struct *ring =
5069                                                 &rxr->rx_agg_ring_struct;
5070                         u32 grp_idx = ring->grp_idx;
5071                         u32 map_idx = grp_idx + bp->rx_nr_rings;
5072
5073                         rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx);
5074                         if (rc)
5075                                 goto err_out;
5076
5077                         bnxt_set_db(bp, &rxr->rx_agg_db, type, map_idx,
5078                                     ring->fw_ring_id);
5079                         bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod);
5080                         bp->grp_info[grp_idx].agg_fw_ring_id = ring->fw_ring_id;
5081                 }
5082         }
5083 err_out:
5084         return rc;
5085 }
5086
5087 static int hwrm_ring_free_send_msg(struct bnxt *bp,
5088                                    struct bnxt_ring_struct *ring,
5089                                    u32 ring_type, int cmpl_ring_id)
5090 {
5091         int rc;
5092         struct hwrm_ring_free_input req = {0};
5093         struct hwrm_ring_free_output *resp = bp->hwrm_cmd_resp_addr;
5094         u16 error_code;
5095
5096         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_FREE, cmpl_ring_id, -1);
5097         req.ring_type = ring_type;
5098         req.ring_id = cpu_to_le16(ring->fw_ring_id);
5099
5100         mutex_lock(&bp->hwrm_cmd_lock);
5101         rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5102         error_code = le16_to_cpu(resp->error_code);
5103         mutex_unlock(&bp->hwrm_cmd_lock);
5104
5105         if (rc || error_code) {
5106                 netdev_err(bp->dev, "hwrm_ring_free type %d failed. rc:%x err:%x\n",
5107                            ring_type, rc, error_code);
5108                 return -EIO;
5109         }
5110         return 0;
5111 }
5112
5113 static void bnxt_hwrm_ring_free(struct bnxt *bp, bool close_path)
5114 {
5115         u32 type;
5116         int i;
5117
5118         if (!bp->bnapi)
5119                 return;
5120
5121         for (i = 0; i < bp->tx_nr_rings; i++) {
5122                 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
5123                 struct bnxt_ring_struct *ring = &txr->tx_ring_struct;
5124                 u32 cmpl_ring_id;
5125
5126                 cmpl_ring_id = bnxt_cp_ring_for_tx(bp, txr);
5127                 if (ring->fw_ring_id != INVALID_HW_RING_ID) {
5128                         hwrm_ring_free_send_msg(bp, ring,
5129                                                 RING_FREE_REQ_RING_TYPE_TX,
5130                                                 close_path ? cmpl_ring_id :
5131                                                 INVALID_HW_RING_ID);
5132                         ring->fw_ring_id = INVALID_HW_RING_ID;
5133                 }
5134         }
5135
5136         for (i = 0; i < bp->rx_nr_rings; i++) {
5137                 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
5138                 struct bnxt_ring_struct *ring = &rxr->rx_ring_struct;
5139                 u32 grp_idx = rxr->bnapi->index;
5140                 u32 cmpl_ring_id;
5141
5142                 cmpl_ring_id = bnxt_cp_ring_for_rx(bp, rxr);
5143                 if (ring->fw_ring_id != INVALID_HW_RING_ID) {
5144                         hwrm_ring_free_send_msg(bp, ring,
5145                                                 RING_FREE_REQ_RING_TYPE_RX,
5146                                                 close_path ? cmpl_ring_id :
5147                                                 INVALID_HW_RING_ID);
5148                         ring->fw_ring_id = INVALID_HW_RING_ID;
5149                         bp->grp_info[grp_idx].rx_fw_ring_id =
5150                                 INVALID_HW_RING_ID;
5151                 }
5152         }
5153
5154         if (bp->flags & BNXT_FLAG_CHIP_P5)
5155                 type = RING_FREE_REQ_RING_TYPE_RX_AGG;
5156         else
5157                 type = RING_FREE_REQ_RING_TYPE_RX;
5158         for (i = 0; i < bp->rx_nr_rings; i++) {
5159                 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
5160                 struct bnxt_ring_struct *ring = &rxr->rx_agg_ring_struct;
5161                 u32 grp_idx = rxr->bnapi->index;
5162                 u32 cmpl_ring_id;
5163
5164                 cmpl_ring_id = bnxt_cp_ring_for_rx(bp, rxr);
5165                 if (ring->fw_ring_id != INVALID_HW_RING_ID) {
5166                         hwrm_ring_free_send_msg(bp, ring, type,
5167                                                 close_path ? cmpl_ring_id :
5168                                                 INVALID_HW_RING_ID);
5169                         ring->fw_ring_id = INVALID_HW_RING_ID;
5170                         bp->grp_info[grp_idx].agg_fw_ring_id =
5171                                 INVALID_HW_RING_ID;
5172                 }
5173         }
5174
5175         /* The completion rings are about to be freed.  After that the
5176          * IRQ doorbell will no longer work, so interrupts must be
5177          * disabled here.
5178          */
5179         bnxt_disable_int_sync(bp);
5180
5181         if (bp->flags & BNXT_FLAG_CHIP_P5)
5182                 type = RING_FREE_REQ_RING_TYPE_NQ;
5183         else
5184                 type = RING_FREE_REQ_RING_TYPE_L2_CMPL;
5185         for (i = 0; i < bp->cp_nr_rings; i++) {
5186                 struct bnxt_napi *bnapi = bp->bnapi[i];
5187                 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
5188                 struct bnxt_ring_struct *ring;
5189                 int j;
5190
5191                 for (j = 0; j < 2; j++) {
5192                         struct bnxt_cp_ring_info *cpr2 = cpr->cp_ring_arr[j];
5193
5194                         if (cpr2) {
5195                                 ring = &cpr2->cp_ring_struct;
5196                                 if (ring->fw_ring_id == INVALID_HW_RING_ID)
5197                                         continue;
5198                                 hwrm_ring_free_send_msg(bp, ring,
5199                                         RING_FREE_REQ_RING_TYPE_L2_CMPL,
5200                                         INVALID_HW_RING_ID);
5201                                 ring->fw_ring_id = INVALID_HW_RING_ID;
5202                         }
5203                 }
5204                 ring = &cpr->cp_ring_struct;
5205                 if (ring->fw_ring_id != INVALID_HW_RING_ID) {
5206                         hwrm_ring_free_send_msg(bp, ring, type,
5207                                                 INVALID_HW_RING_ID);
5208                         ring->fw_ring_id = INVALID_HW_RING_ID;
5209                         bp->grp_info[i].cp_fw_ring_id = INVALID_HW_RING_ID;
5210                 }
5211         }
5212 }
5213
5214 static int bnxt_trim_rings(struct bnxt *bp, int *rx, int *tx, int max,
5215                            bool shared);
5216
5217 static int bnxt_hwrm_get_rings(struct bnxt *bp)
5218 {
5219         struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
5220         struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
5221         struct hwrm_func_qcfg_input req = {0};
5222         int rc;
5223
5224         if (bp->hwrm_spec_code < 0x10601)
5225                 return 0;
5226
5227         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QCFG, -1, -1);
5228         req.fid = cpu_to_le16(0xffff);
5229         mutex_lock(&bp->hwrm_cmd_lock);
5230         rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5231         if (rc) {
5232                 mutex_unlock(&bp->hwrm_cmd_lock);
5233                 return -EIO;
5234         }
5235
5236         hw_resc->resv_tx_rings = le16_to_cpu(resp->alloc_tx_rings);
5237         if (BNXT_NEW_RM(bp)) {
5238                 u16 cp, stats;
5239
5240                 hw_resc->resv_rx_rings = le16_to_cpu(resp->alloc_rx_rings);
5241                 hw_resc->resv_hw_ring_grps =
5242                         le32_to_cpu(resp->alloc_hw_ring_grps);
5243                 hw_resc->resv_vnics = le16_to_cpu(resp->alloc_vnics);
5244                 cp = le16_to_cpu(resp->alloc_cmpl_rings);
5245                 stats = le16_to_cpu(resp->alloc_stat_ctx);
5246                 hw_resc->resv_irqs = cp;
5247                 if (bp->flags & BNXT_FLAG_CHIP_P5) {
5248                         int rx = hw_resc->resv_rx_rings;
5249                         int tx = hw_resc->resv_tx_rings;
5250
5251                         if (bp->flags & BNXT_FLAG_AGG_RINGS)
5252                                 rx >>= 1;
5253                         if (cp < (rx + tx)) {
5254                                 bnxt_trim_rings(bp, &rx, &tx, cp, false);
5255                                 if (bp->flags & BNXT_FLAG_AGG_RINGS)
5256                                         rx <<= 1;
5257                                 hw_resc->resv_rx_rings = rx;
5258                                 hw_resc->resv_tx_rings = tx;
5259                         }
5260                         hw_resc->resv_irqs = le16_to_cpu(resp->alloc_msix);
5261                         hw_resc->resv_hw_ring_grps = rx;
5262                 }
5263                 hw_resc->resv_cp_rings = cp;
5264                 hw_resc->resv_stat_ctxs = stats;
5265         }
5266         mutex_unlock(&bp->hwrm_cmd_lock);
5267         return 0;
5268 }
5269
5270 /* Caller must hold bp->hwrm_cmd_lock */
5271 int __bnxt_hwrm_get_tx_rings(struct bnxt *bp, u16 fid, int *tx_rings)
5272 {
5273         struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
5274         struct hwrm_func_qcfg_input req = {0};
5275         int rc;
5276
5277         if (bp->hwrm_spec_code < 0x10601)
5278                 return 0;
5279
5280         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QCFG, -1, -1);
5281         req.fid = cpu_to_le16(fid);
5282         rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5283         if (!rc)
5284                 *tx_rings = le16_to_cpu(resp->alloc_tx_rings);
5285
5286         return rc;
5287 }
5288
5289 static bool bnxt_rfs_supported(struct bnxt *bp);
5290
5291 static void
5292 __bnxt_hwrm_reserve_pf_rings(struct bnxt *bp, struct hwrm_func_cfg_input *req,
5293                              int tx_rings, int rx_rings, int ring_grps,
5294                              int cp_rings, int stats, int vnics)
5295 {
5296         u32 enables = 0;
5297
5298         bnxt_hwrm_cmd_hdr_init(bp, req, HWRM_FUNC_CFG, -1, -1);
5299         req->fid = cpu_to_le16(0xffff);
5300         enables |= tx_rings ? FUNC_CFG_REQ_ENABLES_NUM_TX_RINGS : 0;
5301         req->num_tx_rings = cpu_to_le16(tx_rings);
5302         if (BNXT_NEW_RM(bp)) {
5303                 enables |= rx_rings ? FUNC_CFG_REQ_ENABLES_NUM_RX_RINGS : 0;
5304                 if (bp->flags & BNXT_FLAG_CHIP_P5) {
5305                         enables |= cp_rings ? FUNC_CFG_REQ_ENABLES_NUM_MSIX : 0;
5306                         enables |= tx_rings + ring_grps ?
5307                                    FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS |
5308                                    FUNC_CFG_REQ_ENABLES_NUM_STAT_CTXS : 0;
5309                         enables |= rx_rings ?
5310                                 FUNC_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS : 0;
5311                 } else {
5312                         enables |= cp_rings ?
5313                                    FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS |
5314                                    FUNC_CFG_REQ_ENABLES_NUM_STAT_CTXS : 0;
5315                         enables |= ring_grps ?
5316                                    FUNC_CFG_REQ_ENABLES_NUM_HW_RING_GRPS |
5317                                    FUNC_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS : 0;
5318                 }
5319                 enables |= vnics ? FUNC_CFG_REQ_ENABLES_NUM_VNICS : 0;
5320
5321                 req->num_rx_rings = cpu_to_le16(rx_rings);
5322                 if (bp->flags & BNXT_FLAG_CHIP_P5) {
5323                         req->num_cmpl_rings = cpu_to_le16(tx_rings + ring_grps);
5324                         req->num_msix = cpu_to_le16(cp_rings);
5325                         req->num_rsscos_ctxs =
5326                                 cpu_to_le16(DIV_ROUND_UP(ring_grps, 64));
5327                 } else {
5328                         req->num_cmpl_rings = cpu_to_le16(cp_rings);
5329                         req->num_hw_ring_grps = cpu_to_le16(ring_grps);
5330                         req->num_rsscos_ctxs = cpu_to_le16(1);
5331                         if (!(bp->flags & BNXT_FLAG_NEW_RSS_CAP) &&
5332                             bnxt_rfs_supported(bp))
5333                                 req->num_rsscos_ctxs =
5334                                         cpu_to_le16(ring_grps + 1);
5335                 }
5336                 req->num_stat_ctxs = cpu_to_le16(stats);
5337                 req->num_vnics = cpu_to_le16(vnics);
5338         }
5339         req->enables = cpu_to_le32(enables);
5340 }
5341
5342 static void
5343 __bnxt_hwrm_reserve_vf_rings(struct bnxt *bp,
5344                              struct hwrm_func_vf_cfg_input *req, int tx_rings,
5345                              int rx_rings, int ring_grps, int cp_rings,
5346                              int stats, int vnics)
5347 {
5348         u32 enables = 0;
5349
5350         bnxt_hwrm_cmd_hdr_init(bp, req, HWRM_FUNC_VF_CFG, -1, -1);
5351         enables |= tx_rings ? FUNC_VF_CFG_REQ_ENABLES_NUM_TX_RINGS : 0;
5352         enables |= rx_rings ? FUNC_VF_CFG_REQ_ENABLES_NUM_RX_RINGS |
5353                               FUNC_VF_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS : 0;
5354         if (bp->flags & BNXT_FLAG_CHIP_P5) {
5355                 enables |= tx_rings + ring_grps ?
5356                            FUNC_VF_CFG_REQ_ENABLES_NUM_CMPL_RINGS |
5357                            FUNC_VF_CFG_REQ_ENABLES_NUM_STAT_CTXS : 0;
5358         } else {
5359                 enables |= cp_rings ?
5360                            FUNC_VF_CFG_REQ_ENABLES_NUM_CMPL_RINGS |
5361                            FUNC_VF_CFG_REQ_ENABLES_NUM_STAT_CTXS : 0;
5362                 enables |= ring_grps ?
5363                            FUNC_VF_CFG_REQ_ENABLES_NUM_HW_RING_GRPS : 0;
5364         }
5365         enables |= vnics ? FUNC_VF_CFG_REQ_ENABLES_NUM_VNICS : 0;
5366         enables |= FUNC_VF_CFG_REQ_ENABLES_NUM_L2_CTXS;
5367
5368         req->num_l2_ctxs = cpu_to_le16(BNXT_VF_MAX_L2_CTX);
5369         req->num_tx_rings = cpu_to_le16(tx_rings);
5370         req->num_rx_rings = cpu_to_le16(rx_rings);
5371         if (bp->flags & BNXT_FLAG_CHIP_P5) {
5372                 req->num_cmpl_rings = cpu_to_le16(tx_rings + ring_grps);
5373                 req->num_rsscos_ctxs = cpu_to_le16(DIV_ROUND_UP(ring_grps, 64));
5374         } else {
5375                 req->num_cmpl_rings = cpu_to_le16(cp_rings);
5376                 req->num_hw_ring_grps = cpu_to_le16(ring_grps);
5377                 req->num_rsscos_ctxs = cpu_to_le16(BNXT_VF_MAX_RSS_CTX);
5378         }
5379         req->num_stat_ctxs = cpu_to_le16(stats);
5380         req->num_vnics = cpu_to_le16(vnics);
5381
5382         req->enables = cpu_to_le32(enables);
5383 }
5384
5385 static int
5386 bnxt_hwrm_reserve_pf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
5387                            int ring_grps, int cp_rings, int stats, int vnics)
5388 {
5389         struct hwrm_func_cfg_input req = {0};
5390         int rc;
5391
5392         __bnxt_hwrm_reserve_pf_rings(bp, &req, tx_rings, rx_rings, ring_grps,
5393                                      cp_rings, stats, vnics);
5394         if (!req.enables)
5395                 return 0;
5396
5397         rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5398         if (rc)
5399                 return -ENOMEM;
5400
5401         if (bp->hwrm_spec_code < 0x10601)
5402                 bp->hw_resc.resv_tx_rings = tx_rings;
5403
5404         rc = bnxt_hwrm_get_rings(bp);
5405         return rc;
5406 }
5407
5408 static int
5409 bnxt_hwrm_reserve_vf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
5410                            int ring_grps, int cp_rings, int stats, int vnics)
5411 {
5412         struct hwrm_func_vf_cfg_input req = {0};
5413         int rc;
5414
5415         if (!BNXT_NEW_RM(bp)) {
5416                 bp->hw_resc.resv_tx_rings = tx_rings;
5417                 return 0;
5418         }
5419
5420         __bnxt_hwrm_reserve_vf_rings(bp, &req, tx_rings, rx_rings, ring_grps,
5421                                      cp_rings, stats, vnics);
5422         rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5423         if (rc)
5424                 return -ENOMEM;
5425
5426         rc = bnxt_hwrm_get_rings(bp);
5427         return rc;
5428 }
5429
5430 static int bnxt_hwrm_reserve_rings(struct bnxt *bp, int tx, int rx, int grp,
5431                                    int cp, int stat, int vnic)
5432 {
5433         if (BNXT_PF(bp))
5434                 return bnxt_hwrm_reserve_pf_rings(bp, tx, rx, grp, cp, stat,
5435                                                   vnic);
5436         else
5437                 return bnxt_hwrm_reserve_vf_rings(bp, tx, rx, grp, cp, stat,
5438                                                   vnic);
5439 }
5440
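/* Ring accounting differs on P5 (57500-series) chips: every RX and TX
 * ring has its own hardware completion ring, while the MSI-X vectors
 * are backed by notification queues (NQs).  bnxt_nq_rings_in_use()
 * counts NQs, including any vectors loaned to the RDMA ULP, and
 * bnxt_cp_rings_in_use() counts true completion rings, which on P5 is
 * simply rx + tx.
 */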
5441 int bnxt_nq_rings_in_use(struct bnxt *bp)
5442 {
5443         int cp = bp->cp_nr_rings;
5444         int ulp_msix, ulp_base;
5445
5446         ulp_msix = bnxt_get_ulp_msix_num(bp);
5447         if (ulp_msix) {
5448                 ulp_base = bnxt_get_ulp_msix_base(bp);
5449                 cp += ulp_msix;
5450                 if ((ulp_base + ulp_msix) > cp)
5451                         cp = ulp_base + ulp_msix;
5452         }
5453         return cp;
5454 }
5455
5456 static int bnxt_cp_rings_in_use(struct bnxt *bp)
5457 {
5458         int cp;
5459
5460         if (!(bp->flags & BNXT_FLAG_CHIP_P5))
5461                 return bnxt_nq_rings_in_use(bp);
5462
5463         cp = bp->tx_nr_rings + bp->rx_nr_rings;
5464         return cp;
5465 }
5466
5467 static int bnxt_get_func_stat_ctxs(struct bnxt *bp)
5468 {
5469         return bp->cp_nr_rings + bnxt_get_ulp_stat_ctxs(bp);
5470 }
5471
5472 static bool bnxt_need_reserve_rings(struct bnxt *bp)
5473 {
5474         struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
5475         int cp = bnxt_cp_rings_in_use(bp);
5476         int nq = bnxt_nq_rings_in_use(bp);
5477         int rx = bp->rx_nr_rings, stat;
5478         int vnic = 1, grp = rx;
5479
5480         if (bp->hwrm_spec_code < 0x10601)
5481                 return false;
5482
5483         if (hw_resc->resv_tx_rings != bp->tx_nr_rings)
5484                 return true;
5485
5486         if ((bp->flags & BNXT_FLAG_RFS) && !(bp->flags & BNXT_FLAG_CHIP_P5))
5487                 vnic = rx + 1;
5488         if (bp->flags & BNXT_FLAG_AGG_RINGS)
5489                 rx <<= 1;
5490         stat = bnxt_get_func_stat_ctxs(bp);
5491         if (BNXT_NEW_RM(bp) &&
5492             (hw_resc->resv_rx_rings != rx || hw_resc->resv_cp_rings != cp ||
5493              hw_resc->resv_irqs < nq || hw_resc->resv_vnics != vnic ||
5494              hw_resc->resv_stat_ctxs != stat ||
5495              (hw_resc->resv_hw_ring_grps != grp &&
5496               !(bp->flags & BNXT_FLAG_CHIP_P5))))
5497                 return true;
5498         return false;
5499 }
5500
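/* Reconcile ring reservations with the current ring counts: compute
 * what is needed (RX doubled when aggregation rings are in use, one
 * VNIC per RX ring plus one when RFS is active on pre-P5 chips), ask
 * firmware to reserve it, then trim the driver's ring counts down to
 * what was actually granted.  If any required resource came back as
 * zero, fail with -ENOMEM.
 */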
5501 static int __bnxt_reserve_rings(struct bnxt *bp)
5502 {
5503         struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
5504         int cp = bnxt_nq_rings_in_use(bp);
5505         int tx = bp->tx_nr_rings;
5506         int rx = bp->rx_nr_rings;
5507         int grp, rx_rings, rc;
5508         int vnic = 1, stat;
5509         bool sh = false;
5510
5511         if (!bnxt_need_reserve_rings(bp))
5512                 return 0;
5513
5514         if (bp->flags & BNXT_FLAG_SHARED_RINGS)
5515                 sh = true;
5516         if ((bp->flags & BNXT_FLAG_RFS) && !(bp->flags & BNXT_FLAG_CHIP_P5))
5517                 vnic = rx + 1;
5518         if (bp->flags & BNXT_FLAG_AGG_RINGS)
5519                 rx <<= 1;
5520         grp = bp->rx_nr_rings;
5521         stat = bnxt_get_func_stat_ctxs(bp);
5522
5523         rc = bnxt_hwrm_reserve_rings(bp, tx, rx, grp, cp, stat, vnic);
5524         if (rc)
5525                 return rc;
5526
5527         tx = hw_resc->resv_tx_rings;
5528         if (BNXT_NEW_RM(bp)) {
5529                 rx = hw_resc->resv_rx_rings;
5530                 cp = hw_resc->resv_irqs;
5531                 grp = hw_resc->resv_hw_ring_grps;
5532                 vnic = hw_resc->resv_vnics;
5533                 stat = hw_resc->resv_stat_ctxs;
5534         }
5535
5536         rx_rings = rx;
5537         if (bp->flags & BNXT_FLAG_AGG_RINGS) {
5538                 if (rx >= 2) {
5539                         rx_rings = rx >> 1;
5540                 } else {
5541                         if (netif_running(bp->dev))
5542                                 return -ENOMEM;
5543
5544                         bp->flags &= ~BNXT_FLAG_AGG_RINGS;
5545                         bp->flags |= BNXT_FLAG_NO_AGG_RINGS;
5546                         bp->dev->hw_features &= ~NETIF_F_LRO;
5547                         bp->dev->features &= ~NETIF_F_LRO;
5548                         bnxt_set_ring_params(bp);
5549                 }
5550         }
5551         rx_rings = min_t(int, rx_rings, grp);
5552         cp = min_t(int, cp, bp->cp_nr_rings);
5553         if (stat > bnxt_get_ulp_stat_ctxs(bp))
5554                 stat -= bnxt_get_ulp_stat_ctxs(bp);
5555         cp = min_t(int, cp, stat);
5556         rc = bnxt_trim_rings(bp, &rx_rings, &tx, cp, sh);
5557         if (bp->flags & BNXT_FLAG_AGG_RINGS)
5558                 rx = rx_rings << 1;
5559         cp = sh ? max_t(int, tx, rx_rings) : tx + rx_rings;
5560         bp->tx_nr_rings = tx;
5561         bp->rx_nr_rings = rx_rings;
5562         bp->cp_nr_rings = cp;
5563
5564         if (!tx || !rx || !cp || !grp || !vnic || !stat)
5565                 return -ENOMEM;
5566
5567         return rc;
5568 }
5569
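/* The *_ASSETS_TEST flags ask firmware to only verify that the
 * requested resources are available, without committing a reservation.
 * This is used to validate a prospective configuration (for example a
 * new channel count) before the existing rings are torn down.
 */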
5570 static int bnxt_hwrm_check_vf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
5571                                     int ring_grps, int cp_rings, int stats,
5572                                     int vnics)
5573 {
5574         struct hwrm_func_vf_cfg_input req = {0};
5575         u32 flags;
5576         int rc;
5577
5578         if (!BNXT_NEW_RM(bp))
5579                 return 0;
5580
5581         __bnxt_hwrm_reserve_vf_rings(bp, &req, tx_rings, rx_rings, ring_grps,
5582                                      cp_rings, stats, vnics);
5583         flags = FUNC_VF_CFG_REQ_FLAGS_TX_ASSETS_TEST |
5584                 FUNC_VF_CFG_REQ_FLAGS_RX_ASSETS_TEST |
5585                 FUNC_VF_CFG_REQ_FLAGS_CMPL_ASSETS_TEST |
5586                 FUNC_VF_CFG_REQ_FLAGS_STAT_CTX_ASSETS_TEST |
5587                 FUNC_VF_CFG_REQ_FLAGS_VNIC_ASSETS_TEST |
5588                 FUNC_VF_CFG_REQ_FLAGS_RSSCOS_CTX_ASSETS_TEST;
5589         if (!(bp->flags & BNXT_FLAG_CHIP_P5))
5590                 flags |= FUNC_VF_CFG_REQ_FLAGS_RING_GRP_ASSETS_TEST;
5591
5592         req.flags = cpu_to_le32(flags);
5593         rc = hwrm_send_message_silent(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5594         if (rc)
5595                 return -ENOMEM;
5596         return 0;
5597 }
5598
5599 static int bnxt_hwrm_check_pf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
5600                                     int ring_grps, int cp_rings, int stats,
5601                                     int vnics)
5602 {
5603         struct hwrm_func_cfg_input req = {0};
5604         u32 flags;
5605         int rc;
5606
5607         __bnxt_hwrm_reserve_pf_rings(bp, &req, tx_rings, rx_rings, ring_grps,
5608                                      cp_rings, stats, vnics);
5609         flags = FUNC_CFG_REQ_FLAGS_TX_ASSETS_TEST;
5610         if (BNXT_NEW_RM(bp)) {
5611                 flags |= FUNC_CFG_REQ_FLAGS_RX_ASSETS_TEST |
5612                          FUNC_CFG_REQ_FLAGS_CMPL_ASSETS_TEST |
5613                          FUNC_CFG_REQ_FLAGS_STAT_CTX_ASSETS_TEST |
5614                          FUNC_CFG_REQ_FLAGS_VNIC_ASSETS_TEST;
5615                 if (bp->flags & BNXT_FLAG_CHIP_P5)
5616                         flags |= FUNC_CFG_REQ_FLAGS_RSSCOS_CTX_ASSETS_TEST |
5617                                  FUNC_CFG_REQ_FLAGS_NQ_ASSETS_TEST;
5618                 else
5619                         flags |= FUNC_CFG_REQ_FLAGS_RING_GRP_ASSETS_TEST;
5620         }
5621
5622         req.flags = cpu_to_le32(flags);
5623         rc = hwrm_send_message_silent(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5624         if (rc)
5625                 return -ENOMEM;
5626         return 0;
5627 }
5628
5629 static int bnxt_hwrm_check_rings(struct bnxt *bp, int tx_rings, int rx_rings,
5630                                  int ring_grps, int cp_rings, int stats,
5631                                  int vnics)
5632 {
5633         if (bp->hwrm_spec_code < 0x10801)
5634                 return 0;
5635
5636         if (BNXT_PF(bp))
5637                 return bnxt_hwrm_check_pf_rings(bp, tx_rings, rx_rings,
5638                                                 ring_grps, cp_rings, stats,
5639                                                 vnics);
5640
5641         return bnxt_hwrm_check_vf_rings(bp, tx_rings, rx_rings, ring_grps,
5642                                         cp_rings, stats, vnics);
5643 }
5644
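/* Query interrupt coalescing capabilities.  Legacy limits are filled
 * in first so that firmware older than HWRM 1.9.2, which lacks
 * HWRM_RING_AGGINT_QCAPS, still operates with sane defaults.
 */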
5645 static void bnxt_hwrm_coal_params_qcaps(struct bnxt *bp)
5646 {
5647         struct hwrm_ring_aggint_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
5648         struct bnxt_coal_cap *coal_cap = &bp->coal_cap;
5649         struct hwrm_ring_aggint_qcaps_input req = {0};
5650         int rc;
5651
5652         coal_cap->cmpl_params = BNXT_LEGACY_COAL_CMPL_PARAMS;
5653         coal_cap->num_cmpl_dma_aggr_max = 63;
5654         coal_cap->num_cmpl_dma_aggr_during_int_max = 63;
5655         coal_cap->cmpl_aggr_dma_tmr_max = 65535;
5656         coal_cap->cmpl_aggr_dma_tmr_during_int_max = 65535;
5657         coal_cap->int_lat_tmr_min_max = 65535;
5658         coal_cap->int_lat_tmr_max_max = 65535;
5659         coal_cap->num_cmpl_aggr_int_max = 65535;
5660         coal_cap->timer_units = 80;
5661
5662         if (bp->hwrm_spec_code < 0x10902)
5663                 return;
5664
5665         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_AGGINT_QCAPS, -1, -1);
5666         mutex_lock(&bp->hwrm_cmd_lock);
5667         rc = _hwrm_send_message_silent(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5668         if (!rc) {
5669                 coal_cap->cmpl_params = le32_to_cpu(resp->cmpl_params);
5670                 coal_cap->nq_params = le32_to_cpu(resp->nq_params);
5671                 coal_cap->num_cmpl_dma_aggr_max =
5672                         le16_to_cpu(resp->num_cmpl_dma_aggr_max);
5673                 coal_cap->num_cmpl_dma_aggr_during_int_max =
5674                         le16_to_cpu(resp->num_cmpl_dma_aggr_during_int_max);
5675                 coal_cap->cmpl_aggr_dma_tmr_max =
5676                         le16_to_cpu(resp->cmpl_aggr_dma_tmr_max);
5677                 coal_cap->cmpl_aggr_dma_tmr_during_int_max =
5678                         le16_to_cpu(resp->cmpl_aggr_dma_tmr_during_int_max);
5679                 coal_cap->int_lat_tmr_min_max =
5680                         le16_to_cpu(resp->int_lat_tmr_min_max);
5681                 coal_cap->int_lat_tmr_max_max =
5682                         le16_to_cpu(resp->int_lat_tmr_max_max);
5683                 coal_cap->num_cmpl_aggr_int_max =
5684                         le16_to_cpu(resp->num_cmpl_aggr_int_max);
5685                 coal_cap->timer_units = le16_to_cpu(resp->timer_units);
5686         }
5687         mutex_unlock(&bp->hwrm_cmd_lock);
5688 }
5689
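/* Convert microseconds to device timer units.  timer_units is reported
 * by HWRM_RING_AGGINT_QCAPS in nanoseconds per tick (the legacy default
 * above is 80 ns), so e.g. 100 usec maps to 100 * 1000 / 80 = 1250
 * timer units.
 */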
5690 static u16 bnxt_usec_to_coal_tmr(struct bnxt *bp, u16 usec)
5691 {
5692         struct bnxt_coal_cap *coal_cap = &bp->coal_cap;
5693
5694         return usec * 1000 / coal_cap->timer_units;
5695 }
5696
5697 static void bnxt_hwrm_set_coal_params(struct bnxt *bp,
5698         struct bnxt_coal *hw_coal,
5699         struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req)
5700 {
5701         struct bnxt_coal_cap *coal_cap = &bp->coal_cap;
5702         u32 cmpl_params = coal_cap->cmpl_params;
5703         u16 val, tmr, max, flags = 0;
5704
5705         max = hw_coal->bufs_per_record * 128;
5706         if (hw_coal->budget)
5707                 max = hw_coal->bufs_per_record * hw_coal->budget;
5708         max = min_t(u16, max, coal_cap->num_cmpl_aggr_int_max);
5709
5710         val = clamp_t(u16, hw_coal->coal_bufs, 1, max);
5711         req->num_cmpl_aggr_int = cpu_to_le16(val);
5712
5713         val = min_t(u16, val, coal_cap->num_cmpl_dma_aggr_max);
5714         req->num_cmpl_dma_aggr = cpu_to_le16(val);
5715
5716         val = clamp_t(u16, hw_coal->coal_bufs_irq, 1,
5717                       coal_cap->num_cmpl_dma_aggr_during_int_max);
5718         req->num_cmpl_dma_aggr_during_int = cpu_to_le16(val);
5719
5720         tmr = bnxt_usec_to_coal_tmr(bp, hw_coal->coal_ticks);
5721         tmr = clamp_t(u16, tmr, 1, coal_cap->int_lat_tmr_max_max);
5722         req->int_lat_tmr_max = cpu_to_le16(tmr);
5723
5724         /* min timer set to 1/2 of interrupt timer */
5725         if (cmpl_params & RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_INT_LAT_TMR_MIN) {
5726                 val = tmr / 2;
5727                 val = clamp_t(u16, val, 1, coal_cap->int_lat_tmr_min_max);
5728                 req->int_lat_tmr_min = cpu_to_le16(val);
5729                 req->enables |= cpu_to_le16(BNXT_COAL_CMPL_MIN_TMR_ENABLE);
5730         }
5731
5732         /* buf timer set to 1/4 of interrupt timer */
5733         val = clamp_t(u16, tmr / 4, 1, coal_cap->cmpl_aggr_dma_tmr_max);
5734         req->cmpl_aggr_dma_tmr = cpu_to_le16(val);
5735
5736         if (cmpl_params &
5737             RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_NUM_CMPL_DMA_AGGR_DURING_INT) {
5738                 tmr = bnxt_usec_to_coal_tmr(bp, hw_coal->coal_ticks_irq);
5739                 val = clamp_t(u16, tmr, 1,
5740                               coal_cap->cmpl_aggr_dma_tmr_during_int_max);
5741                 req->cmpl_aggr_dma_tmr_during_int = cpu_to_le16(val);
5742                 req->enables |=
5743                         cpu_to_le16(BNXT_COAL_CMPL_AGGR_TMR_DURING_INT_ENABLE);
5744         }
5745
5746         if (cmpl_params & RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_TIMER_RESET)
5747                 flags |= RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_TIMER_RESET;
5748         if ((cmpl_params & RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_RING_IDLE) &&
5749             hw_coal->idle_thresh && hw_coal->coal_ticks < hw_coal->idle_thresh)
5750                 flags |= RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_RING_IDLE;
5751         req->flags = cpu_to_le16(flags);
5752         req->enables |= cpu_to_le16(BNXT_COAL_CMPL_ENABLES);
5753 }
5754
5755 /* Caller holds bp->hwrm_cmd_lock */
5756 static int __bnxt_hwrm_set_coal_nq(struct bnxt *bp, struct bnxt_napi *bnapi,
5757                                    struct bnxt_coal *hw_coal)
5758 {
5759         struct hwrm_ring_cmpl_ring_cfg_aggint_params_input req = {0};
5760         struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
5761         struct bnxt_coal_cap *coal_cap = &bp->coal_cap;
5762         u32 nq_params = coal_cap->nq_params;
5763         u16 tmr;
5764
5765         if (!(nq_params & RING_AGGINT_QCAPS_RESP_NQ_PARAMS_INT_LAT_TMR_MIN))
5766                 return 0;
5767
5768         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS,
5769                                -1, -1);
5770         req.ring_id = cpu_to_le16(cpr->cp_ring_struct.fw_ring_id);
5771         req.flags =
5772                 cpu_to_le16(RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_IS_NQ);
5773
5774         tmr = bnxt_usec_to_coal_tmr(bp, hw_coal->coal_ticks) / 2;
5775         tmr = clamp_t(u16, tmr, 1, coal_cap->int_lat_tmr_min_max);
5776         req.int_lat_tmr_min = cpu_to_le16(tmr);
5777         req.enables |= cpu_to_le16(BNXT_COAL_CMPL_MIN_TMR_ENABLE);
5778         return _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5779 }
5780
5781 int bnxt_hwrm_set_ring_coal(struct bnxt *bp, struct bnxt_napi *bnapi)
5782 {
5783         struct hwrm_ring_cmpl_ring_cfg_aggint_params_input req_rx = {0};
5784         struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
5785         struct bnxt_coal coal;
5786
5787         /* Tick values in microseconds.
5788          * 1 coal_buf x bufs_per_record = 1 completion record.
5789          */
5790         memcpy(&coal, &bp->rx_coal, sizeof(struct bnxt_coal));
5791
5792         coal.coal_ticks = cpr->rx_ring_coal.coal_ticks;
5793         coal.coal_bufs = cpr->rx_ring_coal.coal_bufs;
5794
5795         if (!bnapi->rx_ring)
5796                 return -ENODEV;
5797
5798         bnxt_hwrm_cmd_hdr_init(bp, &req_rx,
5799                                HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS, -1, -1);
5800
5801         bnxt_hwrm_set_coal_params(bp, &coal, &req_rx);
5802
5803         req_rx.ring_id = cpu_to_le16(bnxt_cp_ring_for_rx(bp, bnapi->rx_ring));
5804
5805         return hwrm_send_message(bp, &req_rx, sizeof(req_rx),
5806                                  HWRM_CMD_TIMEOUT);
5807 }
5808
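/* Program coalescing on every completion ring.  One request template
 * is built for RX and one for TX; only the ring ID changes per ring.
 * On P5 chips, a NAPI instance that owns both an RX and a TX ring has
 * two completion rings, so a second message is sent for the TX ring,
 * followed by the coalescing parameters for the NQ itself.
 */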
5809 int bnxt_hwrm_set_coal(struct bnxt *bp)
5810 {
5811         int i, rc = 0;
5812         struct hwrm_ring_cmpl_ring_cfg_aggint_params_input req_rx = {0},
5813                                                            req_tx = {0}, *req;
5814
5815         bnxt_hwrm_cmd_hdr_init(bp, &req_rx,
5816                                HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS, -1, -1);
5817         bnxt_hwrm_cmd_hdr_init(bp, &req_tx,
5818                                HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS, -1, -1);
5819
5820         bnxt_hwrm_set_coal_params(bp, &bp->rx_coal, &req_rx);
5821         bnxt_hwrm_set_coal_params(bp, &bp->tx_coal, &req_tx);
5822
5823         mutex_lock(&bp->hwrm_cmd_lock);
5824         for (i = 0; i < bp->cp_nr_rings; i++) {
5825                 struct bnxt_napi *bnapi = bp->bnapi[i];
5826                 struct bnxt_coal *hw_coal;
5827                 u16 ring_id;
5828
5829                 req = &req_rx;
5830                 if (!bnapi->rx_ring) {
5831                         ring_id = bnxt_cp_ring_for_tx(bp, bnapi->tx_ring);
5832                         req = &req_tx;
5833                 } else {
5834                         ring_id = bnxt_cp_ring_for_rx(bp, bnapi->rx_ring);
5835                 }
5836                 req->ring_id = cpu_to_le16(ring_id);
5837
5838                 rc = _hwrm_send_message(bp, req, sizeof(*req),
5839                                         HWRM_CMD_TIMEOUT);
5840                 if (rc)
5841                         break;
5842
5843                 if (!(bp->flags & BNXT_FLAG_CHIP_P5))
5844                         continue;
5845
5846                 if (bnapi->rx_ring && bnapi->tx_ring) {
5847                         req = &req_tx;
5848                         ring_id = bnxt_cp_ring_for_tx(bp, bnapi->tx_ring);
5849                         req->ring_id = cpu_to_le16(ring_id);
5850                         rc = _hwrm_send_message(bp, req, sizeof(*req),
5851                                                 HWRM_CMD_TIMEOUT);
5852                         if (rc)
5853                                 break;
5854                 }
5855                 if (bnapi->rx_ring)
5856                         hw_coal = &bp->rx_coal;
5857                 else
5858                         hw_coal = &bp->tx_coal;
5859                 __bnxt_hwrm_set_coal_nq(bp, bnapi, hw_coal);
5860         }
5861         mutex_unlock(&bp->hwrm_cmd_lock);
5862         return rc;
5863 }
5864
5865 static int bnxt_hwrm_stat_ctx_free(struct bnxt *bp)
5866 {
5867         int rc = 0, i;
5868         struct hwrm_stat_ctx_free_input req = {0};
5869
5870         if (!bp->bnapi)
5871                 return 0;
5872
5873         if (BNXT_CHIP_TYPE_NITRO_A0(bp))
5874                 return 0;
5875
5876         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_STAT_CTX_FREE, -1, -1);
5877
5878         mutex_lock(&bp->hwrm_cmd_lock);
5879         for (i = 0; i < bp->cp_nr_rings; i++) {
5880                 struct bnxt_napi *bnapi = bp->bnapi[i];
5881                 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
5882
5883                 if (cpr->hw_stats_ctx_id != INVALID_STATS_CTX_ID) {
5884                         req.stat_ctx_id = cpu_to_le32(cpr->hw_stats_ctx_id);
5885
5886                         rc = _hwrm_send_message(bp, &req, sizeof(req),
5887                                                 HWRM_CMD_TIMEOUT);
5888                         if (rc)
5889                                 break;
5890
5891                         cpr->hw_stats_ctx_id = INVALID_STATS_CTX_ID;
5892                 }
5893         }
5894         mutex_unlock(&bp->hwrm_cmd_lock);
5895         return rc;
5896 }
5897
5898 static int bnxt_hwrm_stat_ctx_alloc(struct bnxt *bp)
5899 {
5900         int rc = 0, i;
5901         struct hwrm_stat_ctx_alloc_input req = {0};
5902         struct hwrm_stat_ctx_alloc_output *resp = bp->hwrm_cmd_resp_addr;
5903
5904         if (BNXT_CHIP_TYPE_NITRO_A0(bp))
5905                 return 0;
5906
5907         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_STAT_CTX_ALLOC, -1, -1);
5908
5909         req.update_period_ms = cpu_to_le32(bp->stats_coal_ticks / 1000);
5910
5911         mutex_lock(&bp->hwrm_cmd_lock);
5912         for (i = 0; i < bp->cp_nr_rings; i++) {
5913                 struct bnxt_napi *bnapi = bp->bnapi[i];
5914                 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
5915
5916                 req.stats_dma_addr = cpu_to_le64(cpr->hw_stats_map);
5917
5918                 rc = _hwrm_send_message(bp, &req, sizeof(req),
5919                                         HWRM_CMD_TIMEOUT);
5920                 if (rc)
5921                         break;
5922
5923                 cpr->hw_stats_ctx_id = le32_to_cpu(resp->stat_ctx_id);
5924
5925                 bp->grp_info[i].fw_stats_ctx = cpr->hw_stats_ctx_id;
5926         }
5927         mutex_unlock(&bp->hwrm_cmd_lock);
5928         return rc;
5929 }
5930
5931 static int bnxt_hwrm_func_qcfg(struct bnxt *bp)
5932 {
5933         struct hwrm_func_qcfg_input req = {0};
5934         struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
5935         u16 flags;
5936         int rc;
5937
5938         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QCFG, -1, -1);
5939         req.fid = cpu_to_le16(0xffff);
5940         mutex_lock(&bp->hwrm_cmd_lock);
5941         rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5942         if (rc)
5943                 goto func_qcfg_exit;
5944
5945 #ifdef CONFIG_BNXT_SRIOV
5946         if (BNXT_VF(bp)) {
5947                 struct bnxt_vf_info *vf = &bp->vf;
5948
5949                 vf->vlan = le16_to_cpu(resp->vlan) & VLAN_VID_MASK;
5950         }
5951 #endif
5952         flags = le16_to_cpu(resp->flags);
5953         if (flags & (FUNC_QCFG_RESP_FLAGS_FW_DCBX_AGENT_ENABLED |
5954                      FUNC_QCFG_RESP_FLAGS_FW_LLDP_AGENT_ENABLED)) {
5955                 bp->fw_cap |= BNXT_FW_CAP_LLDP_AGENT;
5956                 if (flags & FUNC_QCFG_RESP_FLAGS_FW_DCBX_AGENT_ENABLED)
5957                         bp->fw_cap |= BNXT_FW_CAP_DCBX_AGENT;
5958         }
5959         if (BNXT_PF(bp) && (flags & FUNC_QCFG_RESP_FLAGS_MULTI_HOST))
5960                 bp->flags |= BNXT_FLAG_MULTI_HOST;
5961
5962         switch (resp->port_partition_type) {
5963         case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_0:
5964         case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_5:
5965         case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR2_0:
5966                 bp->port_partition_type = resp->port_partition_type;
5967                 break;
5968         }
5969         if (bp->hwrm_spec_code < 0x10707 ||
5970             resp->evb_mode == FUNC_QCFG_RESP_EVB_MODE_VEB)
5971                 bp->br_mode = BRIDGE_MODE_VEB;
5972         else if (resp->evb_mode == FUNC_QCFG_RESP_EVB_MODE_VEPA)
5973                 bp->br_mode = BRIDGE_MODE_VEPA;
5974         else
5975                 bp->br_mode = BRIDGE_MODE_UNDEF;
5976
5977         bp->max_mtu = le16_to_cpu(resp->max_mtu_configured);
5978         if (!bp->max_mtu)
5979                 bp->max_mtu = BNXT_MAX_MTU;
5980
5981 func_qcfg_exit:
5982         mutex_unlock(&bp->hwrm_cmd_lock);
5983         return rc;
5984 }
5985
5986 static int bnxt_hwrm_func_backing_store_qcaps(struct bnxt *bp)
5987 {
5988         struct hwrm_func_backing_store_qcaps_input req = {0};
5989         struct hwrm_func_backing_store_qcaps_output *resp =
5990                 bp->hwrm_cmd_resp_addr;
5991         int rc;
5992
5993         if (bp->hwrm_spec_code < 0x10902 || BNXT_VF(bp) || bp->ctx)
5994                 return 0;
5995
5996         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_BACKING_STORE_QCAPS, -1, -1);
5997         mutex_lock(&bp->hwrm_cmd_lock);
5998         rc = _hwrm_send_message_silent(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5999         if (!rc) {
6000                 struct bnxt_ctx_pg_info *ctx_pg;
6001                 struct bnxt_ctx_mem_info *ctx;
6002                 int i;
6003
6004                 ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
6005                 if (!ctx) {
6006                         rc = -ENOMEM;
6007                         goto ctx_err;
6008                 }
6009                 ctx_pg = kcalloc(bp->max_q + 1, sizeof(*ctx_pg), GFP_KERNEL);
6010                 if (!ctx_pg) {
6011                         kfree(ctx);
6012                         rc = -ENOMEM;
6013                         goto ctx_err;
6014                 }
6015                 for (i = 0; i < bp->max_q + 1; i++, ctx_pg++)
6016                         ctx->tqm_mem[i] = ctx_pg;
6017
6018                 bp->ctx = ctx;
6019                 ctx->qp_max_entries = le32_to_cpu(resp->qp_max_entries);
6020                 ctx->qp_min_qp1_entries = le16_to_cpu(resp->qp_min_qp1_entries);
6021                 ctx->qp_max_l2_entries = le16_to_cpu(resp->qp_max_l2_entries);
6022                 ctx->qp_entry_size = le16_to_cpu(resp->qp_entry_size);
6023                 ctx->srq_max_l2_entries = le16_to_cpu(resp->srq_max_l2_entries);
6024                 ctx->srq_max_entries = le32_to_cpu(resp->srq_max_entries);
6025                 ctx->srq_entry_size = le16_to_cpu(resp->srq_entry_size);
6026                 ctx->cq_max_l2_entries = le16_to_cpu(resp->cq_max_l2_entries);
6027                 ctx->cq_max_entries = le32_to_cpu(resp->cq_max_entries);
6028                 ctx->cq_entry_size = le16_to_cpu(resp->cq_entry_size);
6029                 ctx->vnic_max_vnic_entries =
6030                         le16_to_cpu(resp->vnic_max_vnic_entries);
6031                 ctx->vnic_max_ring_table_entries =
6032                         le16_to_cpu(resp->vnic_max_ring_table_entries);
6033                 ctx->vnic_entry_size = le16_to_cpu(resp->vnic_entry_size);
6034                 ctx->stat_max_entries = le32_to_cpu(resp->stat_max_entries);
6035                 ctx->stat_entry_size = le16_to_cpu(resp->stat_entry_size);
6036                 ctx->tqm_entry_size = le16_to_cpu(resp->tqm_entry_size);
6037                 ctx->tqm_min_entries_per_ring =
6038                         le32_to_cpu(resp->tqm_min_entries_per_ring);
6039                 ctx->tqm_max_entries_per_ring =
6040                         le32_to_cpu(resp->tqm_max_entries_per_ring);
6041                 ctx->tqm_entries_multiple = resp->tqm_entries_multiple;
6042                 if (!ctx->tqm_entries_multiple)
6043                         ctx->tqm_entries_multiple = 1;
6044                 ctx->mrav_max_entries = le32_to_cpu(resp->mrav_max_entries);
6045                 ctx->mrav_entry_size = le16_to_cpu(resp->mrav_entry_size);
6046                 ctx->tim_entry_size = le16_to_cpu(resp->tim_entry_size);
6047                 ctx->tim_max_entries = le32_to_cpu(resp->tim_max_entries);
6048         } else {
6049                 rc = 0;
6050         }
6051 ctx_err:
6052         mutex_unlock(&bp->hwrm_cmd_lock);
6053         return rc;
6054 }
6055
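/* Encode backing-store page attributes for firmware: the upper nibble
 * of *pg_attr selects the page size (0 = 4KB default, 1 = 8KB, 2 =
 * 64KB) and the low bits the indirection level (0 = pages addressed
 * directly, 1 = one level of page tables, 2 = two levels).
 */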
6056 static void bnxt_hwrm_set_pg_attr(struct bnxt_ring_mem_info *rmem, u8 *pg_attr,
6057                                   __le64 *pg_dir)
6058 {
6059         u8 pg_size = 0;
6060
6061         if (BNXT_PAGE_SHIFT == 13)
6062                 pg_size = 1 << 4;
6063         else if (BNXT_PAGE_SHIFT == 16)
6064                 pg_size = 2 << 4;
6065
6066         *pg_attr = pg_size;
6067         if (rmem->depth >= 1) {
6068                 if (rmem->depth == 2)
6069                         *pg_attr |= 2;
6070                 else
6071                         *pg_attr |= 1;
6072                 *pg_dir = cpu_to_le64(rmem->pg_tbl_map);
6073         } else {
6074                 *pg_dir = cpu_to_le64(rmem->dma_arr[0]);
6075         }
6076 }
6077
6078 #define FUNC_BACKING_STORE_CFG_REQ_DFLT_ENABLES                 \
6079         (FUNC_BACKING_STORE_CFG_REQ_ENABLES_QP |                \
6080          FUNC_BACKING_STORE_CFG_REQ_ENABLES_SRQ |               \
6081          FUNC_BACKING_STORE_CFG_REQ_ENABLES_CQ |                \
6082          FUNC_BACKING_STORE_CFG_REQ_ENABLES_VNIC |              \
6083          FUNC_BACKING_STORE_CFG_REQ_ENABLES_STAT)
6084
6085 static int bnxt_hwrm_func_backing_store_cfg(struct bnxt *bp, u32 enables)
6086 {
6087         struct hwrm_func_backing_store_cfg_input req = {0};
6088         struct bnxt_ctx_mem_info *ctx = bp->ctx;
6089         struct bnxt_ctx_pg_info *ctx_pg;
6090         __le32 *num_entries;
6091         __le64 *pg_dir;
6092         u8 *pg_attr;
6093         int i, rc;
6094         u32 ena;
6095
6096         if (!ctx)
6097                 return 0;
6098
6099         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_BACKING_STORE_CFG, -1, -1);
6100         req.enables = cpu_to_le32(enables);
6101
6102         if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_QP) {
6103                 ctx_pg = &ctx->qp_mem;
6104                 req.qp_num_entries = cpu_to_le32(ctx_pg->entries);
6105                 req.qp_num_qp1_entries = cpu_to_le16(ctx->qp_min_qp1_entries);
6106                 req.qp_num_l2_entries = cpu_to_le16(ctx->qp_max_l2_entries);
6107                 req.qp_entry_size = cpu_to_le16(ctx->qp_entry_size);
6108                 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
6109                                       &req.qpc_pg_size_qpc_lvl,
6110                                       &req.qpc_page_dir);
6111         }
6112         if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_SRQ) {
6113                 ctx_pg = &ctx->srq_mem;
6114                 req.srq_num_entries = cpu_to_le32(ctx_pg->entries);
6115                 req.srq_num_l2_entries = cpu_to_le16(ctx->srq_max_l2_entries);
6116                 req.srq_entry_size = cpu_to_le16(ctx->srq_entry_size);
6117                 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
6118                                       &req.srq_pg_size_srq_lvl,
6119                                       &req.srq_page_dir);
6120         }
6121         if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_CQ) {
6122                 ctx_pg = &ctx->cq_mem;
6123                 req.cq_num_entries = cpu_to_le32(ctx_pg->entries);
6124                 req.cq_num_l2_entries = cpu_to_le16(ctx->cq_max_l2_entries);
6125                 req.cq_entry_size = cpu_to_le16(ctx->cq_entry_size);
6126                 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem, &req.cq_pg_size_cq_lvl,
6127                                       &req.cq_page_dir);
6128         }
6129         if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_VNIC) {
6130                 ctx_pg = &ctx->vnic_mem;
6131                 req.vnic_num_vnic_entries =
6132                         cpu_to_le16(ctx->vnic_max_vnic_entries);
6133                 req.vnic_num_ring_table_entries =
6134                         cpu_to_le16(ctx->vnic_max_ring_table_entries);
6135                 req.vnic_entry_size = cpu_to_le16(ctx->vnic_entry_size);
6136                 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
6137                                       &req.vnic_pg_size_vnic_lvl,
6138                                       &req.vnic_page_dir);
6139         }
6140         if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_STAT) {
6141                 ctx_pg = &ctx->stat_mem;
6142                 req.stat_num_entries = cpu_to_le32(ctx->stat_max_entries);
6143                 req.stat_entry_size = cpu_to_le16(ctx->stat_entry_size);
6144                 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
6145                                       &req.stat_pg_size_stat_lvl,
6146                                       &req.stat_page_dir);
6147         }
6148         if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_MRAV) {
6149                 ctx_pg = &ctx->mrav_mem;
6150                 req.mrav_num_entries = cpu_to_le32(ctx_pg->entries);
6151                 req.mrav_entry_size = cpu_to_le16(ctx->mrav_entry_size);
6152                 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
6153                                       &req.mrav_pg_size_mrav_lvl,
6154                                       &req.mrav_page_dir);
6155         }
6156         if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_TIM) {
6157                 ctx_pg = &ctx->tim_mem;
6158                 req.tim_num_entries = cpu_to_le32(ctx_pg->entries);
6159                 req.tim_entry_size = cpu_to_le16(ctx->tim_entry_size);
6160                 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
6161                                       &req.tim_pg_size_tim_lvl,
6162                                       &req.tim_page_dir);
6163         }
6164         for (i = 0, num_entries = &req.tqm_sp_num_entries,
6165              pg_attr = &req.tqm_sp_pg_size_tqm_sp_lvl,
6166              pg_dir = &req.tqm_sp_page_dir,
6167              ena = FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_SP;
6168              i < 9; i++, num_entries++, pg_attr++, pg_dir++, ena <<= 1) {
6169                 if (!(enables & ena))
6170                         continue;
6171
6172                 req.tqm_entry_size = cpu_to_le16(ctx->tqm_entry_size);
6173                 ctx_pg = ctx->tqm_mem[i];
6174                 *num_entries = cpu_to_le32(ctx_pg->entries);
6175                 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem, pg_attr, pg_dir);
6176         }
6177         rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
6178         if (rc)
6179                 rc = -EIO;
6180         return rc;
6181 }
6182
6183 static int bnxt_alloc_ctx_mem_blk(struct bnxt *bp,
6184                                   struct bnxt_ctx_pg_info *ctx_pg)
6185 {
6186         struct bnxt_ring_mem_info *rmem = &ctx_pg->ring_mem;
6187
6188         rmem->page_size = BNXT_PAGE_SIZE;
6189         rmem->pg_arr = ctx_pg->ctx_pg_arr;
6190         rmem->dma_arr = ctx_pg->ctx_dma_arr;
6191         rmem->flags = BNXT_RMEM_VALID_PTE_FLAG;
6192         if (rmem->depth >= 1)
6193                 rmem->flags |= BNXT_RMEM_USE_FULL_PAGE_FLAG;
6194         return bnxt_alloc_ring(bp, rmem);
6195 }
6196
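/* Allocate the pages backing one context memory region.  A region that
 * fits in MAX_CTX_PAGES pages (and was not asked for depth > 1) uses a
 * single level of indirection; larger regions use a two-level layout
 * with DIV_ROUND_UP(nr_pages, MAX_CTX_PAGES) first-level tables, each
 * mapping up to MAX_CTX_PAGES data pages, the last one trimmed to the
 * remainder.
 */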
6197 static int bnxt_alloc_ctx_pg_tbls(struct bnxt *bp,
6198                                   struct bnxt_ctx_pg_info *ctx_pg, u32 mem_size,
6199                                   u8 depth)
6200 {
6201         struct bnxt_ring_mem_info *rmem = &ctx_pg->ring_mem;
6202         int rc;
6203
6204         if (!mem_size)
6205                 return 0;
6206
6207         ctx_pg->nr_pages = DIV_ROUND_UP(mem_size, BNXT_PAGE_SIZE);
6208         if (ctx_pg->nr_pages > MAX_CTX_TOTAL_PAGES) {
6209                 ctx_pg->nr_pages = 0;
6210                 return -EINVAL;
6211         }
6212         if (ctx_pg->nr_pages > MAX_CTX_PAGES || depth > 1) {
6213                 int nr_tbls, i;
6214
6215                 rmem->depth = 2;
6216                 ctx_pg->ctx_pg_tbl = kcalloc(MAX_CTX_PAGES, sizeof(ctx_pg),
6217                                              GFP_KERNEL);
6218                 if (!ctx_pg->ctx_pg_tbl)
6219                         return -ENOMEM;
6220                 nr_tbls = DIV_ROUND_UP(ctx_pg->nr_pages, MAX_CTX_PAGES);
6221                 rmem->nr_pages = nr_tbls;
6222                 rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg);
6223                 if (rc)
6224                         return rc;
6225                 for (i = 0; i < nr_tbls; i++) {
6226                         struct bnxt_ctx_pg_info *pg_tbl;
6227
6228                         pg_tbl = kzalloc(sizeof(*pg_tbl), GFP_KERNEL);
6229                         if (!pg_tbl)
6230                                 return -ENOMEM;
6231                         ctx_pg->ctx_pg_tbl[i] = pg_tbl;
6232                         rmem = &pg_tbl->ring_mem;
6233                         rmem->pg_tbl = ctx_pg->ctx_pg_arr[i];
6234                         rmem->pg_tbl_map = ctx_pg->ctx_dma_arr[i];
6235                         rmem->depth = 1;
6236                         rmem->nr_pages = MAX_CTX_PAGES;
6237                         if (i == (nr_tbls - 1)) {
6238                                 int rem = ctx_pg->nr_pages % MAX_CTX_PAGES;
6239
6240                                 if (rem)
6241                                         rmem->nr_pages = rem;
6242                         }
6243                         rc = bnxt_alloc_ctx_mem_blk(bp, pg_tbl);
6244                         if (rc)
6245                                 break;
6246                 }
6247         } else {
6248                 rmem->nr_pages = DIV_ROUND_UP(mem_size, BNXT_PAGE_SIZE);
6249                 if (rmem->nr_pages > 1 || depth)
6250                         rmem->depth = 1;
6251                 rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg);
6252         }
6253         return rc;
6254 }
6255
6256 static void bnxt_free_ctx_pg_tbls(struct bnxt *bp,
6257                                   struct bnxt_ctx_pg_info *ctx_pg)
6258 {
6259         struct bnxt_ring_mem_info *rmem = &ctx_pg->ring_mem;
6260
6261         if (rmem->depth > 1 || ctx_pg->nr_pages > MAX_CTX_PAGES ||
6262             ctx_pg->ctx_pg_tbl) {
6263                 int i, nr_tbls = rmem->nr_pages;
6264
6265                 for (i = 0; i < nr_tbls; i++) {
6266                         struct bnxt_ctx_pg_info *pg_tbl;
6267                         struct bnxt_ring_mem_info *rmem2;
6268
6269                         pg_tbl = ctx_pg->ctx_pg_tbl[i];
6270                         if (!pg_tbl)
6271                                 continue;
6272                         rmem2 = &pg_tbl->ring_mem;
6273                         bnxt_free_ring(bp, rmem2);
6274                         ctx_pg->ctx_pg_arr[i] = NULL;
6275                         kfree(pg_tbl);
6276                         ctx_pg->ctx_pg_tbl[i] = NULL;
6277                 }
6278                 kfree(ctx_pg->ctx_pg_tbl);
6279                 ctx_pg->ctx_pg_tbl = NULL;
6280         }
6281         bnxt_free_ring(bp, rmem);
6282         ctx_pg->nr_pages = 0;
6283 }
6284
6285 static void bnxt_free_ctx_mem(struct bnxt *bp)
6286 {
6287         struct bnxt_ctx_mem_info *ctx = bp->ctx;
6288         int i;
6289
6290         if (!ctx)
6291                 return;
6292
6293         if (ctx->tqm_mem[0]) {
6294                 for (i = 0; i < bp->max_q + 1; i++)
6295                         bnxt_free_ctx_pg_tbls(bp, ctx->tqm_mem[i]);
6296                 kfree(ctx->tqm_mem[0]);
6297                 ctx->tqm_mem[0] = NULL;
6298         }
6299
6300         bnxt_free_ctx_pg_tbls(bp, &ctx->tim_mem);
6301         bnxt_free_ctx_pg_tbls(bp, &ctx->mrav_mem);
6302         bnxt_free_ctx_pg_tbls(bp, &ctx->stat_mem);
6303         bnxt_free_ctx_pg_tbls(bp, &ctx->vnic_mem);
6304         bnxt_free_ctx_pg_tbls(bp, &ctx->cq_mem);
6305         bnxt_free_ctx_pg_tbls(bp, &ctx->srq_mem);
6306         bnxt_free_ctx_pg_tbls(bp, &ctx->qp_mem);
6307         ctx->flags &= ~BNXT_CTX_FLAG_INITED;
6308 }
6309
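/* Size and allocate all context memory regions from the queried
 * limits.  L2-only operation uses the minimum layout; when RoCE is
 * supported, headroom is added (extra QPs and SRQs, two CQ entries per
 * extra QP, MRAV sized at 4x the extra QPs, TIM sized to match the QP
 * region) and deeper page tables are used.  TQM rings are sized from
 * the L2 QP limit, rounded up to the entry multiple and clamped to the
 * advertised per-ring bounds.
 */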
6310 static int bnxt_alloc_ctx_mem(struct bnxt *bp)
6311 {
6312         struct bnxt_ctx_pg_info *ctx_pg;
6313         struct bnxt_ctx_mem_info *ctx;
6314         u32 mem_size, ena, entries;
6315         u32 extra_srqs = 0;
6316         u32 extra_qps = 0;
6317         u8 pg_lvl = 1;
6318         int i, rc;
6319
6320         rc = bnxt_hwrm_func_backing_store_qcaps(bp);
6321         if (rc) {
6322                 netdev_err(bp->dev, "Failed querying context mem capability, rc = %d.\n",
6323                            rc);
6324                 return rc;
6325         }
6326         ctx = bp->ctx;
6327         if (!ctx || (ctx->flags & BNXT_CTX_FLAG_INITED))
6328                 return 0;
6329
6330         if (bp->flags & BNXT_FLAG_ROCE_CAP) {
6331                 pg_lvl = 2;
6332                 extra_qps = 65536;
6333                 extra_srqs = 8192;
6334         }
6335
6336         ctx_pg = &ctx->qp_mem;
6337         ctx_pg->entries = ctx->qp_min_qp1_entries + ctx->qp_max_l2_entries +
6338                           extra_qps;
6339         mem_size = ctx->qp_entry_size * ctx_pg->entries;
6340         rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, pg_lvl);
6341         if (rc)
6342                 return rc;
6343
6344         ctx_pg = &ctx->srq_mem;
6345         ctx_pg->entries = ctx->srq_max_l2_entries + extra_srqs;
6346         mem_size = ctx->srq_entry_size * ctx_pg->entries;
6347         rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, pg_lvl);
6348         if (rc)
6349                 return rc;
6350
6351         ctx_pg = &ctx->cq_mem;
6352         ctx_pg->entries = ctx->cq_max_l2_entries + extra_qps * 2;
6353         mem_size = ctx->cq_entry_size * ctx_pg->entries;
6354         rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, pg_lvl);
6355         if (rc)
6356                 return rc;
6357
6358         ctx_pg = &ctx->vnic_mem;
6359         ctx_pg->entries = ctx->vnic_max_vnic_entries +
6360                           ctx->vnic_max_ring_table_entries;
6361         mem_size = ctx->vnic_entry_size * ctx_pg->entries;
6362         rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 1);
6363         if (rc)
6364                 return rc;
6365
6366         ctx_pg = &ctx->stat_mem;
6367         ctx_pg->entries = ctx->stat_max_entries;
6368         mem_size = ctx->stat_entry_size * ctx_pg->entries;
6369         rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 1);
6370         if (rc)
6371                 return rc;
6372
6373         ena = 0;
6374         if (!(bp->flags & BNXT_FLAG_ROCE_CAP))
6375                 goto skip_rdma;
6376
6377         ctx_pg = &ctx->mrav_mem;
6378         ctx_pg->entries = extra_qps * 4;
6379         mem_size = ctx->mrav_entry_size * ctx_pg->entries;
6380         rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 2);
6381         if (rc)
6382                 return rc;
6383         ena = FUNC_BACKING_STORE_CFG_REQ_ENABLES_MRAV;
6384
6385         ctx_pg = &ctx->tim_mem;
6386         ctx_pg->entries = ctx->qp_mem.entries;
6387         mem_size = ctx->tim_entry_size * ctx_pg->entries;
6388         rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 1);
6389         if (rc)
6390                 return rc;
6391         ena |= FUNC_BACKING_STORE_CFG_REQ_ENABLES_TIM;
6392
6393 skip_rdma:
6394         entries = ctx->qp_max_l2_entries + extra_qps;
6395         entries = roundup(entries, ctx->tqm_entries_multiple);
6396         entries = clamp_t(u32, entries, ctx->tqm_min_entries_per_ring,
6397                           ctx->tqm_max_entries_per_ring);
6398         for (i = 0; i < bp->max_q + 1; i++) {
6399                 ctx_pg = ctx->tqm_mem[i];
6400                 ctx_pg->entries = entries;
6401                 mem_size = ctx->tqm_entry_size * entries;
6402                 rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 1);
6403                 if (rc)
6404                         return rc;
6405                 ena |= FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_SP << i;
6406         }
6407         ena |= FUNC_BACKING_STORE_CFG_REQ_DFLT_ENABLES;
6408         rc = bnxt_hwrm_func_backing_store_cfg(bp, ena);
6409         if (rc)
6410                 netdev_err(bp->dev, "Failed configuring context mem, rc = %d.\n",
6411                            rc);
6412         else
6413                 ctx->flags |= BNXT_CTX_FLAG_INITED;
6414
6415         return 0;
6416 }
6417
6418 int bnxt_hwrm_func_resc_qcaps(struct bnxt *bp, bool all)
6419 {
6420         struct hwrm_func_resource_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
6421         struct hwrm_func_resource_qcaps_input req = {0};
6422         struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
6423         int rc;
6424
6425         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_RESOURCE_QCAPS, -1, -1);
6426         req.fid = cpu_to_le16(0xffff);
6427
6428         mutex_lock(&bp->hwrm_cmd_lock);
6429         rc = _hwrm_send_message_silent(bp, &req, sizeof(req),
6430                                        HWRM_CMD_TIMEOUT);
6431         if (rc) {
6432                 rc = -EIO;
6433                 goto hwrm_func_resc_qcaps_exit;
6434         }
6435
6436         hw_resc->max_tx_sch_inputs = le16_to_cpu(resp->max_tx_scheduler_inputs);
6437         if (!all)
6438                 goto hwrm_func_resc_qcaps_exit;
6439
6440         hw_resc->min_rsscos_ctxs = le16_to_cpu(resp->min_rsscos_ctx);
6441         hw_resc->max_rsscos_ctxs = le16_to_cpu(resp->max_rsscos_ctx);
6442         hw_resc->min_cp_rings = le16_to_cpu(resp->min_cmpl_rings);
6443         hw_resc->max_cp_rings = le16_to_cpu(resp->max_cmpl_rings);
6444         hw_resc->min_tx_rings = le16_to_cpu(resp->min_tx_rings);
6445         hw_resc->max_tx_rings = le16_to_cpu(resp->max_tx_rings);
6446         hw_resc->min_rx_rings = le16_to_cpu(resp->min_rx_rings);
6447         hw_resc->max_rx_rings = le16_to_cpu(resp->max_rx_rings);
6448         hw_resc->min_hw_ring_grps = le16_to_cpu(resp->min_hw_ring_grps);
6449         hw_resc->max_hw_ring_grps = le16_to_cpu(resp->max_hw_ring_grps);
6450         hw_resc->min_l2_ctxs = le16_to_cpu(resp->min_l2_ctxs);
6451         hw_resc->max_l2_ctxs = le16_to_cpu(resp->max_l2_ctxs);
6452         hw_resc->min_vnics = le16_to_cpu(resp->min_vnics);
6453         hw_resc->max_vnics = le16_to_cpu(resp->max_vnics);
6454         hw_resc->min_stat_ctxs = le16_to_cpu(resp->min_stat_ctx);
6455         hw_resc->max_stat_ctxs = le16_to_cpu(resp->max_stat_ctx);
6456
6457         if (bp->flags & BNXT_FLAG_CHIP_P5) {
6458                 u16 max_msix = le16_to_cpu(resp->max_msix);
6459
6460                 hw_resc->max_nqs = max_msix;
6461                 hw_resc->max_hw_ring_grps = hw_resc->max_rx_rings;
6462         }
6463
6464         if (BNXT_PF(bp)) {
6465                 struct bnxt_pf_info *pf = &bp->pf;
6466
6467                 pf->vf_resv_strategy =
6468                         le16_to_cpu(resp->vf_reservation_strategy);
6469                 if (pf->vf_resv_strategy > BNXT_VF_RESV_STRATEGY_MINIMAL_STATIC)
6470                         pf->vf_resv_strategy = BNXT_VF_RESV_STRATEGY_MAXIMAL;
6471         }
6472 hwrm_func_resc_qcaps_exit:
6473         mutex_unlock(&bp->hwrm_cmd_lock);
6474         return rc;
6475 }
6476
6477 static int __bnxt_hwrm_func_qcaps(struct bnxt *bp)
6478 {
6479         int rc = 0;
6480         struct hwrm_func_qcaps_input req = {0};
6481         struct hwrm_func_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
6482         struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
6483         u32 flags;
6484
6485         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QCAPS, -1, -1);
6486         req.fid = cpu_to_le16(0xffff);
6487
6488         mutex_lock(&bp->hwrm_cmd_lock);
6489         rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
6490         if (rc)
6491                 goto hwrm_func_qcaps_exit;
6492
6493         flags = le32_to_cpu(resp->flags);
6494         if (flags & FUNC_QCAPS_RESP_FLAGS_ROCE_V1_SUPPORTED)
6495                 bp->flags |= BNXT_FLAG_ROCEV1_CAP;
6496         if (flags & FUNC_QCAPS_RESP_FLAGS_ROCE_V2_SUPPORTED)
6497                 bp->flags |= BNXT_FLAG_ROCEV2_CAP;
6498
6499         bp->tx_push_thresh = 0;
6500         if (flags & FUNC_QCAPS_RESP_FLAGS_PUSH_MODE_SUPPORTED)
6501                 bp->tx_push_thresh = BNXT_TX_PUSH_THRESH;
6502
6503         hw_resc->max_rsscos_ctxs = le16_to_cpu(resp->max_rsscos_ctx);
6504         hw_resc->max_cp_rings = le16_to_cpu(resp->max_cmpl_rings);
6505         hw_resc->max_tx_rings = le16_to_cpu(resp->max_tx_rings);
6506         hw_resc->max_rx_rings = le16_to_cpu(resp->max_rx_rings);
6507         hw_resc->max_hw_ring_grps = le32_to_cpu(resp->max_hw_ring_grps);
6508         if (!hw_resc->max_hw_ring_grps)
6509                 hw_resc->max_hw_ring_grps = hw_resc->max_tx_rings;
6510         hw_resc->max_l2_ctxs = le16_to_cpu(resp->max_l2_ctxs);
6511         hw_resc->max_vnics = le16_to_cpu(resp->max_vnics);
6512         hw_resc->max_stat_ctxs = le16_to_cpu(resp->max_stat_ctx);
6513
6514         if (BNXT_PF(bp)) {
6515                 struct bnxt_pf_info *pf = &bp->pf;
6516
6517                 pf->fw_fid = le16_to_cpu(resp->fid);
6518                 pf->port_id = le16_to_cpu(resp->port_id);
6519                 bp->dev->dev_port = pf->port_id;
6520                 memcpy(pf->mac_addr, resp->mac_address, ETH_ALEN);
6521                 pf->first_vf_id = le16_to_cpu(resp->first_vf_id);
6522                 pf->max_vfs = le16_to_cpu(resp->max_vfs);
6523                 pf->max_encap_records = le32_to_cpu(resp->max_encap_records);
6524                 pf->max_decap_records = le32_to_cpu(resp->max_decap_records);
6525                 pf->max_tx_em_flows = le32_to_cpu(resp->max_tx_em_flows);
6526                 pf->max_tx_wm_flows = le32_to_cpu(resp->max_tx_wm_flows);
6527                 pf->max_rx_em_flows = le32_to_cpu(resp->max_rx_em_flows);
6528                 pf->max_rx_wm_flows = le32_to_cpu(resp->max_rx_wm_flows);
6529                 if (flags & FUNC_QCAPS_RESP_FLAGS_WOL_MAGICPKT_SUPPORTED)
6530                         bp->flags |= BNXT_FLAG_WOL_CAP;
6531         } else {
6532 #ifdef CONFIG_BNXT_SRIOV
6533                 struct bnxt_vf_info *vf = &bp->vf;
6534
6535                 vf->fw_fid = le16_to_cpu(resp->fid);
6536                 memcpy(vf->mac_addr, resp->mac_address, ETH_ALEN);
6537 #endif
6538         }
6539
6540 hwrm_func_qcaps_exit:
6541         mutex_unlock(&bp->hwrm_cmd_lock);
6542         return rc;
6543 }
6544
6545 static int bnxt_hwrm_queue_qportcfg(struct bnxt *bp);
6546
6547 static int bnxt_hwrm_func_qcaps(struct bnxt *bp)
6548 {
6549         int rc;
6550
6551         rc = __bnxt_hwrm_func_qcaps(bp);
6552         if (rc)
6553                 return rc;
6554         rc = bnxt_hwrm_queue_qportcfg(bp);
6555         if (rc) {
6556                 netdev_err(bp->dev, "hwrm query qportcfg failure rc: %d\n", rc);
6557                 return rc;
6558         }
6559         if (bp->hwrm_spec_code >= 0x10803) {
6560                 rc = bnxt_alloc_ctx_mem(bp);
6561                 if (rc)
6562                         return rc;
6563                 rc = bnxt_hwrm_func_resc_qcaps(bp, true);
6564                 if (!rc)
6565                         bp->fw_cap |= BNXT_FW_CAP_NEW_RM;
6566         }
6567         return 0;
6568 }
6569
6570 static int bnxt_hwrm_func_reset(struct bnxt *bp)
6571 {
6572         struct hwrm_func_reset_input req = {0};
6573
6574         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_RESET, -1, -1);
6575         req.enables = 0;
6576
6577         return hwrm_send_message(bp, &req, sizeof(req), HWRM_RESET_TIMEOUT);
6578 }
6579
6580 static int bnxt_hwrm_queue_qportcfg(struct bnxt *bp)
6581 {
6582         int rc = 0;
6583         struct hwrm_queue_qportcfg_input req = {0};
6584         struct hwrm_queue_qportcfg_output *resp = bp->hwrm_cmd_resp_addr;
6585         u8 i, j, *qptr;
6586         bool no_rdma;
6587
6588         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_QUEUE_QPORTCFG, -1, -1);
6589
6590         mutex_lock(&bp->hwrm_cmd_lock);
6591         rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
6592         if (rc)
6593                 goto qportcfg_exit;
6594
6595         if (!resp->max_configurable_queues) {
6596                 rc = -EINVAL;
6597                 goto qportcfg_exit;
6598         }
6599         bp->max_tc = resp->max_configurable_queues;
6600         bp->max_lltc = resp->max_configurable_lossless_queues;
6601         if (bp->max_tc > BNXT_MAX_QUEUE)
6602                 bp->max_tc = BNXT_MAX_QUEUE;
6603
6604         no_rdma = !(bp->flags & BNXT_FLAG_ROCE_CAP);
6605         qptr = &resp->queue_id0;
6606         for (i = 0, j = 0; i < bp->max_tc; i++) {
6607                 bp->q_info[j].queue_id = *qptr;
6608                 bp->q_ids[i] = *qptr++;
6609                 bp->q_info[j].queue_profile = *qptr++;
6610                 bp->tc_to_qidx[j] = j;
6611                 if (!BNXT_CNPQ(bp->q_info[j].queue_profile) ||
6612                     (no_rdma && BNXT_PF(bp)))
6613                         j++;
6614         }
6615         bp->max_q = bp->max_tc;
6616         bp->max_tc = max_t(u8, j, 1);
6617
6618         if (resp->queue_cfg_info & QUEUE_QPORTCFG_RESP_QUEUE_CFG_INFO_ASYM_CFG)
6619                 bp->max_tc = 1;
6620
6621         if (bp->max_lltc > bp->max_tc)
6622                 bp->max_lltc = bp->max_tc;
6623
6624 qportcfg_exit:
6625         mutex_unlock(&bp->hwrm_cmd_lock);
6626         return rc;
6627 }
6628
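/* Query the firmware's HWRM interface version.  The version is packed
 * as (major << 16 | minor << 8 | update) into bp->hwrm_spec_code, so a
 * check such as "bp->hwrm_spec_code < 0x10601" reads as "older than
 * HWRM 1.6.1".
 */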
6629 static int bnxt_hwrm_ver_get(struct bnxt *bp)
6630 {
6631         int rc;
6632         struct hwrm_ver_get_input req = {0};
6633         struct hwrm_ver_get_output *resp = bp->hwrm_cmd_resp_addr;
6634         u32 dev_caps_cfg;
6635
6636         bp->hwrm_max_req_len = HWRM_MAX_REQ_LEN;
6637         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VER_GET, -1, -1);
6638         req.hwrm_intf_maj = HWRM_VERSION_MAJOR;
6639         req.hwrm_intf_min = HWRM_VERSION_MINOR;
6640         req.hwrm_intf_upd = HWRM_VERSION_UPDATE;
6641         mutex_lock(&bp->hwrm_cmd_lock);
6642         rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
6643         if (rc)
6644                 goto hwrm_ver_get_exit;
6645
6646         memcpy(&bp->ver_resp, resp, sizeof(struct hwrm_ver_get_output));
6647
6648         bp->hwrm_spec_code = resp->hwrm_intf_maj_8b << 16 |
6649                              resp->hwrm_intf_min_8b << 8 |
6650                              resp->hwrm_intf_upd_8b;
6651         if (resp->hwrm_intf_maj_8b < 1) {
6652                 netdev_warn(bp->dev, "HWRM interface %d.%d.%d is older than 1.0.0.\n",
6653                             resp->hwrm_intf_maj_8b, resp->hwrm_intf_min_8b,
6654                             resp->hwrm_intf_upd_8b);
6655                 netdev_warn(bp->dev, "Please update firmware with HWRM interface 1.0.0 or newer.\n");
6656         }
6657         snprintf(bp->fw_ver_str, BC_HWRM_STR_LEN, "%d.%d.%d.%d",
6658                  resp->hwrm_fw_maj_8b, resp->hwrm_fw_min_8b,
6659                  resp->hwrm_fw_bld_8b, resp->hwrm_fw_rsvd_8b);
6660
6661         bp->hwrm_cmd_timeout = le16_to_cpu(resp->def_req_timeout);
6662         if (!bp->hwrm_cmd_timeout)
6663                 bp->hwrm_cmd_timeout = DFLT_HWRM_CMD_TIMEOUT;
6664
6665         if (resp->hwrm_intf_maj_8b >= 1) {
6666                 bp->hwrm_max_req_len = le16_to_cpu(resp->max_req_win_len);
6667                 bp->hwrm_max_ext_req_len = le16_to_cpu(resp->max_ext_req_len);
6668         }
6669         if (bp->hwrm_max_ext_req_len < HWRM_MAX_REQ_LEN)
6670                 bp->hwrm_max_ext_req_len = HWRM_MAX_REQ_LEN;
6671
6672         bp->chip_num = le16_to_cpu(resp->chip_num);
6673         if (bp->chip_num == CHIP_NUM_58700 && !resp->chip_rev &&
6674             !resp->chip_metal)
6675                 bp->flags |= BNXT_FLAG_CHIP_NITRO_A0;
6676
6677         dev_caps_cfg = le32_to_cpu(resp->dev_caps_cfg);
6678         if ((dev_caps_cfg & VER_GET_RESP_DEV_CAPS_CFG_SHORT_CMD_SUPPORTED) &&
6679             (dev_caps_cfg & VER_GET_RESP_DEV_CAPS_CFG_SHORT_CMD_REQUIRED))
6680                 bp->fw_cap |= BNXT_FW_CAP_SHORT_CMD;
6681
6682         if (dev_caps_cfg & VER_GET_RESP_DEV_CAPS_CFG_KONG_MB_CHNL_SUPPORTED)
6683                 bp->fw_cap |= BNXT_FW_CAP_KONG_MB_CHNL;
6684
6685         if (dev_caps_cfg &
6686             VER_GET_RESP_DEV_CAPS_CFG_FLOW_HANDLE_64BIT_SUPPORTED)
6687                 bp->fw_cap |= BNXT_FW_CAP_OVS_64BIT_HANDLE;
6688
6689 hwrm_ver_get_exit:
6690         mutex_unlock(&bp->hwrm_cmd_lock);
6691         return rc;
6692 }
6693
6694 int bnxt_hwrm_fw_set_time(struct bnxt *bp)
6695 {
6696         struct hwrm_fw_set_time_input req = {0};
6697         struct tm tm;
6698         time64_t now = ktime_get_real_seconds();
6699
6700         if ((BNXT_VF(bp) && bp->hwrm_spec_code < 0x10901) ||
6701             bp->hwrm_spec_code < 0x10400)
6702                 return -EOPNOTSUPP;
6703
6704         time64_to_tm(now, 0, &tm);
6705         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FW_SET_TIME, -1, -1);
6706         req.year = cpu_to_le16(1900 + tm.tm_year);
6707         req.month = 1 + tm.tm_mon;
6708         req.day = tm.tm_mday;
6709         req.hour = tm.tm_hour;
6710         req.minute = tm.tm_min;
6711         req.second = tm.tm_sec;
6712         return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
6713 }
6714
6715 static int bnxt_hwrm_port_qstats(struct bnxt *bp)
6716 {
6717         int rc;
6718         struct bnxt_pf_info *pf = &bp->pf;
6719         struct hwrm_port_qstats_input req = {0};
6720
6721         if (!(bp->flags & BNXT_FLAG_PORT_STATS))
6722                 return 0;
6723
6724         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_QSTATS, -1, -1);
6725         req.port_id = cpu_to_le16(pf->port_id);
6726         req.tx_stat_host_addr = cpu_to_le64(bp->hw_tx_port_stats_map);
6727         req.rx_stat_host_addr = cpu_to_le64(bp->hw_rx_port_stats_map);
6728         rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
6729         return rc;
6730 }
6731
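/* Query the extended port statistics into the pre-mapped DMA buffers
 * and record how much of each structure the firmware actually filled
 * in (in 8-byte units).  If the firmware reports the PFC duration
 * counters, also query the priority-to-CoS-queue mapping so that
 * per-priority stats can be reported.
 */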
6732 static int bnxt_hwrm_port_qstats_ext(struct bnxt *bp)
6733 {
6734         struct hwrm_port_qstats_ext_output *resp = bp->hwrm_cmd_resp_addr;
6735         struct hwrm_queue_pri2cos_qcfg_input req2 = {0};
6736         struct hwrm_port_qstats_ext_input req = {0};
6737         struct bnxt_pf_info *pf = &bp->pf;
6738         int rc;
6739
6740         if (!(bp->flags & BNXT_FLAG_PORT_STATS_EXT))
6741                 return 0;
6742
6743         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_QSTATS_EXT, -1, -1);
6744         req.port_id = cpu_to_le16(pf->port_id);
6745         req.rx_stat_size = cpu_to_le16(sizeof(struct rx_port_stats_ext));
6746         req.rx_stat_host_addr = cpu_to_le64(bp->hw_rx_port_stats_ext_map);
6747         req.tx_stat_size = cpu_to_le16(sizeof(struct tx_port_stats_ext));
6748         req.tx_stat_host_addr = cpu_to_le64(bp->hw_tx_port_stats_ext_map);
6749         mutex_lock(&bp->hwrm_cmd_lock);
6750         rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
6751         if (!rc) {
6752                 bp->fw_rx_stats_ext_size = le16_to_cpu(resp->rx_stat_size) / 8;
6753                 bp->fw_tx_stats_ext_size = le16_to_cpu(resp->tx_stat_size) / 8;
6754         } else {
6755                 bp->fw_rx_stats_ext_size = 0;
6756                 bp->fw_tx_stats_ext_size = 0;
6757         }
6758         if (bp->fw_tx_stats_ext_size <=
6759             offsetof(struct tx_port_stats_ext, pfc_pri0_tx_duration_us) / 8) {
6760                 mutex_unlock(&bp->hwrm_cmd_lock);
6761                 bp->pri2cos_valid = 0;
6762                 return rc;
6763         }
6764
6765         bnxt_hwrm_cmd_hdr_init(bp, &req2, HWRM_QUEUE_PRI2COS_QCFG, -1, -1);
6766         req2.flags = cpu_to_le32(QUEUE_PRI2COS_QCFG_REQ_FLAGS_IVLAN);
6767
6768         rc = _hwrm_send_message(bp, &req2, sizeof(req2), HWRM_CMD_TIMEOUT);
6769         if (!rc) {
6770                 struct hwrm_queue_pri2cos_qcfg_output *resp2;
6771                 u8 *pri2cos;
6772                 int i, j;
6773
6774                 resp2 = bp->hwrm_cmd_resp_addr;
6775                 pri2cos = &resp2->pri0_cos_queue_id;
6776                 for (i = 0; i < 8; i++) {
6777                         u8 queue_id = pri2cos[i];
6778
6779                         for (j = 0; j < bp->max_q; j++) {
6780                                 if (bp->q_ids[j] == queue_id)
6781                                         bp->pri2cos[i] = j;
6782                         }
6783                 }
6784                 bp->pri2cos_valid = 1;
6785         }
6786         mutex_unlock(&bp->hwrm_cmd_lock);
6787         return rc;
6788 }
6789
6790 static void bnxt_hwrm_free_tunnel_ports(struct bnxt *bp)
6791 {
6792         if (bp->vxlan_port_cnt) {
6793                 bnxt_hwrm_tunnel_dst_port_free(
6794                         bp, TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN);
6795         }
6796         bp->vxlan_port_cnt = 0;
6797         if (bp->nge_port_cnt) {
6798                 bnxt_hwrm_tunnel_dst_port_free(
6799                         bp, TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE);
6800         }
6801         bp->nge_port_cnt = 0;
6802 }
6803
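/* Enable or disable TPA (hardware LRO/GRO aggregation) on every
 * VNIC.  Stops at the first VNIC that rejects the new setting.
 */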
6804 static int bnxt_set_tpa(struct bnxt *bp, bool set_tpa)
6805 {
6806         int rc, i;
6807         u32 tpa_flags = 0;
6808
6809         if (set_tpa)
6810                 tpa_flags = bp->flags & BNXT_FLAG_TPA;
6811         for (i = 0; i < bp->nr_vnics; i++) {
6812                 rc = bnxt_hwrm_vnic_set_tpa(bp, i, tpa_flags);
6813                 if (rc) {
6814                         netdev_err(bp->dev, "hwrm vnic set tpa failure for vnic %d, rc: %x\n",
6815                                    i, rc);
6816                         return rc;
6817                 }
6818         }
6819         return 0;
6820 }
6821
6822 static void bnxt_hwrm_clear_vnic_rss(struct bnxt *bp)
6823 {
6824         int i;
6825
6826         for (i = 0; i < bp->nr_vnics; i++)
6827                 bnxt_hwrm_vnic_set_rss(bp, i, false);
6828 }
6829
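/* Free firmware-side resources in the reverse order of allocation:
 * L2 filters and RSS contexts first, then TPA settings and VNICs,
 * then the rings and ring groups, and finally (only when interrupts
 * are being re-initialized) the stat contexts and tunnel ports.
 */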
6830 static void bnxt_hwrm_resource_free(struct bnxt *bp, bool close_path,
6831                                     bool irq_re_init)
6832 {
6833         if (bp->vnic_info) {
6834                 bnxt_hwrm_clear_vnic_filter(bp);
6835                 /* clear all RSS settings before freeing the vnic ctx */
6836                 bnxt_hwrm_clear_vnic_rss(bp);
6837                 bnxt_hwrm_vnic_ctx_free(bp);
6838                 /* before freeing the vnic, undo the vnic TPA settings */
6839                 if (bp->flags & BNXT_FLAG_TPA)
6840                         bnxt_set_tpa(bp, false);
6841                 bnxt_hwrm_vnic_free(bp);
6842         }
6843         bnxt_hwrm_ring_free(bp, close_path);
6844         bnxt_hwrm_ring_grp_free(bp);
6845         if (irq_re_init) {
6846                 bnxt_hwrm_stat_ctx_free(bp);
6847                 bnxt_hwrm_free_tunnel_ports(bp);
6848         }
6849 }
6850
6851 static int bnxt_hwrm_set_br_mode(struct bnxt *bp, u16 br_mode)
6852 {
6853         struct hwrm_func_cfg_input req = {0};
6854         int rc;
6855
6856         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
6857         req.fid = cpu_to_le16(0xffff);
6858         req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_EVB_MODE);
6859         if (br_mode == BRIDGE_MODE_VEB)
6860                 req.evb_mode = FUNC_CFG_REQ_EVB_MODE_VEB;
6861         else if (br_mode == BRIDGE_MODE_VEPA)
6862                 req.evb_mode = FUNC_CFG_REQ_EVB_MODE_VEPA;
6863         else
6864                 return -EINVAL;
6865         rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
6866         if (rc)
6867                 rc = -EIO;
6868         return rc;
6869 }
6870
6871 static int bnxt_hwrm_set_cache_line_size(struct bnxt *bp, int size)
6872 {
6873         struct hwrm_func_cfg_input req = {0};
6874         int rc;
6875
6876         if (BNXT_VF(bp) || bp->hwrm_spec_code < 0x10803)
6877                 return 0;
6878
6879         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
6880         req.fid = cpu_to_le16(0xffff);
6881         req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_CACHE_LINESIZE);
6882         req.options = FUNC_CFG_REQ_OPTIONS_CACHE_LINESIZE_SIZE_64;
6883         if (size == 128)
6884                 req.options = FUNC_CFG_REQ_OPTIONS_CACHE_LINESIZE_SIZE_128;
6885
6886         rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
6887         if (rc)
6888                 rc = -EIO;
6889         return rc;
6890 }
6891
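/* Set up one VNIC end to end: allocate its RSS/CoS context(s),
 * configure the VNIC and its default ring group, program the RSS
 * hash, and enable header-data split when aggregation rings are in
 * use.  A VNIC flagged for the new RSS scheme skips the context
 * allocation here.
 */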
6892 static int __bnxt_setup_vnic(struct bnxt *bp, u16 vnic_id)
6893 {
6894         struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
6895         int rc;
6896
6897         if (vnic->flags & BNXT_VNIC_RFS_NEW_RSS_FLAG)
6898                 goto skip_rss_ctx;
6899
6900         /* allocate context for vnic */
6901         rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic_id, 0);
6902         if (rc) {
6903                 netdev_err(bp->dev, "hwrm vnic %d alloc failure rc: %x\n",
6904                            vnic_id, rc);
6905                 goto vnic_setup_err;
6906         }
6907         bp->rsscos_nr_ctxs++;
6908
6909         if (BNXT_CHIP_TYPE_NITRO_A0(bp)) {
6910                 rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic_id, 1);
6911                 if (rc) {
6912                         netdev_err(bp->dev, "hwrm vnic %d cos ctx alloc failure rc: %x\n",
6913                                    vnic_id, rc);
6914                         goto vnic_setup_err;
6915                 }
6916                 bp->rsscos_nr_ctxs++;
6917         }
6918
6919 skip_rss_ctx:
6920         /* configure default vnic, ring grp */
6921         rc = bnxt_hwrm_vnic_cfg(bp, vnic_id);
6922         if (rc) {
6923                 netdev_err(bp->dev, "hwrm vnic %d cfg failure rc: %x\n",
6924                            vnic_id, rc);
6925                 goto vnic_setup_err;
6926         }
6927
6928         /* Enable RSS hashing on vnic */
6929         rc = bnxt_hwrm_vnic_set_rss(bp, vnic_id, true);
6930         if (rc) {
6931                 netdev_err(bp->dev, "hwrm vnic %d set rss failure rc: %x\n",
6932                            vnic_id, rc);
6933                 goto vnic_setup_err;
6934         }
6935
6936         if (bp->flags & BNXT_FLAG_AGG_RINGS) {
6937                 rc = bnxt_hwrm_vnic_set_hds(bp, vnic_id);
6938                 if (rc) {
6939                         netdev_err(bp->dev, "hwrm vnic %d set hds failure rc: %x\n",
6940                                    vnic_id, rc);
6941                 }
6942         }
6943
6944 vnic_setup_err:
6945         return rc;
6946 }
6947
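/* P5 chip variant of VNIC setup: each RSS context covers up to
 * 64 RX rings, so allocate DIV_ROUND_UP(rx_nr_rings, 64) contexts
 * before programming RSS and configuring the VNIC.
 */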
6948 static int __bnxt_setup_vnic_p5(struct bnxt *bp, u16 vnic_id)
6949 {
6950         int rc, i, nr_ctxs;
6951
6952         nr_ctxs = DIV_ROUND_UP(bp->rx_nr_rings, 64);
6953         for (i = 0; i < nr_ctxs; i++) {
6954                 rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic_id, i);
6955                 if (rc) {
6956                         netdev_err(bp->dev, "hwrm vnic %d ctx %d alloc failure rc: %x\n",
6957                                    vnic_id, i, rc);
6958                         break;
6959                 }
6960                 bp->rsscos_nr_ctxs++;
6961         }
6962         if (i < nr_ctxs)
6963                 return -ENOMEM;
6964
6965         rc = bnxt_hwrm_vnic_set_rss_p5(bp, vnic_id, true);
6966         if (rc) {
6967                 netdev_err(bp->dev, "hwrm vnic %d set rss failure rc: %d\n",
6968                            vnic_id, rc);
6969                 return rc;
6970         }
6971         rc = bnxt_hwrm_vnic_cfg(bp, vnic_id);
6972         if (rc) {
6973                 netdev_err(bp->dev, "hwrm vnic %d cfg failure rc: %x\n",
6974                            vnic_id, rc);
6975                 return rc;
6976         }
6977         if (bp->flags & BNXT_FLAG_AGG_RINGS) {
6978                 rc = bnxt_hwrm_vnic_set_hds(bp, vnic_id);
6979                 if (rc) {
6980                         netdev_err(bp->dev, "hwrm vnic %d set hds failure rc: %x\n",
6981                                    vnic_id, rc);
6982                 }
6983         }
6984         return rc;
6985 }
6986
6987 static int bnxt_setup_vnic(struct bnxt *bp, u16 vnic_id)
6988 {
6989         if (bp->flags & BNXT_FLAG_CHIP_P5)
6990                 return __bnxt_setup_vnic_p5(bp, vnic_id);
6991         else
6992                 return __bnxt_setup_vnic(bp, vnic_id);
6993 }
6994
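/* For accelerated RFS, create one additional VNIC per RX ring
 * (VNIC 0 remains the default) so that ntuple filters can steer
 * flows to individual rings.  Compiled out when CONFIG_RFS_ACCEL
 * is not set.
 */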
6995 static int bnxt_alloc_rfs_vnics(struct bnxt *bp)
6996 {
6997 #ifdef CONFIG_RFS_ACCEL
6998         int i, rc = 0;
6999
7000         for (i = 0; i < bp->rx_nr_rings; i++) {
7001                 struct bnxt_vnic_info *vnic;
7002                 u16 vnic_id = i + 1;
7003                 u16 ring_id = i;
7004
7005                 if (vnic_id >= bp->nr_vnics)
7006                         break;
7007
7008                 vnic = &bp->vnic_info[vnic_id];
7009                 vnic->flags |= BNXT_VNIC_RFS_FLAG;
7010                 if (bp->flags & BNXT_FLAG_NEW_RSS_CAP)
7011                         vnic->flags |= BNXT_VNIC_RFS_NEW_RSS_FLAG;
7012                 rc = bnxt_hwrm_vnic_alloc(bp, vnic_id, ring_id, 1);
7013                 if (rc) {
7014                         netdev_err(bp->dev, "hwrm vnic %d alloc failure rc: %x\n",
7015                                    vnic_id, rc);
7016                         break;
7017                 }
7018                 rc = bnxt_setup_vnic(bp, vnic_id);
7019                 if (rc)
7020                         break;
7021         }
7022         return rc;
7023 #else
7024         return 0;
7025 #endif
7026 }
7027
7028 /* Allow PF and VF with default VLAN to be in promiscuous mode */
7029 static bool bnxt_promisc_ok(struct bnxt *bp)
7030 {
7031 #ifdef CONFIG_BNXT_SRIOV
7032         if (BNXT_VF(bp) && !bp->vf.vlan)
7033                 return false;
7034 #endif
7035         return true;
7036 }
7037
7038 static int bnxt_setup_nitroa0_vnic(struct bnxt *bp)
7039 {
7040         int rc;
7041
7042         rc = bnxt_hwrm_vnic_alloc(bp, 1, bp->rx_nr_rings - 1, 1);
7043         if (rc) {
7044                 netdev_err(bp->dev, "Cannot allocate special vnic for NS2 A0: %x\n",
7045                            rc);
7046                 return rc;
7047         }
7048
7049         rc = bnxt_hwrm_vnic_cfg(bp, 1);
7050         if (rc) {
7051                 netdev_err(bp->dev, "Cannot configure special vnic for NS2 A0: %x\n",
7052                            rc);
7053                 return rc;
7054         }
7055         return rc;
7056 }
7057
7058 static int bnxt_cfg_rx_mode(struct bnxt *);
7059 static bool bnxt_mc_list_updated(struct bnxt *, u32 *);
7060
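/* Bring up the chip-level state: allocate stat contexts (on IRQ
 * re-init), rings, ring groups and the default VNIC 0, then the RFS
 * VNICs and TPA if enabled, program the unicast MAC filter, and set
 * the RX mask (broadcast/promiscuous/multicast) and interrupt
 * coalescing.  Any failure unwinds via bnxt_hwrm_resource_free().
 */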
7061 static int bnxt_init_chip(struct bnxt *bp, bool irq_re_init)
7062 {
7063         struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
7064         int rc = 0;
7065         unsigned int rx_nr_rings = bp->rx_nr_rings;
7066
7067         if (irq_re_init) {
7068                 rc = bnxt_hwrm_stat_ctx_alloc(bp);
7069                 if (rc) {
7070                         netdev_err(bp->dev, "hwrm stat ctx alloc failure rc: %x\n",
7071                                    rc);
7072                         goto err_out;
7073                 }
7074         }
7075
7076         rc = bnxt_hwrm_ring_alloc(bp);
7077         if (rc) {
7078                 netdev_err(bp->dev, "hwrm ring alloc failure rc: %x\n", rc);
7079                 goto err_out;
7080         }
7081
7082         rc = bnxt_hwrm_ring_grp_alloc(bp);
7083         if (rc) {
7084                 netdev_err(bp->dev, "hwrm ring grp alloc failure rc: %x\n", rc);
7085                 goto err_out;
7086         }
7087
7088         if (BNXT_CHIP_TYPE_NITRO_A0(bp))
7089                 rx_nr_rings--;
7090
7091         /* default vnic 0 */
7092         rc = bnxt_hwrm_vnic_alloc(bp, 0, 0, rx_nr_rings);
7093         if (rc) {
7094                 netdev_err(bp->dev, "hwrm vnic alloc failure rc: %x\n", rc);
7095                 goto err_out;
7096         }
7097
7098         rc = bnxt_setup_vnic(bp, 0);
7099         if (rc)
7100                 goto err_out;
7101
7102         if (bp->flags & BNXT_FLAG_RFS) {
7103                 rc = bnxt_alloc_rfs_vnics(bp);
7104                 if (rc)
7105                         goto err_out;
7106         }
7107
7108         if (bp->flags & BNXT_FLAG_TPA) {
7109                 rc = bnxt_set_tpa(bp, true);
7110                 if (rc)
7111                         goto err_out;
7112         }
7113
7114         if (BNXT_VF(bp))
7115                 bnxt_update_vf_mac(bp);
7116
7117         /* Filter for default vnic 0 */
7118         rc = bnxt_hwrm_set_vnic_filter(bp, 0, 0, bp->dev->dev_addr);
7119         if (rc) {
7120                 netdev_err(bp->dev, "HWRM vnic filter failure rc: %x\n", rc);
7121                 goto err_out;
7122         }
7123         vnic->uc_filter_count = 1;
7124
7125         vnic->rx_mask = 0;
7126         if (bp->dev->flags & IFF_BROADCAST)
7127                 vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_BCAST;
7128
7129         if ((bp->dev->flags & IFF_PROMISC) && bnxt_promisc_ok(bp))
7130                 vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;
7131
7132         if (bp->dev->flags & IFF_ALLMULTI) {
7133                 vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
7134                 vnic->mc_list_count = 0;
7135         } else {
7136                 u32 mask = 0;
7137
7138                 bnxt_mc_list_updated(bp, &mask);
7139                 vnic->rx_mask |= mask;
7140         }
7141
7142         rc = bnxt_cfg_rx_mode(bp);
7143         if (rc)
7144                 goto err_out;
7145
7146         rc = bnxt_hwrm_set_coal(bp);
7147         if (rc)
7148                 netdev_warn(bp->dev, "HWRM set coalescing failure rc: %x\n",
7149                             rc);
7150
7151         if (BNXT_CHIP_TYPE_NITRO_A0(bp)) {
7152                 rc = bnxt_setup_nitroa0_vnic(bp);
7153                 if (rc)
7154                         netdev_err(bp->dev, "Special vnic setup failure for NS2 A0 rc: %x\n",
7155                                    rc);
7156         }
7157
7158         if (BNXT_VF(bp)) {
7159                 bnxt_hwrm_func_qcfg(bp);
7160                 netdev_update_features(bp->dev);
7161         }
7162
7163         return 0;
7164
7165 err_out:
7166         bnxt_hwrm_resource_free(bp, 0, true);
7167
7168         return rc;
7169 }
7170
7171 static int bnxt_shutdown_nic(struct bnxt *bp, bool irq_re_init)
7172 {
7173         bnxt_hwrm_resource_free(bp, 1, irq_re_init);
7174         return 0;
7175 }
7176
7177 static int bnxt_init_nic(struct bnxt *bp, bool irq_re_init)
7178 {
7179         bnxt_init_cp_rings(bp);
7180         bnxt_init_rx_rings(bp);
7181         bnxt_init_tx_rings(bp);
7182         bnxt_init_ring_grps(bp, irq_re_init);
7183         bnxt_init_vnics(bp);
7184
7185         return bnxt_init_chip(bp, irq_re_init);
7186 }
7187
7188 static int bnxt_set_real_num_queues(struct bnxt *bp)
7189 {
7190         int rc;
7191         struct net_device *dev = bp->dev;
7192
7193         rc = netif_set_real_num_tx_queues(dev, bp->tx_nr_rings -
7194                                           bp->tx_nr_rings_xdp);
7195         if (rc)
7196                 return rc;
7197
7198         rc = netif_set_real_num_rx_queues(dev, bp->rx_nr_rings);
7199         if (rc)
7200                 return rc;
7201
7202 #ifdef CONFIG_RFS_ACCEL
7203         if (bp->flags & BNXT_FLAG_RFS)
7204                 dev->rx_cpu_rmap = alloc_irq_cpu_rmap(bp->rx_nr_rings);
7205 #endif
7206
7207         return rc;
7208 }
7209
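/* Fit the requested RX and TX ring counts into "max" completion
 * rings.  With shared completion rings each count is simply capped
 * at max; otherwise RX and TX must fit in max together, and the
 * larger of the two is decremented until they do.
 *
 * Example (not shared): max = 8, *rx = 6, *tx = 6 trims alternately
 * down to *rx = 4, *tx = 4.
 */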
7210 static int bnxt_trim_rings(struct bnxt *bp, int *rx, int *tx, int max,
7211                            bool shared)
7212 {
7213         int _rx = *rx, _tx = *tx;
7214
7215         if (shared) {
7216                 *rx = min_t(int, _rx, max);
7217                 *tx = min_t(int, _tx, max);
7218         } else {
7219                 if (max < 2)
7220                         return -ENOMEM;
7221
7222                 while (_rx + _tx > max) {
7223                         if (_rx > _tx && _rx > 1)
7224                                 _rx--;
7225                         else if (_tx > 1)
7226                                 _tx--;
7227                 }
7228                 *rx = _rx;
7229                 *tx = _tx;
7230         }
7231         return 0;
7232 }
7233
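/* Distribute TX queues across traffic classes and name each MSI-X
 * vector after its use: "<dev>-TxRx-<n>" for shared rings, otherwise
 * "<dev>-rx-<n>" or "<dev>-tx-<n>".
 */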
7234 static void bnxt_setup_msix(struct bnxt *bp)
7235 {
7236         const int len = sizeof(bp->irq_tbl[0].name);
7237         struct net_device *dev = bp->dev;
7238         int tcs, i;
7239
7240         tcs = netdev_get_num_tc(dev);
7241         if (tcs > 1) {
7242                 int i, off, count;
7243
7244                 for (i = 0; i < tcs; i++) {
7245                         count = bp->tx_nr_rings_per_tc;
7246                         off = i * count;
7247                         netdev_set_tc_queue(dev, i, count, off);
7248                 }
7249         }
7250
7251         for (i = 0; i < bp->cp_nr_rings; i++) {
7252                 int map_idx = bnxt_cp_num_to_irq_num(bp, i);
7253                 char *attr;
7254
7255                 if (bp->flags & BNXT_FLAG_SHARED_RINGS)
7256                         attr = "TxRx";
7257                 else if (i < bp->rx_nr_rings)
7258                         attr = "rx";
7259                 else
7260                         attr = "tx";
7261
7262                 snprintf(bp->irq_tbl[map_idx].name, len, "%s-%s-%d", dev->name,
7263                          attr, i);
7264                 bp->irq_tbl[map_idx].handler = bnxt_msix;
7265         }
7266 }
7267
7268 static void bnxt_setup_inta(struct bnxt *bp)
7269 {
7270         const int len = sizeof(bp->irq_tbl[0].name);
7271
7272         if (netdev_get_num_tc(bp->dev))
7273                 netdev_reset_tc(bp->dev);
7274
7275         snprintf(bp->irq_tbl[0].name, len, "%s-%s-%d", bp->dev->name, "TxRx",
7276                  0);
7277         bp->irq_tbl[0].handler = bnxt_inta;
7278 }
7279
7280 static int bnxt_setup_int_mode(struct bnxt *bp)
7281 {
7282         int rc;
7283
7284         if (bp->flags & BNXT_FLAG_USING_MSIX)
7285                 bnxt_setup_msix(bp);
7286         else
7287                 bnxt_setup_inta(bp);
7288
7289         rc = bnxt_set_real_num_queues(bp);
7290         return rc;
7291 }
7292
7293 #ifdef CONFIG_RFS_ACCEL
7294 static unsigned int bnxt_get_max_func_rss_ctxs(struct bnxt *bp)
7295 {
7296         return bp->hw_resc.max_rsscos_ctxs;
7297 }
7298
7299 static unsigned int bnxt_get_max_func_vnics(struct bnxt *bp)
7300 {
7301         return bp->hw_resc.max_vnics;
7302 }
7303 #endif
7304
7305 unsigned int bnxt_get_max_func_stat_ctxs(struct bnxt *bp)
7306 {
7307         return bp->hw_resc.max_stat_ctxs;
7308 }
7309
7310 unsigned int bnxt_get_max_func_cp_rings(struct bnxt *bp)
7311 {
7312         return bp->hw_resc.max_cp_rings;
7313 }
7314
7315 static unsigned int bnxt_get_max_func_cp_rings_for_en(struct bnxt *bp)
7316 {
7317         unsigned int cp = bp->hw_resc.max_cp_rings;
7318
7319         if (!(bp->flags & BNXT_FLAG_CHIP_P5))
7320                 cp -= bnxt_get_ulp_msix_num(bp);
7321
7322         return cp;
7323 }
7324
7325 static unsigned int bnxt_get_max_func_irqs(struct bnxt *bp)
7326 {
7327         struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
7328
7329         if (bp->flags & BNXT_FLAG_CHIP_P5)
7330                 return min_t(unsigned int, hw_resc->max_irqs, hw_resc->max_nqs);
7331
7332         return min_t(unsigned int, hw_resc->max_irqs, hw_resc->max_cp_rings);
7333 }
7334
7335 static void bnxt_set_max_func_irqs(struct bnxt *bp, unsigned int max_irqs)
7336 {
7337         bp->hw_resc.max_irqs = max_irqs;
7338 }
7339
7340 unsigned int bnxt_get_avail_cp_rings_for_en(struct bnxt *bp)
7341 {
7342         unsigned int cp;
7343
7344         cp = bnxt_get_max_func_cp_rings_for_en(bp);
7345         if (bp->flags & BNXT_FLAG_CHIP_P5)
7346                 return cp - bp->rx_nr_rings - bp->tx_nr_rings;
7347         else
7348                 return cp - bp->cp_nr_rings;
7349 }
7350
7351 unsigned int bnxt_get_avail_stat_ctxs_for_en(struct bnxt *bp)
7352 {
7353         unsigned int stat;
7354
7355         stat = bnxt_get_max_func_stat_ctxs(bp) - bnxt_get_ulp_stat_ctxs(bp);
7356         stat -= bp->cp_nr_rings;
7357         return stat;
7358 }
7359
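/* Return how many MSI-X vectors can be made available to the ULP
 * (e.g. the RDMA driver) beyond what the L2 completion rings are
 * using, capped by the function's maximum IRQs under the new
 * resource manager.
 */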
7360 int bnxt_get_avail_msix(struct bnxt *bp, int num)
7361 {
7362         int max_cp = bnxt_get_max_func_cp_rings(bp);
7363         int max_irq = bnxt_get_max_func_irqs(bp);
7364         int total_req = bp->cp_nr_rings + num;
7365         int max_idx, avail_msix;
7366
7367         max_idx = bp->total_irqs;
7368         if (!(bp->flags & BNXT_FLAG_CHIP_P5))
7369                 max_idx = min_t(int, bp->total_irqs, max_cp);
7370         avail_msix = max_idx - bp->cp_nr_rings;
7371         if (!BNXT_NEW_RM(bp) || avail_msix >= num)
7372                 return avail_msix;
7373
7374         if (max_irq < total_req) {
7375                 num = max_irq - bp->cp_nr_rings;
7376                 if (num <= 0)
7377                         return 0;
7378         }
7379         return num;
7380 }
7381
7382 static int bnxt_get_num_msix(struct bnxt *bp)
7383 {
7384         if (!BNXT_NEW_RM(bp))
7385                 return bnxt_get_max_func_irqs(bp);
7386
7387         return bnxt_nq_rings_in_use(bp);
7388 }
7389
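/* Enable MSI-X: request one vector per completion ring (plus any
 * ULP vectors), accept fewer if the PCI core grants fewer, then
 * trim the RX/TX ring counts to match what was actually granted.
 */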
7390 static int bnxt_init_msix(struct bnxt *bp)
7391 {
7392         int i, total_vecs, max, rc = 0, min = 1, ulp_msix;
7393         struct msix_entry *msix_ent;
7394
7395         total_vecs = bnxt_get_num_msix(bp);
7396         max = bnxt_get_max_func_irqs(bp);
7397         if (total_vecs > max)
7398                 total_vecs = max;
7399
7400         if (!total_vecs)
7401                 return 0;
7402
7403         msix_ent = kcalloc(total_vecs, sizeof(struct msix_entry), GFP_KERNEL);
7404         if (!msix_ent)
7405                 return -ENOMEM;
7406
7407         for (i = 0; i < total_vecs; i++) {
7408                 msix_ent[i].entry = i;
7409                 msix_ent[i].vector = 0;
7410         }
7411
7412         if (!(bp->flags & BNXT_FLAG_SHARED_RINGS))
7413                 min = 2;
7414
7415         total_vecs = pci_enable_msix_range(bp->pdev, msix_ent, min, total_vecs);
7416         ulp_msix = bnxt_get_ulp_msix_num(bp);
7417         if (total_vecs < 0 || total_vecs < ulp_msix) {
7418                 rc = -ENODEV;
7419                 goto msix_setup_exit;
7420         }
7421
7422         bp->irq_tbl = kcalloc(total_vecs, sizeof(struct bnxt_irq), GFP_KERNEL);
7423         if (bp->irq_tbl) {
7424                 for (i = 0; i < total_vecs; i++)
7425                         bp->irq_tbl[i].vector = msix_ent[i].vector;
7426
7427                 bp->total_irqs = total_vecs;
7428                 /* Trim rings based upon num of vectors allocated */
7429                 rc = bnxt_trim_rings(bp, &bp->rx_nr_rings, &bp->tx_nr_rings,
7430                                      total_vecs - ulp_msix, min == 1);
7431                 if (rc)
7432                         goto msix_setup_exit;
7433
7434                 bp->cp_nr_rings = (min == 1) ?
7435                                   max_t(int, bp->tx_nr_rings, bp->rx_nr_rings) :
7436                                   bp->tx_nr_rings + bp->rx_nr_rings;
7437
7438         } else {
7439                 rc = -ENOMEM;
7440                 goto msix_setup_exit;
7441         }
7442         bp->flags |= BNXT_FLAG_USING_MSIX;
7443         kfree(msix_ent);
7444         return 0;
7445
7446 msix_setup_exit:
7447         netdev_err(bp->dev, "bnxt_init_msix err: %x\n", rc);
7448         kfree(bp->irq_tbl);
7449         bp->irq_tbl = NULL;
7450         pci_disable_msix(bp->pdev);
7451         kfree(msix_ent);
7452         return rc;
7453 }
7454
7455 static int bnxt_init_inta(struct bnxt *bp)
7456 {
7457         bp->irq_tbl = kcalloc(1, sizeof(struct bnxt_irq), GFP_KERNEL);
7458         if (!bp->irq_tbl)
7459                 return -ENOMEM;
7460
7461         bp->total_irqs = 1;
7462         bp->rx_nr_rings = 1;
7463         bp->tx_nr_rings = 1;
7464         bp->cp_nr_rings = 1;
7465         bp->flags |= BNXT_FLAG_SHARED_RINGS;
7466         bp->irq_tbl[0].vector = bp->pdev->irq;
7467         return 0;
7468 }
7469
7470 static int bnxt_init_int_mode(struct bnxt *bp)
7471 {
7472         int rc = 0;
7473
7474         if (bp->flags & BNXT_FLAG_MSIX_CAP)
7475                 rc = bnxt_init_msix(bp);
7476
7477         if (!(bp->flags & BNXT_FLAG_USING_MSIX) && BNXT_PF(bp)) {
7478                 /* fallback to INTA */
7479                 rc = bnxt_init_inta(bp);
7480         }
7481         return rc;
7482 }
7483
7484 static void bnxt_clear_int_mode(struct bnxt *bp)
7485 {
7486         if (bp->flags & BNXT_FLAG_USING_MSIX)
7487                 pci_disable_msix(bp->pdev);
7488
7489         kfree(bp->irq_tbl);
7490         bp->irq_tbl = NULL;
7491         bp->flags &= ~BNXT_FLAG_USING_MSIX;
7492 }
7493
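/* Reserve rings with the firmware.  Under the new resource manager
 * the MSI-X allocation may have to be torn down and re-established
 * around the reservation when the required vector count changes,
 * with the ULP's interrupts quiesced across the transition.
 */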
7494 int bnxt_reserve_rings(struct bnxt *bp)
7495 {
7496         int tcs = netdev_get_num_tc(bp->dev);
7497         bool reinit_irq = false;
7498         int rc;
7499
7500         if (!bnxt_need_reserve_rings(bp))
7501                 return 0;
7502
7503         if (BNXT_NEW_RM(bp) && (bnxt_get_num_msix(bp) != bp->total_irqs)) {
7504                 bnxt_ulp_irq_stop(bp);
7505                 bnxt_clear_int_mode(bp);
7506                 reinit_irq = true;
7507         }
7508         rc = __bnxt_reserve_rings(bp);
7509         if (reinit_irq) {
7510                 if (!rc)
7511                         rc = bnxt_init_int_mode(bp);
7512                 bnxt_ulp_irq_restart(bp, rc);
7513         }
7514         if (rc) {
7515                 netdev_err(bp->dev, "ring reservation/IRQ init failure rc: %d\n", rc);
7516                 return rc;
7517         }
7518         if (tcs && (bp->tx_nr_rings_per_tc * tcs != bp->tx_nr_rings)) {
7519                 netdev_err(bp->dev, "tx ring reservation failure\n");
7520                 netdev_reset_tc(bp->dev);
7521                 bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
7522                 return -ENOMEM;
7523         }
7524         return 0;
7525 }
7526
7527 static void bnxt_free_irq(struct bnxt *bp)
7528 {
7529         struct bnxt_irq *irq;
7530         int i;
7531
7532 #ifdef CONFIG_RFS_ACCEL
7533         free_irq_cpu_rmap(bp->dev->rx_cpu_rmap);
7534         bp->dev->rx_cpu_rmap = NULL;
7535 #endif
7536         if (!bp->irq_tbl || !bp->bnapi)
7537                 return;
7538
7539         for (i = 0; i < bp->cp_nr_rings; i++) {
7540                 int map_idx = bnxt_cp_num_to_irq_num(bp, i);
7541
7542                 irq = &bp->irq_tbl[map_idx];
7543                 if (irq->requested) {
7544                         if (irq->have_cpumask) {
7545                                 irq_set_affinity_hint(irq->vector, NULL);
7546                                 free_cpumask_var(irq->cpu_mask);
7547                                 irq->have_cpumask = 0;
7548                         }
7549                         free_irq(irq->vector, bp->bnapi[i]);
7550                 }
7551
7552                 irq->requested = 0;
7553         }
7554 }
7555
7556 static int bnxt_request_irq(struct bnxt *bp)
7557 {
7558         int i, j, rc = 0;
7559         unsigned long flags = 0;
7560 #ifdef CONFIG_RFS_ACCEL
7561         struct cpu_rmap *rmap;
7562 #endif
7563
7564         rc = bnxt_setup_int_mode(bp);
7565         if (rc) {
7566                 netdev_err(bp->dev, "bnxt_setup_int_mode err: %x\n",
7567                            rc);
7568                 return rc;
7569         }
7570 #ifdef CONFIG_RFS_ACCEL
7571         rmap = bp->dev->rx_cpu_rmap;
7572 #endif
7573         if (!(bp->flags & BNXT_FLAG_USING_MSIX))
7574                 flags = IRQF_SHARED;
7575
7576         for (i = 0, j = 0; i < bp->cp_nr_rings; i++) {
7577                 int map_idx = bnxt_cp_num_to_irq_num(bp, i);
7578                 struct bnxt_irq *irq = &bp->irq_tbl[map_idx];
7579
7580 #ifdef CONFIG_RFS_ACCEL
7581                 if (rmap && bp->bnapi[i]->rx_ring) {
7582                         rc = irq_cpu_rmap_add(rmap, irq->vector);
7583                         if (rc)
7584                                 netdev_warn(bp->dev, "failed adding irq rmap for ring %d\n",
7585                                             j);
7586                         j++;
7587                 }
7588 #endif
7589                 rc = request_irq(irq->vector, irq->handler, flags, irq->name,
7590                                  bp->bnapi[i]);
7591                 if (rc)
7592                         break;
7593
7594                 irq->requested = 1;
7595
7596                 if (zalloc_cpumask_var(&irq->cpu_mask, GFP_KERNEL)) {
7597                         int numa_node = dev_to_node(&bp->pdev->dev);
7598
7599                         irq->have_cpumask = 1;
7600                         cpumask_set_cpu(cpumask_local_spread(i, numa_node),
7601                                         irq->cpu_mask);
7602                         rc = irq_set_affinity_hint(irq->vector, irq->cpu_mask);
7603                         if (rc) {
7604                                 netdev_warn(bp->dev,
7605                                             "Set affinity failed, IRQ = %d\n",
7606                                             irq->vector);
7607                                 break;
7608                         }
7609                 }
7610         }
7611         return rc;
7612 }
7613
7614 static void bnxt_del_napi(struct bnxt *bp)
7615 {
7616         int i;
7617
7618         if (!bp->bnapi)
7619                 return;
7620
7621         for (i = 0; i < bp->cp_nr_rings; i++) {
7622                 struct bnxt_napi *bnapi = bp->bnapi[i];
7623
7624                 napi_hash_del(&bnapi->napi);
7625                 netif_napi_del(&bnapi->napi);
7626         }
7627         /* Since we called napi_hash_del() before netif_napi_del(), we need
7628          * to respect an RCU grace period before freeing the napi structures.
7629          */
7630         synchronize_net();
7631 }
7632
7633 static void bnxt_init_napi(struct bnxt *bp)
7634 {
7635         int i;
7636         unsigned int cp_nr_rings = bp->cp_nr_rings;
7637         struct bnxt_napi *bnapi;
7638
7639         if (bp->flags & BNXT_FLAG_USING_MSIX) {
7640                 int (*poll_fn)(struct napi_struct *, int) = bnxt_poll;
7641
7642                 if (bp->flags & BNXT_FLAG_CHIP_P5)
7643                         poll_fn = bnxt_poll_p5;
7644                 else if (BNXT_CHIP_TYPE_NITRO_A0(bp))
7645                         cp_nr_rings--;
7646                 for (i = 0; i < cp_nr_rings; i++) {
7647                         bnapi = bp->bnapi[i];
7648                         netif_napi_add(bp->dev, &bnapi->napi, poll_fn, 64);
7649                 }
7650                 if (BNXT_CHIP_TYPE_NITRO_A0(bp)) {
7651                         bnapi = bp->bnapi[cp_nr_rings];
7652                         netif_napi_add(bp->dev, &bnapi->napi,
7653                                        bnxt_poll_nitroa0, 64);
7654                 }
7655         } else {
7656                 bnapi = bp->bnapi[0];
7657                 netif_napi_add(bp->dev, &bnapi->napi, bnxt_poll, 64);
7658         }
7659 }
7660
7661 static void bnxt_disable_napi(struct bnxt *bp)
7662 {
7663         int i;
7664
7665         if (!bp->bnapi)
7666                 return;
7667
7668         for (i = 0; i < bp->cp_nr_rings; i++) {
7669                 struct bnxt_cp_ring_info *cpr = &bp->bnapi[i]->cp_ring;
7670
7671                 if (bp->bnapi[i]->rx_ring)
7672                         cancel_work_sync(&cpr->dim.work);
7673
7674                 napi_disable(&bp->bnapi[i]->napi);
7675         }
7676 }
7677
7678 static void bnxt_enable_napi(struct bnxt *bp)
7679 {
7680         int i;
7681
7682         for (i = 0; i < bp->cp_nr_rings; i++) {
7683                 struct bnxt_cp_ring_info *cpr = &bp->bnapi[i]->cp_ring;
7684                 bp->bnapi[i]->in_reset = false;
7685
7686                 if (bp->bnapi[i]->rx_ring) {
7687                         INIT_WORK(&cpr->dim.work, bnxt_dim_work);
7688                         cpr->dim.mode = NET_DIM_CQ_PERIOD_MODE_START_FROM_EQE;
7689                 }
7690                 napi_enable(&bp->bnapi[i]->napi);
7691         }
7692 }
7693
7694 void bnxt_tx_disable(struct bnxt *bp)
7695 {
7696         int i;
7697         struct bnxt_tx_ring_info *txr;
7698
7699         if (bp->tx_ring) {
7700                 for (i = 0; i < bp->tx_nr_rings; i++) {
7701                         txr = &bp->tx_ring[i];
7702                         txr->dev_state = BNXT_DEV_STATE_CLOSING;
7703                 }
7704         }
7705         /* Stop all TX queues */
7706         netif_tx_disable(bp->dev);
7707         netif_carrier_off(bp->dev);
7708 }
7709
7710 void bnxt_tx_enable(struct bnxt *bp)
7711 {
7712         int i;
7713         struct bnxt_tx_ring_info *txr;
7714
7715         for (i = 0; i < bp->tx_nr_rings; i++) {
7716                 txr = &bp->tx_ring[i];
7717                 txr->dev_state = 0;
7718         }
7719         netif_tx_wake_all_queues(bp->dev);
7720         if (bp->link_info.link_up)
7721                 netif_carrier_on(bp->dev);
7722 }
7723
7724 static void bnxt_report_link(struct bnxt *bp)
7725 {
7726         if (bp->link_info.link_up) {
7727                 const char *duplex;
7728                 const char *flow_ctrl;
7729                 u32 speed;
7730                 u16 fec;
7731
7732                 netif_carrier_on(bp->dev);
7733                 if (bp->link_info.duplex == BNXT_LINK_DUPLEX_FULL)
7734                         duplex = "full";
7735                 else
7736                         duplex = "half";
7737                 if (bp->link_info.pause == BNXT_LINK_PAUSE_BOTH)
7738                         flow_ctrl = "ON - receive & transmit";
7739                 else if (bp->link_info.pause == BNXT_LINK_PAUSE_TX)
7740                         flow_ctrl = "ON - transmit";
7741                 else if (bp->link_info.pause == BNXT_LINK_PAUSE_RX)
7742                         flow_ctrl = "ON - receive";
7743                 else
7744                         flow_ctrl = "none";
7745                 speed = bnxt_fw_to_ethtool_speed(bp->link_info.link_speed);
7746                 netdev_info(bp->dev, "NIC Link is Up, %u Mbps %s duplex, Flow control: %s\n",
7747                             speed, duplex, flow_ctrl);
7748                 if (bp->flags & BNXT_FLAG_EEE_CAP)
7749                         netdev_info(bp->dev, "EEE is %s\n",
7750                                     bp->eee.eee_active ? "active" :
7751                                                          "not active");
7752                 fec = bp->link_info.fec_cfg;
7753                 if (!(fec & PORT_PHY_QCFG_RESP_FEC_CFG_FEC_NONE_SUPPORTED))
7754                         netdev_info(bp->dev, "FEC autoneg %s encodings: %s\n",
7755                                     (fec & BNXT_FEC_AUTONEG) ? "on" : "off",
7756                                     (fec & BNXT_FEC_ENC_BASE_R) ? "BaseR" :
7757                                      (fec & BNXT_FEC_ENC_RS) ? "RS" : "None");
7758         } else {
7759                 netif_carrier_off(bp->dev);
7760                 netdev_err(bp->dev, "NIC Link is Down\n");
7761         }
7762 }
7763
7764 static int bnxt_hwrm_phy_qcaps(struct bnxt *bp)
7765 {
7766         int rc = 0;
7767         struct hwrm_port_phy_qcaps_input req = {0};
7768         struct hwrm_port_phy_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
7769         struct bnxt_link_info *link_info = &bp->link_info;
7770
7771         if (bp->hwrm_spec_code < 0x10201)
7772                 return 0;
7773
7774         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_QCAPS, -1, -1);
7775
7776         mutex_lock(&bp->hwrm_cmd_lock);
7777         rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
7778         if (rc)
7779                 goto hwrm_phy_qcaps_exit;
7780
7781         if (resp->flags & PORT_PHY_QCAPS_RESP_FLAGS_EEE_SUPPORTED) {
7782                 struct ethtool_eee *eee = &bp->eee;
7783                 u16 fw_speeds = le16_to_cpu(resp->supported_speeds_eee_mode);
7784
7785                 bp->flags |= BNXT_FLAG_EEE_CAP;
7786                 eee->supported = _bnxt_fw_to_ethtool_adv_spds(fw_speeds, 0);
7787                 bp->lpi_tmr_lo = le32_to_cpu(resp->tx_lpi_timer_low) &
7788                                  PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_LOW_MASK;
7789                 bp->lpi_tmr_hi = le32_to_cpu(resp->valid_tx_lpi_timer_high) &
7790                                  PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_HIGH_MASK;
7791         }
7792         if (resp->flags & PORT_PHY_QCAPS_RESP_FLAGS_EXTERNAL_LPBK_SUPPORTED) {
7793                 if (bp->test_info)
7794                         bp->test_info->flags |= BNXT_TEST_FL_EXT_LPBK;
7795         }
7796         if (resp->supported_speeds_auto_mode)
7797                 link_info->support_auto_speeds =
7798                         le16_to_cpu(resp->supported_speeds_auto_mode);
7799
7800         bp->port_count = resp->port_cnt;
7801
7802 hwrm_phy_qcaps_exit:
7803         mutex_unlock(&bp->hwrm_cmd_lock);
7804         return rc;
7805 }
7806
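/* Query the PHY state via HWRM_PORT_PHY_QCFG and refresh the cached
 * link_info.  When chng_link_state is set, report link up/down
 * transitions; also re-advertise if a previously advertised speed is
 * no longer supported (single-PF only).
 */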
7807 static int bnxt_update_link(struct bnxt *bp, bool chng_link_state)
7808 {
7809         int rc = 0;
7810         struct bnxt_link_info *link_info = &bp->link_info;
7811         struct hwrm_port_phy_qcfg_input req = {0};
7812         struct hwrm_port_phy_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
7813         u8 link_up = link_info->link_up;
7814         u16 diff;
7815
7816         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_QCFG, -1, -1);
7817
7818         mutex_lock(&bp->hwrm_cmd_lock);
7819         rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
7820         if (rc) {
7821                 mutex_unlock(&bp->hwrm_cmd_lock);
7822                 return rc;
7823         }
7824
7825         memcpy(&link_info->phy_qcfg_resp, resp, sizeof(*resp));
7826         link_info->phy_link_status = resp->link;
7827         link_info->duplex = resp->duplex_cfg;
7828         if (bp->hwrm_spec_code >= 0x10800)
7829                 link_info->duplex = resp->duplex_state;
7830         link_info->pause = resp->pause;
7831         link_info->auto_mode = resp->auto_mode;
7832         link_info->auto_pause_setting = resp->auto_pause;
7833         link_info->lp_pause = resp->link_partner_adv_pause;
7834         link_info->force_pause_setting = resp->force_pause;
7835         link_info->duplex_setting = resp->duplex_cfg;
7836         if (link_info->phy_link_status == BNXT_LINK_LINK)
7837                 link_info->link_speed = le16_to_cpu(resp->link_speed);
7838         else
7839                 link_info->link_speed = 0;
7840         link_info->force_link_speed = le16_to_cpu(resp->force_link_speed);
7841         link_info->support_speeds = le16_to_cpu(resp->support_speeds);
7842         link_info->auto_link_speeds = le16_to_cpu(resp->auto_link_speed_mask);
7843         link_info->lp_auto_link_speeds =
7844                 le16_to_cpu(resp->link_partner_adv_speeds);
7845         link_info->preemphasis = le32_to_cpu(resp->preemphasis);
7846         link_info->phy_ver[0] = resp->phy_maj;
7847         link_info->phy_ver[1] = resp->phy_min;
7848         link_info->phy_ver[2] = resp->phy_bld;
7849         link_info->media_type = resp->media_type;
7850         link_info->phy_type = resp->phy_type;
7851         link_info->transceiver = resp->xcvr_pkg_type;
7852         link_info->phy_addr = resp->eee_config_phy_addr &
7853                               PORT_PHY_QCFG_RESP_PHY_ADDR_MASK;
7854         link_info->module_status = resp->module_status;
7855
7856         if (bp->flags & BNXT_FLAG_EEE_CAP) {
7857                 struct ethtool_eee *eee = &bp->eee;
7858                 u16 fw_speeds;
7859
7860                 eee->eee_active = 0;
7861                 if (resp->eee_config_phy_addr &
7862                     PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_ACTIVE) {
7863                         eee->eee_active = 1;
7864                         fw_speeds = le16_to_cpu(
7865                                 resp->link_partner_adv_eee_link_speed_mask);
7866                         eee->lp_advertised =
7867                                 _bnxt_fw_to_ethtool_adv_spds(fw_speeds, 0);
7868                 }
7869
7870                 /* Pull initial EEE config */
7871                 if (!chng_link_state) {
7872                         if (resp->eee_config_phy_addr &
7873                             PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_ENABLED)
7874                                 eee->eee_enabled = 1;
7875
7876                         fw_speeds = le16_to_cpu(resp->adv_eee_link_speed_mask);
7877                         eee->advertised =
7878                                 _bnxt_fw_to_ethtool_adv_spds(fw_speeds, 0);
7879
7880                         if (resp->eee_config_phy_addr &
7881                             PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_TX_LPI) {
7882                                 __le32 tmr;
7883
7884                                 eee->tx_lpi_enabled = 1;
7885                                 tmr = resp->xcvr_identifier_type_tx_lpi_timer;
7886                                 eee->tx_lpi_timer = le32_to_cpu(tmr) &
7887                                         PORT_PHY_QCFG_RESP_TX_LPI_TIMER_MASK;
7888                         }
7889                 }
7890         }
7891
7892         link_info->fec_cfg = PORT_PHY_QCFG_RESP_FEC_CFG_FEC_NONE_SUPPORTED;
7893         if (bp->hwrm_spec_code >= 0x10504)
7894                 link_info->fec_cfg = le16_to_cpu(resp->fec_cfg);
7895
7896         /* TODO: need to add more logic to report VF link */
7897         if (chng_link_state) {
7898                 if (link_info->phy_link_status == BNXT_LINK_LINK)
7899                         link_info->link_up = 1;
7900                 else
7901                         link_info->link_up = 0;
7902                 if (link_up != link_info->link_up)
7903                         bnxt_report_link(bp);
7904         } else {
7905                 /* always link down if not required to update link state */
7906                 link_info->link_up = 0;
7907         }
7908         mutex_unlock(&bp->hwrm_cmd_lock);
7909
7910         if (!BNXT_SINGLE_PF(bp))
7911                 return 0;
7912
7913         diff = link_info->support_auto_speeds ^ link_info->advertising;
7914         if ((link_info->support_auto_speeds | diff) !=
7915             link_info->support_auto_speeds) {
7916                 /* An advertised speed is no longer supported, so we need to
7917                  * update the advertisement settings.  Caller holds RTNL
7918                  * so we can modify link settings.
7919                  */
7920                 link_info->advertising = link_info->support_auto_speeds;
7921                 if (link_info->autoneg & BNXT_AUTONEG_SPEED)
7922                         bnxt_hwrm_set_link_setting(bp, true, false);
7923         }
7924         return 0;
7925 }
7926
7927 static void bnxt_get_port_module_status(struct bnxt *bp)
7928 {
7929         struct bnxt_link_info *link_info = &bp->link_info;
7930         struct hwrm_port_phy_qcfg_output *resp = &link_info->phy_qcfg_resp;
7931         u8 module_status;
7932
7933         if (bnxt_update_link(bp, true))
7934                 return;
7935
7936         module_status = link_info->module_status;
7937         switch (module_status) {
7938         case PORT_PHY_QCFG_RESP_MODULE_STATUS_DISABLETX:
7939         case PORT_PHY_QCFG_RESP_MODULE_STATUS_PWRDOWN:
7940         case PORT_PHY_QCFG_RESP_MODULE_STATUS_WARNINGMSG:
7941                 netdev_warn(bp->dev, "Unqualified SFP+ module detected on port %d\n",
7942                             bp->pf.port_id);
7943                 if (bp->hwrm_spec_code >= 0x10201) {
7944                         netdev_warn(bp->dev, "Module part number %s\n",
7945                                     resp->phy_vendor_partnumber);
7946                 }
7947                 if (module_status == PORT_PHY_QCFG_RESP_MODULE_STATUS_DISABLETX)
7948                         netdev_warn(bp->dev, "TX is disabled\n");
7949                 if (module_status == PORT_PHY_QCFG_RESP_MODULE_STATUS_PWRDOWN)
7950                         netdev_warn(bp->dev, "SFP+ module is shutdown\n");
7951         }
7952 }
7953
7954 static void
7955 bnxt_hwrm_set_pause_common(struct bnxt *bp, struct hwrm_port_phy_cfg_input *req)
7956 {
7957         if (bp->link_info.autoneg & BNXT_AUTONEG_FLOW_CTRL) {
7958                 if (bp->hwrm_spec_code >= 0x10201)
7959                         req->auto_pause =
7960                                 PORT_PHY_CFG_REQ_AUTO_PAUSE_AUTONEG_PAUSE;
7961                 if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_RX)
7962                         req->auto_pause |= PORT_PHY_CFG_REQ_AUTO_PAUSE_RX;
7963                 if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_TX)
7964                         req->auto_pause |= PORT_PHY_CFG_REQ_AUTO_PAUSE_TX;
7965                 req->enables |=
7966                         cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_PAUSE);
7967         } else {
7968                 if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_RX)
7969                         req->force_pause |= PORT_PHY_CFG_REQ_FORCE_PAUSE_RX;
7970                 if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_TX)
7971                         req->force_pause |= PORT_PHY_CFG_REQ_FORCE_PAUSE_TX;
7972                 req->enables |=
7973                         cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_FORCE_PAUSE);
7974                 if (bp->hwrm_spec_code >= 0x10201) {
7975                         req->auto_pause = req->force_pause;
7976                         req->enables |= cpu_to_le32(
7977                                 PORT_PHY_CFG_REQ_ENABLES_AUTO_PAUSE);
7978                 }
7979         }
7980 }
7981
7982 static void bnxt_hwrm_set_link_common(struct bnxt *bp,
7983                                       struct hwrm_port_phy_cfg_input *req)
7984 {
7985         u8 autoneg = bp->link_info.autoneg;
7986         u16 fw_link_speed = bp->link_info.req_link_speed;
7987         u16 advertising = bp->link_info.advertising;
7988
7989         if (autoneg & BNXT_AUTONEG_SPEED) {
7990                 req->auto_mode |=
7991                         PORT_PHY_CFG_REQ_AUTO_MODE_SPEED_MASK;
7992
7993                 req->enables |= cpu_to_le32(
7994                         PORT_PHY_CFG_REQ_ENABLES_AUTO_LINK_SPEED_MASK);
7995                 req->auto_link_speed_mask = cpu_to_le16(advertising);
7996
7997                 req->enables |= cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_MODE);
7998                 req->flags |=
7999                         cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_RESTART_AUTONEG);
8000         } else {
8001                 req->force_link_speed = cpu_to_le16(fw_link_speed);
8002                 req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_FORCE);
8003         }
8004
8005         /* tell chimp that the setting takes effect immediately */
8006         req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_RESET_PHY);
8007 }
8008
8009 int bnxt_hwrm_set_pause(struct bnxt *bp)
8010 {
8011         struct hwrm_port_phy_cfg_input req = {0};
8012         int rc;
8013
8014         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_CFG, -1, -1);
8015         bnxt_hwrm_set_pause_common(bp, &req);
8016
8017         if ((bp->link_info.autoneg & BNXT_AUTONEG_FLOW_CTRL) ||
8018             bp->link_info.force_link_chng)
8019                 bnxt_hwrm_set_link_common(bp, &req);
8020
8021         mutex_lock(&bp->hwrm_cmd_lock);
8022         rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
8023         if (!rc && !(bp->link_info.autoneg & BNXT_AUTONEG_FLOW_CTRL)) {
8024                 /* since changing of pause setting doesn't trigger any link
8025                  * change event, the driver needs to update the current pause
8026                  * result upon successfully return of the phy_cfg command
8027                  */
8028                 bp->link_info.pause =
8029                 bp->link_info.force_pause_setting = bp->link_info.req_flow_ctrl;
8030                 bp->link_info.auto_pause_setting = 0;
8031                 if (!bp->link_info.force_link_chng)
8032                         bnxt_report_link(bp);
8033         }
8034         bp->link_info.force_link_chng = false;
8035         mutex_unlock(&bp->hwrm_cmd_lock);
8036         return rc;
8037 }
8038
8039 static void bnxt_hwrm_set_eee(struct bnxt *bp,
8040                               struct hwrm_port_phy_cfg_input *req)
8041 {
8042         struct ethtool_eee *eee = &bp->eee;
8043
8044         if (eee->eee_enabled) {
8045                 u16 eee_speeds;
8046                 u32 flags = PORT_PHY_CFG_REQ_FLAGS_EEE_ENABLE;
8047
8048                 if (eee->tx_lpi_enabled)
8049                         flags |= PORT_PHY_CFG_REQ_FLAGS_EEE_TX_LPI_ENABLE;
8050                 else
8051                         flags |= PORT_PHY_CFG_REQ_FLAGS_EEE_TX_LPI_DISABLE;
8052
8053                 req->flags |= cpu_to_le32(flags);
8054                 eee_speeds = bnxt_get_fw_auto_link_speeds(eee->advertised);
8055                 req->eee_link_speed_mask = cpu_to_le16(eee_speeds);
8056                 req->tx_lpi_timer = cpu_to_le32(eee->tx_lpi_timer);
8057         } else {
8058                 req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_EEE_DISABLE);
8059         }
8060 }
8061
8062 int bnxt_hwrm_set_link_setting(struct bnxt *bp, bool set_pause, bool set_eee)
8063 {
8064         struct hwrm_port_phy_cfg_input req = {0};
8065
8066         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_CFG, -1, -1);
8067         if (set_pause)
8068                 bnxt_hwrm_set_pause_common(bp, &req);
8069
8070         bnxt_hwrm_set_link_common(bp, &req);
8071
8072         if (set_eee)
8073                 bnxt_hwrm_set_eee(bp, &req);
8074         return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
8075 }
8076
8077 static int bnxt_hwrm_shutdown_link(struct bnxt *bp)
8078 {
8079         struct hwrm_port_phy_cfg_input req = {0};
8080
8081         if (!BNXT_SINGLE_PF(bp))
8082                 return 0;
8083
8084         if (pci_num_vf(bp->pdev))
8085                 return 0;
8086
8087         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_CFG, -1, -1);
8088         req.flags = cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_FORCE_LINK_DWN);
8089         return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
8090 }
8091
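/* Tell the firmware the interface is going up or down.  If the
 * firmware indicates that resources changed while the driver was
 * down, re-query the resource caps and zero the cached reservations
 * so that everything gets re-reserved on the way up.
 */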
8092 static int bnxt_hwrm_if_change(struct bnxt *bp, bool up)
8093 {
8094         struct hwrm_func_drv_if_change_output *resp = bp->hwrm_cmd_resp_addr;
8095         struct hwrm_func_drv_if_change_input req = {0};
8096         bool resc_reinit = false;
8097         int rc;
8098
8099         if (!(bp->fw_cap & BNXT_FW_CAP_IF_CHANGE))
8100                 return 0;
8101
8102         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_DRV_IF_CHANGE, -1, -1);
8103         if (up)
8104                 req.flags = cpu_to_le32(FUNC_DRV_IF_CHANGE_REQ_FLAGS_UP);
8105         mutex_lock(&bp->hwrm_cmd_lock);
8106         rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
8107         if (!rc && (resp->flags &
8108                     cpu_to_le32(FUNC_DRV_IF_CHANGE_RESP_FLAGS_RESC_CHANGE)))
8109                 resc_reinit = true;
8110         mutex_unlock(&bp->hwrm_cmd_lock);
8111
8112         if (up && resc_reinit && BNXT_NEW_RM(bp)) {
8113                 struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
8114
8115                 rc = bnxt_hwrm_func_resc_qcaps(bp, true);
8116                 hw_resc->resv_cp_rings = 0;
8117                 hw_resc->resv_stat_ctxs = 0;
8118                 hw_resc->resv_irqs = 0;
8119                 hw_resc->resv_tx_rings = 0;
8120                 hw_resc->resv_rx_rings = 0;
8121                 hw_resc->resv_hw_ring_grps = 0;
8122                 hw_resc->resv_vnics = 0;
8123                 bp->tx_nr_rings = 0;
8124                 bp->rx_nr_rings = 0;
8125         }
8126         return rc;
8127 }
8128
8129 static int bnxt_hwrm_port_led_qcaps(struct bnxt *bp)
8130 {
8131         struct hwrm_port_led_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
8132         struct hwrm_port_led_qcaps_input req = {0};
8133         struct bnxt_pf_info *pf = &bp->pf;
8134         int rc;
8135
8136         if (BNXT_VF(bp) || bp->hwrm_spec_code < 0x10601)
8137                 return 0;
8138
8139         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_LED_QCAPS, -1, -1);
8140         req.port_id = cpu_to_le16(pf->port_id);
8141         mutex_lock(&bp->hwrm_cmd_lock);
8142         rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
8143         if (rc) {
8144                 mutex_unlock(&bp->hwrm_cmd_lock);
8145                 return rc;
8146         }
8147         if (resp->num_leds > 0 && resp->num_leds < BNXT_MAX_LED) {
8148                 int i;
8149
8150                 bp->num_leds = resp->num_leds;
8151                 memcpy(bp->leds, &resp->led0_id, sizeof(bp->leds[0]) *
8152                                                  bp->num_leds);
8153                 for (i = 0; i < bp->num_leds; i++) {
8154                         struct bnxt_led_info *led = &bp->leds[i];
8155                         __le16 caps = led->led_state_caps;
8156
8157                         if (!led->led_group_id ||
8158                             !BNXT_LED_ALT_BLINK_CAP(caps)) {
8159                                 bp->num_leds = 0;
8160                                 break;
8161                         }
8162                 }
8163         }
8164         mutex_unlock(&bp->hwrm_cmd_lock);
8165         return 0;
8166 }
8167
8168 int bnxt_hwrm_alloc_wol_fltr(struct bnxt *bp)
8169 {
8170         struct hwrm_wol_filter_alloc_input req = {0};
8171         struct hwrm_wol_filter_alloc_output *resp = bp->hwrm_cmd_resp_addr;
8172         int rc;
8173
8174         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_WOL_FILTER_ALLOC, -1, -1);
8175         req.port_id = cpu_to_le16(bp->pf.port_id);
8176         req.wol_type = WOL_FILTER_ALLOC_REQ_WOL_TYPE_MAGICPKT;
8177         req.enables = cpu_to_le32(WOL_FILTER_ALLOC_REQ_ENABLES_MAC_ADDRESS);
8178         memcpy(req.mac_address, bp->dev->dev_addr, ETH_ALEN);
8179         mutex_lock(&bp->hwrm_cmd_lock);
8180         rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
8181         if (!rc)
8182                 bp->wol_filter_id = resp->wol_filter_id;
8183         mutex_unlock(&bp->hwrm_cmd_lock);
8184         return rc;
8185 }
8186
8187 int bnxt_hwrm_free_wol_fltr(struct bnxt *bp)
8188 {
8189         struct hwrm_wol_filter_free_input req = {0};
8190         int rc;
8191
8192         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_WOL_FILTER_FREE, -1, -1);
8193         req.port_id = cpu_to_le16(bp->pf.port_id);
8194         req.enables = cpu_to_le32(WOL_FILTER_FREE_REQ_ENABLES_WOL_FILTER_ID);
8195         req.wol_filter_id = bp->wol_filter_id;
8196         rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
8197         return rc;
8198 }
8199
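/* Query one WoL filter entry from the firmware.  If the entry is a
 * magic-packet filter, record it and mark WoL as enabled.  Returns the
 * next filter handle so the caller can walk the whole list.
 */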
8200 static u16 bnxt_hwrm_get_wol_fltrs(struct bnxt *bp, u16 handle)
8201 {
8202         struct hwrm_wol_filter_qcfg_input req = {0};
8203         struct hwrm_wol_filter_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
8204         u16 next_handle = 0;
8205         int rc;
8206
8207         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_WOL_FILTER_QCFG, -1, -1);
8208         req.port_id = cpu_to_le16(bp->pf.port_id);
8209         req.handle = cpu_to_le16(handle);
8210         mutex_lock(&bp->hwrm_cmd_lock);
8211         rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
8212         if (!rc) {
8213                 next_handle = le16_to_cpu(resp->next_handle);
8214                 if (next_handle != 0) {
8215                         if (resp->wol_type ==
8216                             WOL_FILTER_ALLOC_REQ_WOL_TYPE_MAGICPKT) {
8217                                 bp->wol = 1;
8218                                 bp->wol_filter_id = resp->wol_filter_id;
8219                         }
8220                 }
8221         }
8222         mutex_unlock(&bp->hwrm_cmd_lock);
8223         return next_handle;
8224 }
8225
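/* Walk the firmware's WoL filter list on the PF, starting from handle 0,
 * until no further handle (0 or 0xffff) is returned.
 */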
8226 static void bnxt_get_wol_settings(struct bnxt *bp)
8227 {
8228         u16 handle = 0;
8229
8230         if (!BNXT_PF(bp) || !(bp->flags & BNXT_FLAG_WOL_CAP))
8231                 return;
8232
8233         do {
8234                 handle = bnxt_hwrm_get_wol_fltrs(bp, handle);
8235         } while (handle && handle != 0xffff);
8236 }
8237
8238 #ifdef CONFIG_BNXT_HWMON
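/* hwmon show handler for temp1_input: reads the chip temperature via
 * HWRM_TEMP_MONITOR_QUERY and reports it in millidegrees Celsius.
 */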
8239 static ssize_t bnxt_show_temp(struct device *dev,
8240                               struct device_attribute *devattr, char *buf)
8241 {
8242         struct hwrm_temp_monitor_query_input req = {0};
8243         struct hwrm_temp_monitor_query_output *resp;
8244         struct bnxt *bp = dev_get_drvdata(dev);
8245         u32 temp = 0;
8246
8247         resp = bp->hwrm_cmd_resp_addr;
8248         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_TEMP_MONITOR_QUERY, -1, -1);
8249         mutex_lock(&bp->hwrm_cmd_lock);
8250         if (!_hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT))
8251                 temp = resp->temp * 1000; /* display millidegree */
8252         mutex_unlock(&bp->hwrm_cmd_lock);
8253
8254         return sprintf(buf, "%u\n", temp);
8255 }
8256 static SENSOR_DEVICE_ATTR(temp1_input, 0444, bnxt_show_temp, NULL, 0);
8257
8258 static struct attribute *bnxt_attrs[] = {
8259         &sensor_dev_attr_temp1_input.dev_attr.attr,
8260         NULL
8261 };
8262 ATTRIBUTE_GROUPS(bnxt);
8263
8264 static void bnxt_hwmon_close(struct bnxt *bp)
8265 {
8266         if (bp->hwmon_dev) {
8267                 hwmon_device_unregister(bp->hwmon_dev);
8268                 bp->hwmon_dev = NULL;
8269         }
8270 }
8271
8272 static void bnxt_hwmon_open(struct bnxt *bp)
8273 {
8274         struct pci_dev *pdev = bp->pdev;
8275
8276         bp->hwmon_dev = hwmon_device_register_with_groups(&pdev->dev,
8277                                                           DRV_MODULE_NAME, bp,
8278                                                           bnxt_groups);
8279         if (IS_ERR(bp->hwmon_dev)) {
8280                 bp->hwmon_dev = NULL;
8281                 dev_warn(&pdev->dev, "Cannot register hwmon device\n");
8282         }
8283 }
8284 #else
8285 static void bnxt_hwmon_close(struct bnxt *bp)
8286 {
8287 }
8288
8289 static void bnxt_hwmon_open(struct bnxt *bp)
8290 {
8291 }
8292 #endif
8293
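/* Validate the current EEE configuration.  EEE requires autoneg, and the
 * advertised EEE speeds must be a subset of the autoneg advertisement;
 * returns false (after fixing up bp->eee) if the settings must be reapplied.
 */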
8294 static bool bnxt_eee_config_ok(struct bnxt *bp)
8295 {
8296         struct ethtool_eee *eee = &bp->eee;
8297         struct bnxt_link_info *link_info = &bp->link_info;
8298
8299         if (!(bp->flags & BNXT_FLAG_EEE_CAP))
8300                 return true;
8301
8302         if (eee->eee_enabled) {
8303                 u32 advertising =
8304                         _bnxt_fw_to_ethtool_adv_spds(link_info->advertising, 0);
8305
8306                 if (!(link_info->autoneg & BNXT_AUTONEG_SPEED)) {
8307                         eee->eee_enabled = 0;
8308                         return false;
8309                 }
8310                 if (eee->advertised & ~advertising) {
8311                         eee->advertised = advertising & eee->supported;
8312                         return false;
8313                 }
8314         }
8315         return true;
8316 }
8317
8318 static int bnxt_update_phy_setting(struct bnxt *bp)
8319 {
8320         int rc;
8321         bool update_link = false;
8322         bool update_pause = false;
8323         bool update_eee = false;
8324         struct bnxt_link_info *link_info = &bp->link_info;
8325
8326         rc = bnxt_update_link(bp, true);
8327         if (rc) {
8328                 netdev_err(bp->dev, "failed to update link (rc: %x)\n",
8329                            rc);
8330                 return rc;
8331         }
8332         if (!BNXT_SINGLE_PF(bp))
8333                 return 0;
8334
8335         if ((link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL) &&
8336             (link_info->auto_pause_setting & BNXT_LINK_PAUSE_BOTH) !=
8337             link_info->req_flow_ctrl)
8338                 update_pause = true;
8339         if (!(link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL) &&
8340             link_info->force_pause_setting != link_info->req_flow_ctrl)
8341                 update_pause = true;
8342         if (!(link_info->autoneg & BNXT_AUTONEG_SPEED)) {
8343                 if (BNXT_AUTO_MODE(link_info->auto_mode))
8344                         update_link = true;
8345                 if (link_info->req_link_speed != link_info->force_link_speed)
8346                         update_link = true;
8347                 if (link_info->req_duplex != link_info->duplex_setting)
8348                         update_link = true;
8349         } else {
8350                 if (link_info->auto_mode == BNXT_LINK_AUTO_NONE)
8351                         update_link = true;
8352                 if (link_info->advertising != link_info->auto_link_speeds)
8353                         update_link = true;
8354         }
8355
8356         /* The last close may have shut down the link, so we need to call
8357          * PHY_CFG to bring it back up.
8358          */
8359         if (!netif_carrier_ok(bp->dev))
8360                 update_link = true;
8361
8362         if (!bnxt_eee_config_ok(bp))
8363                 update_eee = true;
8364
8365         if (update_link)
8366                 rc = bnxt_hwrm_set_link_setting(bp, update_pause, update_eee);
8367         else if (update_pause)
8368                 rc = bnxt_hwrm_set_pause(bp);
8369         if (rc) {
8370                 netdev_err(bp->dev, "failed to update phy setting (rc: %x)\n",
8371                            rc);
8372                 return rc;
8373         }
8374
8375         return rc;
8376 }
8377
8378 /* Common routine to pre-map certain register blocks to different GRC windows.
8379  * A PF has 16 4K windows and a VF has 4 4K windows. However, only 15 windows
8380  * in the PF and 3 windows in the VF can be customized to map different
8381  * register blocks.
8382  */
8383 static void bnxt_preset_reg_win(struct bnxt *bp)
8384 {
8385         if (BNXT_PF(bp)) {
8386                 /* CAG registers map to GRC window #4 */
8387                 writel(BNXT_CAG_REG_BASE,
8388                        bp->bar0 + BNXT_GRCPF_REG_WINDOW_BASE_OUT + 12);
8389         }
8390 }
8391
8392 static int bnxt_init_dflt_ring_mode(struct bnxt *bp);
8393
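/* Core open path: reserve rings, allocate ring memory, set up NAPI and
 * IRQs (if irq_re_init), initialize the NIC through firmware, update PHY
 * settings (if link_re_init), then enable interrupts and the TX queues.
 */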
8394 static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
8395 {
8396         int rc = 0;
8397
8398         bnxt_preset_reg_win(bp);
8399         netif_carrier_off(bp->dev);
8400         if (irq_re_init) {
8401                 /* Reserve rings now if none were reserved at driver probe. */
8402                 rc = bnxt_init_dflt_ring_mode(bp);
8403                 if (rc) {
8404                         netdev_err(bp->dev, "Failed to reserve default rings at open\n");
8405                         return rc;
8406                 }
8407         }
8408         rc = bnxt_reserve_rings(bp);
8409         if (rc)
8410                 return rc;
8411         if ((bp->flags & BNXT_FLAG_RFS) &&
8412             !(bp->flags & BNXT_FLAG_USING_MSIX)) {
8413                 /* disable RFS if falling back to INTA */
8414                 bp->dev->hw_features &= ~NETIF_F_NTUPLE;
8415                 bp->flags &= ~BNXT_FLAG_RFS;
8416         }
8417
8418         rc = bnxt_alloc_mem(bp, irq_re_init);
8419         if (rc) {
8420                 netdev_err(bp->dev, "bnxt_alloc_mem err: %x\n", rc);
8421                 goto open_err_free_mem;
8422         }
8423
8424         if (irq_re_init) {
8425                 bnxt_init_napi(bp);
8426                 rc = bnxt_request_irq(bp);
8427                 if (rc) {
8428                         netdev_err(bp->dev, "bnxt_request_irq err: %x\n", rc);
8429                         goto open_err_irq;
8430                 }
8431         }
8432
8433         bnxt_enable_napi(bp);
8434         bnxt_debug_dev_init(bp);
8435
8436         rc = bnxt_init_nic(bp, irq_re_init);
8437         if (rc) {
8438                 netdev_err(bp->dev, "bnxt_init_nic err: %x\n", rc);
8439                 goto open_err;
8440         }
8441
8442         if (link_re_init) {
8443                 mutex_lock(&bp->link_lock);
8444                 rc = bnxt_update_phy_setting(bp);
8445                 mutex_unlock(&bp->link_lock);
8446                 if (rc) {
8447                         netdev_warn(bp->dev, "failed to update phy settings\n");
8448                         if (BNXT_SINGLE_PF(bp)) {
8449                                 bp->link_info.phy_retry = true;
8450                                 bp->link_info.phy_retry_expires =
8451                                         jiffies + 5 * HZ;
8452                         }
8453                 }
8454         }
8455
8456         if (irq_re_init)
8457                 udp_tunnel_get_rx_info(bp->dev);
8458
8459         set_bit(BNXT_STATE_OPEN, &bp->state);
8460         bnxt_enable_int(bp);
8461         /* Enable TX queues */
8462         bnxt_tx_enable(bp);
8463         mod_timer(&bp->timer, jiffies + bp->current_interval);
8464         /* Poll link status and check for SFP+ module status */
8465         bnxt_get_port_module_status(bp);
8466
8467         /* VF-reps may need to be re-opened after the PF is re-opened */
8468         if (BNXT_PF(bp))
8469                 bnxt_vf_reps_open(bp);
8470         return 0;
8471
8472 open_err:
8473         bnxt_debug_dev_exit(bp);
8474         bnxt_disable_napi(bp);
8475
8476 open_err_irq:
8477         bnxt_del_napi(bp);
8478
8479 open_err_free_mem:
8480         bnxt_free_skbs(bp);
8481         bnxt_free_irq(bp);
8482         bnxt_free_mem(bp, true);
8483         return rc;
8484 }
8485
8486 /* rtnl_lock held */
8487 int bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
8488 {
8489         int rc = 0;
8490
8491         rc = __bnxt_open_nic(bp, irq_re_init, link_re_init);
8492         if (rc) {
8493                 netdev_err(bp->dev, "nic open fail (rc: %x)\n", rc);
8494                 dev_close(bp->dev);
8495         }
8496         return rc;
8497 }
8498
8499 /* rtnl_lock held, open the NIC halfway by allocating all resources, but
8500  * NAPI, IRQ, and TX are not enabled.  This is mainly used for offline
8501  * self tests.
8502  */
8503 int bnxt_half_open_nic(struct bnxt *bp)
8504 {
8505         int rc = 0;
8506
8507         rc = bnxt_alloc_mem(bp, false);
8508         if (rc) {
8509                 netdev_err(bp->dev, "bnxt_alloc_mem err: %x\n", rc);
8510                 goto half_open_err;
8511         }
8512         rc = bnxt_init_nic(bp, false);
8513         if (rc) {
8514                 netdev_err(bp->dev, "bnxt_init_nic err: %x\n", rc);
8515                 goto half_open_err;
8516         }
8517         return 0;
8518
8519 half_open_err:
8520         bnxt_free_skbs(bp);
8521         bnxt_free_mem(bp, false);
8522         dev_close(bp->dev);
8523         return rc;
8524 }
8525
8526 /* rtnl_lock held, this call can only be made after a previous successful
8527  * call to bnxt_half_open_nic().
8528  */
8529 void bnxt_half_close_nic(struct bnxt *bp)
8530 {
8531         bnxt_hwrm_resource_free(bp, false, false);
8532         bnxt_free_skbs(bp);
8533         bnxt_free_mem(bp, false);
8534 }
8535
8536 static int bnxt_open(struct net_device *dev)
8537 {
8538         struct bnxt *bp = netdev_priv(dev);
8539         int rc;
8540
8541         bnxt_hwrm_if_change(bp, true);
8542         rc = __bnxt_open_nic(bp, true, true);
8543         if (rc)
8544                 bnxt_hwrm_if_change(bp, false);
8545
8546         bnxt_hwmon_open(bp);
8547
8548         return rc;
8549 }
8550
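/* True while sp_task is running or a stats read is in flight; the close
 * path polls this after clearing BNXT_STATE_OPEN before tearing down.
 */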
8551 static bool bnxt_drv_busy(struct bnxt *bp)
8552 {
8553         return (test_bit(BNXT_STATE_IN_SP_TASK, &bp->state) ||
8554                 test_bit(BNXT_STATE_READ_STATS, &bp->state));
8555 }
8556
8557 static void bnxt_get_ring_stats(struct bnxt *bp,
8558                                 struct rtnl_link_stats64 *stats);
8559
8560 static void __bnxt_close_nic(struct bnxt *bp, bool irq_re_init,
8561                              bool link_re_init)
8562 {
8563         /* Close the VF-reps before closing PF */
8564         if (BNXT_PF(bp))
8565                 bnxt_vf_reps_close(bp);
8566
8567         /* Change device state to avoid TX queue wake-ups */
8568         bnxt_tx_disable(bp);
8569
8570         clear_bit(BNXT_STATE_OPEN, &bp->state);
8571         smp_mb__after_atomic();
8572         while (bnxt_drv_busy(bp))
8573                 msleep(20);
8574
8575         /* Flush rings and disable interrupts */
8576         bnxt_shutdown_nic(bp, irq_re_init);
8577
8578         /* TODO CHIMP_FW: Link/PHY related cleanup if (link_re_init) */
8579
8580         bnxt_debug_dev_exit(bp);
8581         bnxt_disable_napi(bp);
8582         del_timer_sync(&bp->timer);
8583         bnxt_free_skbs(bp);
8584
8585         /* Save ring stats before shutdown */
8586         if (bp->bnapi)
8587                 bnxt_get_ring_stats(bp, &bp->net_stats_prev);
8588         if (irq_re_init) {
8589                 bnxt_free_irq(bp);
8590                 bnxt_del_napi(bp);
8591         }
8592         bnxt_free_mem(bp, irq_re_init);
8593 }
8594
8595 int bnxt_close_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
8596 {
8597         int rc = 0;
8598
8599 #ifdef CONFIG_BNXT_SRIOV
8600         if (bp->sriov_cfg) {
8601                 rc = wait_event_interruptible_timeout(bp->sriov_cfg_wait,
8602                                                       !bp->sriov_cfg,
8603                                                       BNXT_SRIOV_CFG_WAIT_TMO);
8604                 if (rc)
8605                         netdev_warn(bp->dev, "timeout waiting for SRIOV config operation to complete!\n");
8606         }
8607 #endif
8608         __bnxt_close_nic(bp, irq_re_init, link_re_init);
8609         return rc;
8610 }
8611
8612 static int bnxt_close(struct net_device *dev)
8613 {
8614         struct bnxt *bp = netdev_priv(dev);
8615
8616         bnxt_hwmon_close(bp);
8617         bnxt_close_nic(bp, true, true);
8618         bnxt_hwrm_shutdown_link(bp);
8619         bnxt_hwrm_if_change(bp, false);
8620         return 0;
8621 }
8622
8623 /* rtnl_lock held */
8624 static int bnxt_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
8625 {
8626         switch (cmd) {
8627         case SIOCGMIIPHY:
8628                 /* fallthru */
8629         case SIOCGMIIREG: {
8630                 if (!netif_running(dev))
8631                         return -EAGAIN;
8632
8633                 return 0;
8634         }
8635
8636         case SIOCSMIIREG:
8637                 if (!netif_running(dev))
8638                         return -EAGAIN;
8639
8640                 return 0;
8641
8642         default:
8643                 /* do nothing */
8644                 break;
8645         }
8646         return -EOPNOTSUPP;
8647 }
8648
8649 static void bnxt_get_ring_stats(struct bnxt *bp,
8650                                 struct rtnl_link_stats64 *stats)
8651 {
8652         int i;
8653
8655         for (i = 0; i < bp->cp_nr_rings; i++) {
8656                 struct bnxt_napi *bnapi = bp->bnapi[i];
8657                 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
8658                 struct ctx_hw_stats *hw_stats = cpr->hw_stats;
8659
8660                 stats->rx_packets += le64_to_cpu(hw_stats->rx_ucast_pkts);
8661                 stats->rx_packets += le64_to_cpu(hw_stats->rx_mcast_pkts);
8662                 stats->rx_packets += le64_to_cpu(hw_stats->rx_bcast_pkts);
8663
8664                 stats->tx_packets += le64_to_cpu(hw_stats->tx_ucast_pkts);
8665                 stats->tx_packets += le64_to_cpu(hw_stats->tx_mcast_pkts);
8666                 stats->tx_packets += le64_to_cpu(hw_stats->tx_bcast_pkts);
8667
8668                 stats->rx_bytes += le64_to_cpu(hw_stats->rx_ucast_bytes);
8669                 stats->rx_bytes += le64_to_cpu(hw_stats->rx_mcast_bytes);
8670                 stats->rx_bytes += le64_to_cpu(hw_stats->rx_bcast_bytes);
8671
8672                 stats->tx_bytes += le64_to_cpu(hw_stats->tx_ucast_bytes);
8673                 stats->tx_bytes += le64_to_cpu(hw_stats->tx_mcast_bytes);
8674                 stats->tx_bytes += le64_to_cpu(hw_stats->tx_bcast_bytes);
8675
8676                 stats->rx_missed_errors +=
8677                         le64_to_cpu(hw_stats->rx_discard_pkts);
8678
8679                 stats->multicast += le64_to_cpu(hw_stats->rx_mcast_pkts);
8680
8681                 stats->tx_dropped += le64_to_cpu(hw_stats->tx_drop_pkts);
8682         }
8683 }
8684
8685 static void bnxt_add_prev_stats(struct bnxt *bp,
8686                                 struct rtnl_link_stats64 *stats)
8687 {
8688         struct rtnl_link_stats64 *prev_stats = &bp->net_stats_prev;
8689
8690         stats->rx_packets += prev_stats->rx_packets;
8691         stats->tx_packets += prev_stats->tx_packets;
8692         stats->rx_bytes += prev_stats->rx_bytes;
8693         stats->tx_bytes += prev_stats->tx_bytes;
8694         stats->rx_missed_errors += prev_stats->rx_missed_errors;
8695         stats->multicast += prev_stats->multicast;
8696         stats->tx_dropped += prev_stats->tx_dropped;
8697 }
8698
8699 static void
8700 bnxt_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
8701 {
8702         struct bnxt *bp = netdev_priv(dev);
8703
8704         set_bit(BNXT_STATE_READ_STATS, &bp->state);
8705         /* Make sure bnxt_close_nic() sees that we are reading stats before
8706          * we check the BNXT_STATE_OPEN flag.
8707          */
8708         smp_mb__after_atomic();
8709         if (!test_bit(BNXT_STATE_OPEN, &bp->state)) {
8710                 clear_bit(BNXT_STATE_READ_STATS, &bp->state);
8711                 *stats = bp->net_stats_prev;
8712                 return;
8713         }
8714
8715         bnxt_get_ring_stats(bp, stats);
8716         bnxt_add_prev_stats(bp, stats);
8717
8718         if (bp->flags & BNXT_FLAG_PORT_STATS) {
8719                 struct rx_port_stats *rx = bp->hw_rx_port_stats;
8720                 struct tx_port_stats *tx = bp->hw_tx_port_stats;
8721
8722                 stats->rx_crc_errors = le64_to_cpu(rx->rx_fcs_err_frames);
8723                 stats->rx_frame_errors = le64_to_cpu(rx->rx_align_err_frames);
8724                 stats->rx_length_errors = le64_to_cpu(rx->rx_undrsz_frames) +
8725                                           le64_to_cpu(rx->rx_ovrsz_frames) +
8726                                           le64_to_cpu(rx->rx_runt_frames);
8727                 stats->rx_errors = le64_to_cpu(rx->rx_false_carrier_frames) +
8728                                    le64_to_cpu(rx->rx_jbr_frames);
8729                 stats->collisions = le64_to_cpu(tx->tx_total_collisions);
8730                 stats->tx_fifo_errors = le64_to_cpu(tx->tx_fifo_underruns);
8731                 stats->tx_errors = le64_to_cpu(tx->tx_err);
8732         }
8733         clear_bit(BNXT_STATE_READ_STATS, &bp->state);
8734 }
8735
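/* Sync the netdev multicast list into vnic->mc_list.  Returns true if the
 * list changed.  If there are more than BNXT_MAX_MC_ADDRS entries, fall
 * back to the ALL_MCAST rx mask instead of programming individual entries.
 */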
8736 static bool bnxt_mc_list_updated(struct bnxt *bp, u32 *rx_mask)
8737 {
8738         struct net_device *dev = bp->dev;
8739         struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
8740         struct netdev_hw_addr *ha;
8741         u8 *haddr;
8742         int mc_count = 0;
8743         bool update = false;
8744         int off = 0;
8745
8746         netdev_for_each_mc_addr(ha, dev) {
8747                 if (mc_count >= BNXT_MAX_MC_ADDRS) {
8748                         *rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
8749                         vnic->mc_list_count = 0;
8750                         return false;
8751                 }
8752                 haddr = ha->addr;
8753                 if (!ether_addr_equal(haddr, vnic->mc_list + off)) {
8754                         memcpy(vnic->mc_list + off, haddr, ETH_ALEN);
8755                         update = true;
8756                 }
8757                 off += ETH_ALEN;
8758                 mc_count++;
8759         }
8760         if (mc_count)
8761                 *rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_MCAST;
8762
8763         if (mc_count != vnic->mc_list_count) {
8764                 vnic->mc_list_count = mc_count;
8765                 update = true;
8766         }
8767         return update;
8768 }
8769
8770 static bool bnxt_uc_list_updated(struct bnxt *bp)
8771 {
8772         struct net_device *dev = bp->dev;
8773         struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
8774         struct netdev_hw_addr *ha;
8775         int off = 0;
8776
8777         if (netdev_uc_count(dev) != (vnic->uc_filter_count - 1))
8778                 return true;
8779
8780         netdev_for_each_uc_addr(ha, dev) {
8781                 if (!ether_addr_equal(ha->addr, vnic->uc_list + off))
8782                         return true;
8783
8784                 off += ETH_ALEN;
8785         }
8786         return false;
8787 }
8788
8789 static void bnxt_set_rx_mode(struct net_device *dev)
8790 {
8791         struct bnxt *bp = netdev_priv(dev);
8792         struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
8793         u32 mask = vnic->rx_mask;
8794         bool mc_update = false;
8795         bool uc_update;
8796
8797         if (!netif_running(dev))
8798                 return;
8799
8800         mask &= ~(CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS |
8801                   CFA_L2_SET_RX_MASK_REQ_MASK_MCAST |
8802                   CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST |
8803                   CFA_L2_SET_RX_MASK_REQ_MASK_BCAST);
8804
8805         if ((dev->flags & IFF_PROMISC) && bnxt_promisc_ok(bp))
8806                 mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;
8807
8808         uc_update = bnxt_uc_list_updated(bp);
8809
8810         if (dev->flags & IFF_BROADCAST)
8811                 mask |= CFA_L2_SET_RX_MASK_REQ_MASK_BCAST;
8812         if (dev->flags & IFF_ALLMULTI) {
8813                 mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
8814                 vnic->mc_list_count = 0;
8815         } else {
8816                 mc_update = bnxt_mc_list_updated(bp, &mask);
8817         }
8818
8819         if (mask != vnic->rx_mask || uc_update || mc_update) {
8820                 vnic->rx_mask = mask;
8821
8822                 set_bit(BNXT_RX_MASK_SP_EVENT, &bp->sp_event);
8823                 bnxt_queue_sp_work(bp);
8824         }
8825 }
8826
8827 static int bnxt_cfg_rx_mode(struct bnxt *bp)
8828 {
8829         struct net_device *dev = bp->dev;
8830         struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
8831         struct netdev_hw_addr *ha;
8832         int i, off = 0, rc;
8833         bool uc_update;
8834
8835         netif_addr_lock_bh(dev);
8836         uc_update = bnxt_uc_list_updated(bp);
8837         netif_addr_unlock_bh(dev);
8838
8839         if (!uc_update)
8840                 goto skip_uc;
8841
8842         mutex_lock(&bp->hwrm_cmd_lock);
8843         for (i = 1; i < vnic->uc_filter_count; i++) {
8844                 struct hwrm_cfa_l2_filter_free_input req = {0};
8845
8846                 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_L2_FILTER_FREE, -1,
8847                                        -1);
8848
8849                 req.l2_filter_id = vnic->fw_l2_filter_id[i];
8850
8851                 rc = _hwrm_send_message(bp, &req, sizeof(req),
8852                                         HWRM_CMD_TIMEOUT);
8853         }
8854         mutex_unlock(&bp->hwrm_cmd_lock);
8855
8856         vnic->uc_filter_count = 1;
8857
8858         netif_addr_lock_bh(dev);
8859         if (netdev_uc_count(dev) > (BNXT_MAX_UC_ADDRS - 1)) {
8860                 vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;
8861         } else {
8862                 netdev_for_each_uc_addr(ha, dev) {
8863                         memcpy(vnic->uc_list + off, ha->addr, ETH_ALEN);
8864                         off += ETH_ALEN;
8865                         vnic->uc_filter_count++;
8866                 }
8867         }
8868         netif_addr_unlock_bh(dev);
8869
8870         for (i = 1, off = 0; i < vnic->uc_filter_count; i++, off += ETH_ALEN) {
8871                 rc = bnxt_hwrm_set_vnic_filter(bp, 0, i, vnic->uc_list + off);
8872                 if (rc) {
8873                         netdev_err(bp->dev, "HWRM vnic filter failure rc: %x\n",
8874                                    rc);
8875                         vnic->uc_filter_count = i;
8876                         return rc;
8877                 }
8878         }
8879
8880 skip_uc:
8881         rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, 0);
8882         if (rc)
8883                 netdev_err(bp->dev, "HWRM cfa l2 rx mask failure rc: %x\n",
8884                            rc);
8885
8886         return rc;
8887 }
8888
8889 static bool bnxt_can_reserve_rings(struct bnxt *bp)
8890 {
8891 #ifdef CONFIG_BNXT_SRIOV
8892         if (BNXT_NEW_RM(bp) && BNXT_VF(bp)) {
8893                 struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
8894
8895                 /* If no minimum rings were provisioned by the PF, don't
8896                  * reserve rings by default while the device is down.
8897                  */
8898                 if (hw_resc->min_tx_rings || hw_resc->resv_tx_rings)
8899                         return true;
8900
8901                 if (!netif_running(bp->dev))
8902                         return false;
8903         }
8904 #endif
8905         return true;
8906 }
8907
8908 /* True if the chip and firmware support RFS */
8909 static bool bnxt_rfs_supported(struct bnxt *bp)
8910 {
8911         if (bp->flags & BNXT_FLAG_CHIP_P5)
8912                 return false;
8913         if (BNXT_PF(bp) && !BNXT_CHIP_TYPE_NITRO_A0(bp))
8914                 return true;
8915         if (bp->flags & BNXT_FLAG_NEW_RSS_CAP)
8916                 return true;
8917         return false;
8918 }
8919
8920 /* True if the runtime conditions support RFS */
8921 static bool bnxt_rfs_capable(struct bnxt *bp)
8922 {
8923 #ifdef CONFIG_RFS_ACCEL
8924         int vnics, max_vnics, max_rss_ctxs;
8925
8926         if (bp->flags & BNXT_FLAG_CHIP_P5)
8927                 return false;
8928         if (!(bp->flags & BNXT_FLAG_MSIX_CAP) || !bnxt_can_reserve_rings(bp))
8929                 return false;
8930
8931         vnics = 1 + bp->rx_nr_rings;
8932         max_vnics = bnxt_get_max_func_vnics(bp);
8933         max_rss_ctxs = bnxt_get_max_func_rss_ctxs(bp);
8934
8935         /* RSS contexts not a limiting factor */
8936         if (bp->flags & BNXT_FLAG_NEW_RSS_CAP)
8937                 max_rss_ctxs = max_vnics;
8938         if (vnics > max_vnics || vnics > max_rss_ctxs) {
8939                 if (bp->rx_nr_rings > 1)
8940                         netdev_warn(bp->dev,
8941                                     "Not enough resources to support NTUPLE filters, enough resources for up to %d rx rings\n",
8942                                     min(max_rss_ctxs - 1, max_vnics - 1));
8943                 return false;
8944         }
8945
8946         if (!BNXT_NEW_RM(bp))
8947                 return true;
8948
8949         if (vnics == bp->hw_resc.resv_vnics)
8950                 return true;
8951
8952         bnxt_hwrm_reserve_rings(bp, 0, 0, 0, 0, 0, vnics);
8953         if (vnics <= bp->hw_resc.resv_vnics)
8954                 return true;
8955
8956         netdev_warn(bp->dev, "Unable to reserve resources to support NTUPLE filters.\n");
8957         bnxt_hwrm_reserve_rings(bp, 0, 0, 0, 0, 0, 1);
8958         return false;
8959 #else
8960         return false;
8961 #endif
8962 }
8963
8964 static netdev_features_t bnxt_fix_features(struct net_device *dev,
8965                                            netdev_features_t features)
8966 {
8967         struct bnxt *bp = netdev_priv(dev);
8968
8969         if ((features & NETIF_F_NTUPLE) && !bnxt_rfs_capable(bp))
8970                 features &= ~NETIF_F_NTUPLE;
8971
8972         if (bp->flags & BNXT_FLAG_NO_AGG_RINGS)
8973                 features &= ~(NETIF_F_LRO | NETIF_F_GRO_HW);
8974
8975         if (!(features & NETIF_F_GRO))
8976                 features &= ~NETIF_F_GRO_HW;
8977
8978         if (features & NETIF_F_GRO_HW)
8979                 features &= ~NETIF_F_LRO;
8980
8981         /* Both CTAG and STAG VLAN acceleration on the RX side have to be
8982          * turned on or off together.
8983          */
8984         if ((features & (NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX)) !=
8985             (NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX)) {
8986                 if (dev->features & NETIF_F_HW_VLAN_CTAG_RX)
8987                         features &= ~(NETIF_F_HW_VLAN_CTAG_RX |
8988                                       NETIF_F_HW_VLAN_STAG_RX);
8989                 else
8990                         features |= NETIF_F_HW_VLAN_CTAG_RX |
8991                                     NETIF_F_HW_VLAN_STAG_RX;
8992         }
8993 #ifdef CONFIG_BNXT_SRIOV
8994         if (BNXT_VF(bp)) {
8995                 if (bp->vf.vlan) {
8996                         features &= ~(NETIF_F_HW_VLAN_CTAG_RX |
8997                                       NETIF_F_HW_VLAN_STAG_RX);
8998                 }
8999         }
9000 #endif
9001         return features;
9002 }
9003
9004 static int bnxt_set_features(struct net_device *dev, netdev_features_t features)
9005 {
9006         struct bnxt *bp = netdev_priv(dev);
9007         u32 flags = bp->flags;
9008         u32 changes;
9009         int rc = 0;
9010         bool re_init = false;
9011         bool update_tpa = false;
9012
9013         flags &= ~BNXT_FLAG_ALL_CONFIG_FEATS;
9014         if (features & NETIF_F_GRO_HW)
9015                 flags |= BNXT_FLAG_GRO;
9016         else if (features & NETIF_F_LRO)
9017                 flags |= BNXT_FLAG_LRO;
9018
9019         if (bp->flags & BNXT_FLAG_NO_AGG_RINGS)
9020                 flags &= ~BNXT_FLAG_TPA;
9021
9022         if (features & NETIF_F_HW_VLAN_CTAG_RX)
9023                 flags |= BNXT_FLAG_STRIP_VLAN;
9024
9025         if (features & NETIF_F_NTUPLE)
9026                 flags |= BNXT_FLAG_RFS;
9027
9028         changes = flags ^ bp->flags;
9029         if (changes & BNXT_FLAG_TPA) {
9030                 update_tpa = true;
9031                 if ((bp->flags & BNXT_FLAG_TPA) == 0 ||
9032                     (flags & BNXT_FLAG_TPA) == 0)
9033                         re_init = true;
9034         }
9035
9036         if (changes & ~BNXT_FLAG_TPA)
9037                 re_init = true;
9038
9039         if (flags != bp->flags) {
9040                 u32 old_flags = bp->flags;
9041
9042                 bp->flags = flags;
9043
9044                 if (!test_bit(BNXT_STATE_OPEN, &bp->state)) {
9045                         if (update_tpa)
9046                                 bnxt_set_ring_params(bp);
9047                         return rc;
9048                 }
9049
9050                 if (re_init) {
9051                         bnxt_close_nic(bp, false, false);
9052                         if (update_tpa)
9053                                 bnxt_set_ring_params(bp);
9054
9055                         return bnxt_open_nic(bp, false, false);
9056                 }
9057                 if (update_tpa) {
9058                         rc = bnxt_set_tpa(bp,
9059                                           (flags & BNXT_FLAG_TPA) ?
9060                                           true : false);
9061                         if (rc)
9062                                 bp->flags = old_flags;
9063                 }
9064         }
9065         return rc;
9066 }
9067
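/* Debug helper: query the firmware's producer and consumer indices for a
 * given ring, used below when checking for missed completion interrupts.
 */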
9068 static int bnxt_dbg_hwrm_ring_info_get(struct bnxt *bp, u8 ring_type,
9069                                        u32 ring_id, u32 *prod, u32 *cons)
9070 {
9071         struct hwrm_dbg_ring_info_get_output *resp = bp->hwrm_cmd_resp_addr;
9072         struct hwrm_dbg_ring_info_get_input req = {0};
9073         int rc;
9074
9075         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_DBG_RING_INFO_GET, -1, -1);
9076         req.ring_type = ring_type;
9077         req.fw_ring_id = cpu_to_le32(ring_id);
9078         mutex_lock(&bp->hwrm_cmd_lock);
9079         rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
9080         if (!rc) {
9081                 *prod = le32_to_cpu(resp->producer_index);
9082                 *cons = le32_to_cpu(resp->consumer_index);
9083         }
9084         mutex_unlock(&bp->hwrm_cmd_lock);
9085         return rc;
9086 }
9087
9088 static void bnxt_dump_tx_sw_state(struct bnxt_napi *bnapi)
9089 {
9090         struct bnxt_tx_ring_info *txr = bnapi->tx_ring;
9091         int i = bnapi->index;
9092
9093         if (!txr)
9094                 return;
9095
9096         netdev_info(bnapi->bp->dev, "[%d]: tx{fw_ring: %d prod: %x cons: %x}\n",
9097                     i, txr->tx_ring_struct.fw_ring_id, txr->tx_prod,
9098                     txr->tx_cons);
9099 }
9100
9101 static void bnxt_dump_rx_sw_state(struct bnxt_napi *bnapi)
9102 {
9103         struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
9104         int i = bnapi->index;
9105
9106         if (!rxr)
9107                 return;
9108
9109         netdev_info(bnapi->bp->dev, "[%d]: rx{fw_ring: %d prod: %x} rx_agg{fw_ring: %d agg_prod: %x sw_agg_prod: %x}\n",
9110                     i, rxr->rx_ring_struct.fw_ring_id, rxr->rx_prod,
9111                     rxr->rx_agg_ring_struct.fw_ring_id, rxr->rx_agg_prod,
9112                     rxr->rx_sw_agg_prod);
9113 }
9114
9115 static void bnxt_dump_cp_sw_state(struct bnxt_napi *bnapi)
9116 {
9117         struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
9118         int i = bnapi->index;
9119
9120         netdev_info(bnapi->bp->dev, "[%d]: cp{fw_ring: %d raw_cons: %x}\n",
9121                     i, cpr->cp_ring_struct.fw_ring_id, cpr->cp_raw_cons);
9122 }
9123
9124 static void bnxt_dbg_dump_states(struct bnxt *bp)
9125 {
9126         int i;
9127         struct bnxt_napi *bnapi;
9128
9129         for (i = 0; i < bp->cp_nr_rings; i++) {
9130                 bnapi = bp->bnapi[i];
9131                 if (netif_msg_drv(bp)) {
9132                         bnxt_dump_tx_sw_state(bnapi);
9133                         bnxt_dump_rx_sw_state(bnapi);
9134                         bnxt_dump_cp_sw_state(bnapi);
9135                 }
9136         }
9137 }
9138
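/* Close and reopen the NIC under rtnl to recover from errors such as TX
 * timeouts.  When not silent, also dump ring state and stop/restart the
 * ULP driver around the reset.
 */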
9139 static void bnxt_reset_task(struct bnxt *bp, bool silent)
9140 {
9141         if (!silent)
9142                 bnxt_dbg_dump_states(bp);
9143         if (netif_running(bp->dev)) {
9144                 int rc;
9145
9146                 if (!silent)
9147                         bnxt_ulp_stop(bp);
9148                 bnxt_close_nic(bp, false, false);
9149                 rc = bnxt_open_nic(bp, false, false);
9150                 if (!silent && !rc)
9151                         bnxt_ulp_start(bp);
9152         }
9153 }
9154
9155 static void bnxt_tx_timeout(struct net_device *dev)
9156 {
9157         struct bnxt *bp = netdev_priv(dev);
9158
9159         netdev_err(bp->dev, "TX timeout detected, starting reset task!\n");
9160         set_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event);
9161         bnxt_queue_sp_work(bp);
9162 }
9163
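/* Periodic per-device timer.  It does no work directly; it only sets
 * sp_event bits (port stats, TC flower stats, PHY retry, ring coalescing
 * check) and kicks bnxt_sp_task(), then re-arms itself.
 */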
9164 static void bnxt_timer(struct timer_list *t)
9165 {
9166         struct bnxt *bp = from_timer(bp, t, timer);
9167         struct net_device *dev = bp->dev;
9168
9169         if (!netif_running(dev))
9170                 return;
9171
9172         if (atomic_read(&bp->intr_sem) != 0)
9173                 goto bnxt_restart_timer;
9174
9175         if (bp->link_info.link_up && (bp->flags & BNXT_FLAG_PORT_STATS) &&
9176             bp->stats_coal_ticks) {
9177                 set_bit(BNXT_PERIODIC_STATS_SP_EVENT, &bp->sp_event);
9178                 bnxt_queue_sp_work(bp);
9179         }
9180
9181         if (bnxt_tc_flower_enabled(bp)) {
9182                 set_bit(BNXT_FLOW_STATS_SP_EVENT, &bp->sp_event);
9183                 bnxt_queue_sp_work(bp);
9184         }
9185
9186         if (bp->link_info.phy_retry) {
9187                 if (time_after(jiffies, bp->link_info.phy_retry_expires)) {
9188                         bp->link_info.phy_retry = 0;
9189                         netdev_warn(bp->dev, "failed to update phy settings after maximum retries.\n");
9190                 } else {
9191                         set_bit(BNXT_UPDATE_PHY_SP_EVENT, &bp->sp_event);
9192                         bnxt_queue_sp_work(bp);
9193                 }
9194         }
9195
9196         if ((bp->flags & BNXT_FLAG_CHIP_P5) && netif_carrier_ok(dev)) {
9197                 set_bit(BNXT_RING_COAL_NOW_SP_EVENT, &bp->sp_event);
9198                 bnxt_queue_sp_work(bp);
9199         }
9200 bnxt_restart_timer:
9201         mod_timer(&bp->timer, jiffies + bp->current_interval);
9202 }
9203
9204 static void bnxt_rtnl_lock_sp(struct bnxt *bp)
9205 {
9206         /* We are called from bnxt_sp_task which has BNXT_STATE_IN_SP_TASK
9207          * set.  If the device is being closed, bnxt_close() may be holding
9208          * rtnl_lock and waiting for BNXT_STATE_IN_SP_TASK to clear.  So we
9209          * must clear BNXT_STATE_IN_SP_TASK before taking rtnl_lock.
9210          */
9211         clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
9212         rtnl_lock();
9213 }
9214
9215 static void bnxt_rtnl_unlock_sp(struct bnxt *bp)
9216 {
9217         set_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
9218         rtnl_unlock();
9219 }
9220
9221 /* Only called from bnxt_sp_task() */
9222 static void bnxt_reset(struct bnxt *bp, bool silent)
9223 {
9224         bnxt_rtnl_lock_sp(bp);
9225         if (test_bit(BNXT_STATE_OPEN, &bp->state))
9226                 bnxt_reset_task(bp, silent);
9227         bnxt_rtnl_unlock_sp(bp);
9228 }
9229
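/* On P5 chips, scan the completion rings for rings that have pending work
 * but no consumer progress since the last check; query the firmware ring
 * state for diagnostics and count such events in cpr->missed_irqs.
 */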
9230 static void bnxt_chk_missed_irq(struct bnxt *bp)
9231 {
9232         int i;
9233
9234         if (!(bp->flags & BNXT_FLAG_CHIP_P5))
9235                 return;
9236
9237         for (i = 0; i < bp->cp_nr_rings; i++) {
9238                 struct bnxt_napi *bnapi = bp->bnapi[i];
9239                 struct bnxt_cp_ring_info *cpr;
9240                 u32 fw_ring_id;
9241                 int j;
9242
9243                 if (!bnapi)
9244                         continue;
9245
9246                 cpr = &bnapi->cp_ring;
9247                 for (j = 0; j < 2; j++) {
9248                         struct bnxt_cp_ring_info *cpr2 = cpr->cp_ring_arr[j];
9249                         u32 val[2];
9250
9251                         if (!cpr2 || cpr2->has_more_work ||
9252                             !bnxt_has_work(bp, cpr2))
9253                                 continue;
9254
9255                         if (cpr2->cp_raw_cons != cpr2->last_cp_raw_cons) {
9256                                 cpr2->last_cp_raw_cons = cpr2->cp_raw_cons;
9257                                 continue;
9258                         }
9259                         fw_ring_id = cpr2->cp_ring_struct.fw_ring_id;
9260                         bnxt_dbg_hwrm_ring_info_get(bp,
9261                                 DBG_RING_INFO_GET_REQ_RING_TYPE_L2_CMPL,
9262                                 fw_ring_id, &val[0], &val[1]);
9263                         cpr->missed_irqs++;
9264                 }
9265         }
9266 }
9267
9268 static void bnxt_cfg_ntp_filters(struct bnxt *);
9269
9270 static void bnxt_sp_task(struct work_struct *work)
9271 {
9272         struct bnxt *bp = container_of(work, struct bnxt, sp_task);
9273
9274         set_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
9275         smp_mb__after_atomic();
9276         if (!test_bit(BNXT_STATE_OPEN, &bp->state)) {
9277                 clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
9278                 return;
9279         }
9280
9281         if (test_and_clear_bit(BNXT_RX_MASK_SP_EVENT, &bp->sp_event))
9282                 bnxt_cfg_rx_mode(bp);
9283
9284         if (test_and_clear_bit(BNXT_RX_NTP_FLTR_SP_EVENT, &bp->sp_event))
9285                 bnxt_cfg_ntp_filters(bp);
9286         if (test_and_clear_bit(BNXT_HWRM_EXEC_FWD_REQ_SP_EVENT, &bp->sp_event))
9287                 bnxt_hwrm_exec_fwd_req(bp);
9288         if (test_and_clear_bit(BNXT_VXLAN_ADD_PORT_SP_EVENT, &bp->sp_event)) {
9289                 bnxt_hwrm_tunnel_dst_port_alloc(
9290                         bp, bp->vxlan_port,
9291                         TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN);
9292         }
9293         if (test_and_clear_bit(BNXT_VXLAN_DEL_PORT_SP_EVENT, &bp->sp_event)) {
9294                 bnxt_hwrm_tunnel_dst_port_free(
9295                         bp, TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN);
9296         }
9297         if (test_and_clear_bit(BNXT_GENEVE_ADD_PORT_SP_EVENT, &bp->sp_event)) {
9298                 bnxt_hwrm_tunnel_dst_port_alloc(
9299                         bp, bp->nge_port,
9300                         TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE);
9301         }
9302         if (test_and_clear_bit(BNXT_GENEVE_DEL_PORT_SP_EVENT, &bp->sp_event)) {
9303                 bnxt_hwrm_tunnel_dst_port_free(
9304                         bp, TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE);
9305         }
9306         if (test_and_clear_bit(BNXT_PERIODIC_STATS_SP_EVENT, &bp->sp_event)) {
9307                 bnxt_hwrm_port_qstats(bp);
9308                 bnxt_hwrm_port_qstats_ext(bp);
9309         }
9310
9311         if (test_and_clear_bit(BNXT_LINK_CHNG_SP_EVENT, &bp->sp_event)) {
9312                 int rc;
9313
9314                 mutex_lock(&bp->link_lock);
9315                 if (test_and_clear_bit(BNXT_LINK_SPEED_CHNG_SP_EVENT,
9316                                        &bp->sp_event))
9317                         bnxt_hwrm_phy_qcaps(bp);
9318
9319                 rc = bnxt_update_link(bp, true);
9320                 mutex_unlock(&bp->link_lock);
9321                 if (rc)
9322                         netdev_err(bp->dev, "SP task can't update link (rc: %x)\n",
9323                                    rc);
9324         }
9325         if (test_and_clear_bit(BNXT_UPDATE_PHY_SP_EVENT, &bp->sp_event)) {
9326                 int rc;
9327
9328                 mutex_lock(&bp->link_lock);
9329                 rc = bnxt_update_phy_setting(bp);
9330                 mutex_unlock(&bp->link_lock);
9331                 if (rc) {
9332                         netdev_warn(bp->dev, "update phy settings retry failed\n");
9333                 } else {
9334                         bp->link_info.phy_retry = false;
9335                         netdev_info(bp->dev, "update phy settings retry succeeded\n");
9336                 }
9337         }
9338         if (test_and_clear_bit(BNXT_HWRM_PORT_MODULE_SP_EVENT, &bp->sp_event)) {
9339                 mutex_lock(&bp->link_lock);
9340                 bnxt_get_port_module_status(bp);
9341                 mutex_unlock(&bp->link_lock);
9342         }
9343
9344         if (test_and_clear_bit(BNXT_FLOW_STATS_SP_EVENT, &bp->sp_event))
9345                 bnxt_tc_flow_stats_work(bp);
9346
9347         if (test_and_clear_bit(BNXT_RING_COAL_NOW_SP_EVENT, &bp->sp_event))
9348                 bnxt_chk_missed_irq(bp);
9349
9350         /* These functions below will clear BNXT_STATE_IN_SP_TASK.  They
9351          * must be the last functions to be called before exiting.
9352          */
9353         if (test_and_clear_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event))
9354                 bnxt_reset(bp, false);
9355
9356         if (test_and_clear_bit(BNXT_RESET_TASK_SILENT_SP_EVENT, &bp->sp_event))
9357                 bnxt_reset(bp, true);
9358
9359         smp_mb__before_atomic();
9360         clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
9361 }
9362
9363 /* Under rtnl_lock */
9364 int bnxt_check_rings(struct bnxt *bp, int tx, int rx, bool sh, int tcs,
9365                      int tx_xdp)
9366 {
9367         int max_rx, max_tx, tx_sets = 1;
9368         int tx_rings_needed, stats;
9369         int rx_rings = rx;
9370         int cp, vnics, rc;
9371
9372         if (tcs)
9373                 tx_sets = tcs;
9374
9375         rc = bnxt_get_max_rings(bp, &max_rx, &max_tx, sh);
9376         if (rc)
9377                 return rc;
9378
9379         if (max_rx < rx)
9380                 return -ENOMEM;
9381
9382         tx_rings_needed = tx * tx_sets + tx_xdp;
9383         if (max_tx < tx_rings_needed)
9384                 return -ENOMEM;
9385
9386         vnics = 1;
9387         if (bp->flags & BNXT_FLAG_RFS)
9388                 vnics += rx_rings;
9389
9390         if (bp->flags & BNXT_FLAG_AGG_RINGS)
9391                 rx_rings <<= 1;
9392         cp = sh ? max_t(int, tx_rings_needed, rx) : tx_rings_needed + rx;
9393         stats = cp;
9394         if (BNXT_NEW_RM(bp)) {
9395                 cp += bnxt_get_ulp_msix_num(bp);
9396                 stats += bnxt_get_ulp_stat_ctxs(bp);
9397         }
9398         return bnxt_hwrm_check_rings(bp, tx_rings_needed, rx_rings, rx, cp,
9399                                      stats, vnics);
9400 }
9401
9402 static void bnxt_unmap_bars(struct bnxt *bp, struct pci_dev *pdev)
9403 {
9404         if (bp->bar2) {
9405                 pci_iounmap(pdev, bp->bar2);
9406                 bp->bar2 = NULL;
9407         }
9408
9409         if (bp->bar1) {
9410                 pci_iounmap(pdev, bp->bar1);
9411                 bp->bar1 = NULL;
9412         }
9413
9414         if (bp->bar0) {
9415                 pci_iounmap(pdev, bp->bar0);
9416                 bp->bar0 = NULL;
9417         }
9418 }
9419
9420 static void bnxt_cleanup_pci(struct bnxt *bp)
9421 {
9422         bnxt_unmap_bars(bp, bp->pdev);
9423         pci_release_regions(bp->pdev);
9424         pci_disable_device(bp->pdev);
9425 }
9426
9427 static void bnxt_init_dflt_coal(struct bnxt *bp)
9428 {
9429         struct bnxt_coal *coal;
9430
9431         /* Tick values in microseconds.
9432          * 1 coal_buf x bufs_per_record = 1 completion record.
9433          */
9434         coal = &bp->rx_coal;
9435         coal->coal_ticks = 10;
9436         coal->coal_bufs = 30;
9437         coal->coal_ticks_irq = 1;
9438         coal->coal_bufs_irq = 2;
9439         coal->idle_thresh = 50;
9440         coal->bufs_per_record = 2;
9441         coal->budget = 64;              /* NAPI budget */
9442
9443         coal = &bp->tx_coal;
9444         coal->coal_ticks = 28;
9445         coal->coal_bufs = 30;
9446         coal->coal_ticks_irq = 2;
9447         coal->coal_bufs_irq = 2;
9448         coal->bufs_per_record = 1;
9449
9450         bp->stats_coal_ticks = BNXT_DEF_STATS_COAL_TICKS;
9451 }
9452
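/* One-time PCI bring-up: enable the device, claim its regions, set a
 * 64-bit (or 32-bit fallback) DMA mask, map BARs 0/2/4, and initialize
 * locks, default ring sizes, coalescing parameters, and the timer.
 */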
9453 static int bnxt_init_board(struct pci_dev *pdev, struct net_device *dev)
9454 {
9455         int rc;
9456         struct bnxt *bp = netdev_priv(dev);
9457
9458         SET_NETDEV_DEV(dev, &pdev->dev);
9459
9460         /* enable device (incl. PCI PM wakeup), and bus-mastering */
9461         rc = pci_enable_device(pdev);
9462         if (rc) {
9463                 dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
9464                 goto init_err;
9465         }
9466
9467         if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
9468                 dev_err(&pdev->dev,
9469                         "Cannot find PCI device base address, aborting\n");
9470                 rc = -ENODEV;
9471                 goto init_err_disable;
9472         }
9473
9474         rc = pci_request_regions(pdev, DRV_MODULE_NAME);
9475         if (rc) {
9476                 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
9477                 goto init_err_disable;
9478         }
9479
9480         if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)) != 0 &&
9481             dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)) != 0) {
9482                 dev_err(&pdev->dev, "System does not support DMA, aborting\n");
                rc = -EIO;      /* rc is still 0 here; don't return success */
9483                 goto init_err_disable;
9484         }
9485
9486         pci_set_master(pdev);
9487
9488         bp->dev = dev;
9489         bp->pdev = pdev;
9490
9491         bp->bar0 = pci_ioremap_bar(pdev, 0);
9492         if (!bp->bar0) {
9493                 dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
9494                 rc = -ENOMEM;
9495                 goto init_err_release;
9496         }
9497
9498         bp->bar1 = pci_ioremap_bar(pdev, 2);
9499         if (!bp->bar1) {
9500                 dev_err(&pdev->dev, "Cannot map doorbell registers, aborting\n");
9501                 rc = -ENOMEM;
9502                 goto init_err_release;
9503         }
9504
9505         bp->bar2 = pci_ioremap_bar(pdev, 4);
9506         if (!bp->bar2) {
9507                 dev_err(&pdev->dev, "Cannot map bar4 registers, aborting\n");
9508                 rc = -ENOMEM;
9509                 goto init_err_release;
9510         }
9511
9512         pci_enable_pcie_error_reporting(pdev);
9513
9514         INIT_WORK(&bp->sp_task, bnxt_sp_task);
9515
9516         spin_lock_init(&bp->ntp_fltr_lock);
9517 #if BITS_PER_LONG == 32
9518         spin_lock_init(&bp->db_lock);
9519 #endif
9520
9521         bp->rx_ring_size = BNXT_DEFAULT_RX_RING_SIZE;
9522         bp->tx_ring_size = BNXT_DEFAULT_TX_RING_SIZE;
9523
9524         bnxt_init_dflt_coal(bp);
9525
9526         timer_setup(&bp->timer, bnxt_timer, 0);
9527         bp->current_interval = BNXT_TIMER_INTERVAL;
9528
9529         clear_bit(BNXT_STATE_OPEN, &bp->state);
9530         return 0;
9531
9532 init_err_release:
9533         bnxt_unmap_bars(bp, pdev);
9534         pci_release_regions(pdev);
9535
9536 init_err_disable:
9537         pci_disable_device(pdev);
9538
9539 init_err:
9540         return rc;
9541 }
9542
9543 /* rtnl_lock held */
9544 static int bnxt_change_mac_addr(struct net_device *dev, void *p)
9545 {
9546         struct sockaddr *addr = p;
9547         struct bnxt *bp = netdev_priv(dev);
9548         int rc = 0;
9549
9550         if (!is_valid_ether_addr(addr->sa_data))
9551                 return -EADDRNOTAVAIL;
9552
9553         if (ether_addr_equal(addr->sa_data, dev->dev_addr))
9554                 return 0;
9555
9556         rc = bnxt_approve_mac(bp, addr->sa_data, true);
9557         if (rc)
9558                 return rc;
9559
9560         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
9561         if (netif_running(dev)) {
9562                 bnxt_close_nic(bp, false, false);
9563                 rc = bnxt_open_nic(bp, false, false);
9564         }
9565
9566         return rc;
9567 }
9568
9569 /* rtnl_lock held */
9570 static int bnxt_change_mtu(struct net_device *dev, int new_mtu)
9571 {
9572         struct bnxt *bp = netdev_priv(dev);
9573
9574         if (netif_running(dev))
9575                 bnxt_close_nic(bp, false, false);
9576
9577         dev->mtu = new_mtu;
9578         bnxt_set_ring_params(bp);
9579
9580         if (netif_running(dev))
9581                 return bnxt_open_nic(bp, false, false);
9582
9583         return 0;
9584 }
9585
9586 int bnxt_setup_mq_tc(struct net_device *dev, u8 tc)
9587 {
9588         struct bnxt *bp = netdev_priv(dev);
9589         bool sh = false;
9590         int rc;
9591
9592         if (tc > bp->max_tc) {
9593                 netdev_err(dev, "Too many traffic classes requested: %d. Max supported is %d.\n",
9594                            tc, bp->max_tc);
9595                 return -EINVAL;
9596         }
9597
9598         if (netdev_get_num_tc(dev) == tc)
9599                 return 0;
9600
9601         if (bp->flags & BNXT_FLAG_SHARED_RINGS)
9602                 sh = true;
9603
9604         rc = bnxt_check_rings(bp, bp->tx_nr_rings_per_tc, bp->rx_nr_rings,
9605                               sh, tc, bp->tx_nr_rings_xdp);
9606         if (rc)
9607                 return rc;
9608
9609         /* Need to close the device and redo hw resource allocations */
9610         if (netif_running(bp->dev))
9611                 bnxt_close_nic(bp, true, false);
9612
9613         if (tc) {
9614                 bp->tx_nr_rings = bp->tx_nr_rings_per_tc * tc;
9615                 netdev_set_num_tc(dev, tc);
9616         } else {
9617                 bp->tx_nr_rings = bp->tx_nr_rings_per_tc;
9618                 netdev_reset_tc(dev);
9619         }
9620         bp->tx_nr_rings += bp->tx_nr_rings_xdp;
9621         bp->cp_nr_rings = sh ? max_t(int, bp->tx_nr_rings, bp->rx_nr_rings) :
9622                                bp->tx_nr_rings + bp->rx_nr_rings;
9623
9624         if (netif_running(bp->dev))
9625                 return bnxt_open_nic(bp, true, false);
9626
9627         return 0;
9628 }
9629
9630 static int bnxt_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
9631                                   void *cb_priv)
9632 {
9633         struct bnxt *bp = cb_priv;
9634
9635         if (!bnxt_tc_flower_enabled(bp) ||
9636             !tc_cls_can_offload_and_chain0(bp->dev, type_data))
9637                 return -EOPNOTSUPP;
9638
9639         switch (type) {
9640         case TC_SETUP_CLSFLOWER:
9641                 return bnxt_tc_setup_flower(bp, bp->pf.fw_fid, type_data);
9642         default:
9643                 return -EOPNOTSUPP;
9644         }
9645 }
9646
9647 static int bnxt_setup_tc_block(struct net_device *dev,
9648                                struct tc_block_offload *f)
9649 {
9650         struct bnxt *bp = netdev_priv(dev);
9651
9652         if (f->binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
9653                 return -EOPNOTSUPP;
9654
9655         switch (f->command) {
9656         case TC_BLOCK_BIND:
9657                 return tcf_block_cb_register(f->block, bnxt_setup_tc_block_cb,
9658                                              bp, bp, f->extack);
9659         case TC_BLOCK_UNBIND:
9660                 tcf_block_cb_unregister(f->block, bnxt_setup_tc_block_cb, bp);
9661                 return 0;
9662         default:
9663                 return -EOPNOTSUPP;
9664         }
9665 }
9666
9667 static int bnxt_setup_tc(struct net_device *dev, enum tc_setup_type type,
9668                          void *type_data)
9669 {
9670         switch (type) {
9671         case TC_SETUP_BLOCK:
9672                 return bnxt_setup_tc_block(dev, type_data);
9673         case TC_SETUP_QDISC_MQPRIO: {
9674                 struct tc_mqprio_qopt *mqprio = type_data;
9675
9676                 mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;
9677
9678                 return bnxt_setup_mq_tc(dev, mqprio->num_tc);
9679         }
9680         default:
9681                 return -EOPNOTSUPP;
9682         }
9683 }
9684
9685 #ifdef CONFIG_RFS_ACCEL
9686 static bool bnxt_fltr_match(struct bnxt_ntuple_filter *f1,
9687                             struct bnxt_ntuple_filter *f2)
9688 {
9689         struct flow_keys *keys1 = &f1->fkeys;
9690         struct flow_keys *keys2 = &f2->fkeys;
9691
9692         if (keys1->addrs.v4addrs.src == keys2->addrs.v4addrs.src &&
9693             keys1->addrs.v4addrs.dst == keys2->addrs.v4addrs.dst &&
9694             keys1->ports.ports == keys2->ports.ports &&
9695             keys1->basic.ip_proto == keys2->basic.ip_proto &&
9696             keys1->basic.n_proto == keys2->basic.n_proto &&
9697             keys1->control.flags == keys2->control.flags &&
9698             ether_addr_equal(f1->src_mac_addr, f2->src_mac_addr) &&
9699             ether_addr_equal(f1->dst_mac_addr, f2->dst_mac_addr))
9700                 return true;
9701
9702         return false;
9703 }
9704
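/* aRFS .ndo_rx_flow_steer handler: dissect the skb into flow keys, drop
 * duplicates already in the hash table, allocate a software filter ID from
 * the bitmap, and queue sp_task work to program the filter into hardware.
 */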
9705 static int bnxt_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
9706                               u16 rxq_index, u32 flow_id)
9707 {
9708         struct bnxt *bp = netdev_priv(dev);
9709         struct bnxt_ntuple_filter *fltr, *new_fltr;
9710         struct flow_keys *fkeys;
9711         struct ethhdr *eth = (struct ethhdr *)skb_mac_header(skb);
9712         int rc = 0, idx, bit_id, l2_idx = 0;
9713         struct hlist_head *head;
9714
9715         if (!ether_addr_equal(dev->dev_addr, eth->h_dest)) {
9716                 struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
9717                 int off = 0, j;
9718
9719                 netif_addr_lock_bh(dev);
9720                 for (j = 0; j < vnic->uc_filter_count; j++, off += ETH_ALEN) {
9721                         if (ether_addr_equal(eth->h_dest,
9722                                              vnic->uc_list + off)) {
9723                                 l2_idx = j + 1;
9724                                 break;
9725                         }
9726                 }
9727                 netif_addr_unlock_bh(dev);
9728                 if (!l2_idx)
9729                         return -EINVAL;
9730         }
9731         new_fltr = kzalloc(sizeof(*new_fltr), GFP_ATOMIC);
9732         if (!new_fltr)
9733                 return -ENOMEM;
9734
9735         fkeys = &new_fltr->fkeys;
9736         if (!skb_flow_dissect_flow_keys(skb, fkeys, 0)) {
9737                 rc = -EPROTONOSUPPORT;
9738                 goto err_free;
9739         }
9740
9741         if ((fkeys->basic.n_proto != htons(ETH_P_IP) &&
9742              fkeys->basic.n_proto != htons(ETH_P_IPV6)) ||
9743             ((fkeys->basic.ip_proto != IPPROTO_TCP) &&
9744              (fkeys->basic.ip_proto != IPPROTO_UDP))) {
9745                 rc = -EPROTONOSUPPORT;
9746                 goto err_free;
9747         }
9748         if (fkeys->basic.n_proto == htons(ETH_P_IPV6) &&
9749             bp->hwrm_spec_code < 0x10601) {
9750                 rc = -EPROTONOSUPPORT;
9751                 goto err_free;
9752         }
9753         if ((fkeys->control.flags & FLOW_DIS_ENCAPSULATION) &&
9754             bp->hwrm_spec_code < 0x10601) {
9755                 rc = -EPROTONOSUPPORT;
9756                 goto err_free;
9757         }
9758
9759         memcpy(new_fltr->dst_mac_addr, eth->h_dest, ETH_ALEN);
9760         memcpy(new_fltr->src_mac_addr, eth->h_source, ETH_ALEN);
9761
9762         idx = skb_get_hash_raw(skb) & BNXT_NTP_FLTR_HASH_MASK;
9763         head = &bp->ntp_fltr_hash_tbl[idx];
9764         rcu_read_lock();
9765         hlist_for_each_entry_rcu(fltr, head, hash) {
9766                 if (bnxt_fltr_match(fltr, new_fltr)) {
9767                         rcu_read_unlock();
9768                         rc = 0;
9769                         goto err_free;
9770                 }
9771         }
9772         rcu_read_unlock();
9773
9774         spin_lock_bh(&bp->ntp_fltr_lock);
9775         bit_id = bitmap_find_free_region(bp->ntp_fltr_bmap,
9776                                          BNXT_NTP_FLTR_MAX_FLTR, 0);
9777         if (bit_id < 0) {
9778                 spin_unlock_bh(&bp->ntp_fltr_lock);
9779                 rc = -ENOMEM;
9780                 goto err_free;
9781         }
9782
9783         new_fltr->sw_id = (u16)bit_id;
9784         new_fltr->flow_id = flow_id;
9785         new_fltr->l2_fltr_idx = l2_idx;
9786         new_fltr->rxq = rxq_index;
9787         hlist_add_head_rcu(&new_fltr->hash, head);
9788         bp->ntp_fltr_count++;
9789         spin_unlock_bh(&bp->ntp_fltr_lock);
9790
9791         set_bit(BNXT_RX_NTP_FLTR_SP_EVENT, &bp->sp_event);
9792         bnxt_queue_sp_work(bp);
9793
9794         return new_fltr->sw_id;
9795
9796 err_free:
9797         kfree(new_fltr);
9798         return rc;
9799 }
9800
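/* Walk the n-tuple filter table from the sp workqueue: program newly
 * queued filters into hardware via HWRM, and free filters that the RFS
 * core reports as expired.  Removal takes ntp_fltr_lock and waits for an
 * RCU grace period before the filter memory is reclaimed.
 */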
9801 static void bnxt_cfg_ntp_filters(struct bnxt *bp)
9802 {
9803         int i;
9804
9805         for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++) {
9806                 struct hlist_head *head;
9807                 struct hlist_node *tmp;
9808                 struct bnxt_ntuple_filter *fltr;
9809                 int rc;
9810
9811                 head = &bp->ntp_fltr_hash_tbl[i];
9812                 hlist_for_each_entry_safe(fltr, tmp, head, hash) {
9813                         bool del = false;
9814
9815                         if (test_bit(BNXT_FLTR_VALID, &fltr->state)) {
9816                                 if (rps_may_expire_flow(bp->dev, fltr->rxq,
9817                                                         fltr->flow_id,
9818                                                         fltr->sw_id)) {
9819                                         bnxt_hwrm_cfa_ntuple_filter_free(bp,
9820                                                                          fltr);
9821                                         del = true;
9822                                 }
9823                         } else {
9824                                 rc = bnxt_hwrm_cfa_ntuple_filter_alloc(bp,
9825                                                                        fltr);
9826                                 if (rc)
9827                                         del = true;
9828                                 else
9829                                         set_bit(BNXT_FLTR_VALID, &fltr->state);
9830                         }
9831
9832                         if (del) {
9833                                 spin_lock_bh(&bp->ntp_fltr_lock);
9834                                 hlist_del_rcu(&fltr->hash);
9835                                 bp->ntp_fltr_count--;
9836                                 spin_unlock_bh(&bp->ntp_fltr_lock);
9837                                 synchronize_rcu();
9838                                 clear_bit(fltr->sw_id, bp->ntp_fltr_bmap);
9839                                 kfree(fltr);
9840                         }
9841                 }
9842         }
9843         if (test_and_clear_bit(BNXT_HWRM_PF_UNLOAD_SP_EVENT, &bp->sp_event))
9844                 netdev_info(bp->dev, "Received PF driver unload event!\n");
9845 }
9846
9847 #else
9848
9849 static void bnxt_cfg_ntp_filters(struct bnxt *bp)
9850 {
9851 }
9852
9853 #endif /* CONFIG_RFS_ACCEL */
9854
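/* UDP tunnel (VXLAN/GENEVE) port notifications.  The hardware parses a
 * single offloaded destination port per tunnel type, so each port is
 * reference counted and only the first add / last delete of a port
 * triggers the firmware update, which runs from the sp workqueue.
 */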
9855 static void bnxt_udp_tunnel_add(struct net_device *dev,
9856                                 struct udp_tunnel_info *ti)
9857 {
9858         struct bnxt *bp = netdev_priv(dev);
9859
9860         if (ti->sa_family != AF_INET6 && ti->sa_family != AF_INET)
9861                 return;
9862
9863         if (!netif_running(dev))
9864                 return;
9865
9866         switch (ti->type) {
9867         case UDP_TUNNEL_TYPE_VXLAN:
9868                 if (bp->vxlan_port_cnt && bp->vxlan_port != ti->port)
9869                         return;
9870
9871                 bp->vxlan_port_cnt++;
9872                 if (bp->vxlan_port_cnt != 1)
9873                         return;
9874
9875                 bp->vxlan_port = ti->port;
9876                 set_bit(BNXT_VXLAN_ADD_PORT_SP_EVENT, &bp->sp_event);
9877                 break;
9878         case UDP_TUNNEL_TYPE_GENEVE:
9879                 if (bp->nge_port_cnt && bp->nge_port != ti->port)
9880                         return;
9881                 bp->nge_port_cnt++;
9882                 if (bp->nge_port_cnt != 1)
9883                         return;
9884
9885                 bp->nge_port = ti->port;
9886                 set_bit(BNXT_GENEVE_ADD_PORT_SP_EVENT, &bp->sp_event);
9887                 break;
9888         default:
9889                 return;
9890         }
9891
9892         bnxt_queue_sp_work(bp);
9893 }
9894
9895 static void bnxt_udp_tunnel_del(struct net_device *dev,
9896                                 struct udp_tunnel_info *ti)
9897 {
9898         struct bnxt *bp = netdev_priv(dev);
9899
9900         if (ti->sa_family != AF_INET6 && ti->sa_family != AF_INET)
9901                 return;
9902
9903         if (!netif_running(dev))
9904                 return;
9905
9906         switch (ti->type) {
9907         case UDP_TUNNEL_TYPE_VXLAN:
9908                 if (!bp->vxlan_port_cnt || bp->vxlan_port != ti->port)
9909                         return;
9910                 bp->vxlan_port_cnt--;
9911
9912                 if (bp->vxlan_port_cnt != 0)
9913                         return;
9914
9915                 set_bit(BNXT_VXLAN_DEL_PORT_SP_EVENT, &bp->sp_event);
9916                 break;
9917         case UDP_TUNNEL_TYPE_GENEVE:
9918                 if (!bp->nge_port_cnt || bp->nge_port != ti->port)
9919                         return;
9920                 bp->nge_port_cnt--;
9921
9922                 if (bp->nge_port_cnt != 0)
9923                         return;
9924
9925                 set_bit(BNXT_GENEVE_DEL_PORT_SP_EVENT, &bp->sp_event);
9926                 break;
9927         default:
9928                 return;
9929         }
9930
9931         bnxt_queue_sp_work(bp);
9932 }
9933
9934 static int bnxt_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
9935                                struct net_device *dev, u32 filter_mask,
9936                                int nlflags)
9937 {
9938         struct bnxt *bp = netdev_priv(dev);
9939
9940         return ndo_dflt_bridge_getlink(skb, pid, seq, dev, bp->br_mode, 0, 0,
9941                                        nlflags, filter_mask, NULL);
9942 }
9943
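/* Set the bridge mode (e.g. VEB vs VEPA) carried in the IFLA_AF_SPEC
 * attribute; the new mode is programmed through firmware and cached in
 * bp->br_mode for bnxt_bridge_getlink.
 */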
9944 static int bnxt_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh,
9945                                u16 flags, struct netlink_ext_ack *extack)
9946 {
9947         struct bnxt *bp = netdev_priv(dev);
9948         struct nlattr *attr, *br_spec;
9949         int rem, rc = 0;
9950
9951         if (bp->hwrm_spec_code < 0x10708 || !BNXT_SINGLE_PF(bp))
9952                 return -EOPNOTSUPP;
9953
9954         br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
9955         if (!br_spec)
9956                 return -EINVAL;
9957
9958         nla_for_each_nested(attr, br_spec, rem) {
9959                 u16 mode;
9960
9961                 if (nla_type(attr) != IFLA_BRIDGE_MODE)
9962                         continue;
9963
9964                 if (nla_len(attr) < sizeof(mode))
9965                         return -EINVAL;
9966
9967                 mode = nla_get_u16(attr);
9968                 if (mode == bp->br_mode)
9969                         break;
9970
9971                 rc = bnxt_hwrm_set_br_mode(bp, mode);
9972                 if (!rc)
9973                         bp->br_mode = mode;
9974                 break;
9975         }
9976         return rc;
9977 }
9978
9979 static int bnxt_get_phys_port_name(struct net_device *dev, char *buf,
9980                                    size_t len)
9981 {
9982         struct bnxt *bp = netdev_priv(dev);
9983         int rc;
9984
9985         /* The PF and its VF-reps only support the switchdev framework */
9986         if (!BNXT_PF(bp))
9987                 return -EOPNOTSUPP;
9988
9989         rc = snprintf(buf, len, "p%d", bp->pf.port_id);
9990
9991         if (rc >= len)
9992                 return -EOPNOTSUPP;
9993         return 0;
9994 }
9995
9996 int bnxt_port_attr_get(struct bnxt *bp, struct switchdev_attr *attr)
9997 {
9998         if (bp->eswitch_mode != DEVLINK_ESWITCH_MODE_SWITCHDEV)
9999                 return -EOPNOTSUPP;
10000
10001         /* The PF and its VF-reps only support the switchdev framework */
10002         if (!BNXT_PF(bp))
10003                 return -EOPNOTSUPP;
10004
10005         switch (attr->id) {
10006         case SWITCHDEV_ATTR_ID_PORT_PARENT_ID:
10007                 attr->u.ppid.id_len = sizeof(bp->switch_id);
10008                 memcpy(attr->u.ppid.id, bp->switch_id, attr->u.ppid.id_len);
10009                 break;
10010         default:
10011                 return -EOPNOTSUPP;
10012         }
10013         return 0;
10014 }
10015
10016 static int bnxt_swdev_port_attr_get(struct net_device *dev,
10017                                     struct switchdev_attr *attr)
10018 {
10019         return bnxt_port_attr_get(netdev_priv(dev), attr);
10020 }
10021
10022 static const struct switchdev_ops bnxt_switchdev_ops = {
10023         .switchdev_port_attr_get        = bnxt_swdev_port_attr_get
10024 };
10025
10026 static const struct net_device_ops bnxt_netdev_ops = {
10027         .ndo_open               = bnxt_open,
10028         .ndo_start_xmit         = bnxt_start_xmit,
10029         .ndo_stop               = bnxt_close,
10030         .ndo_get_stats64        = bnxt_get_stats64,
10031         .ndo_set_rx_mode        = bnxt_set_rx_mode,
10032         .ndo_do_ioctl           = bnxt_ioctl,
10033         .ndo_validate_addr      = eth_validate_addr,
10034         .ndo_set_mac_address    = bnxt_change_mac_addr,
10035         .ndo_change_mtu         = bnxt_change_mtu,
10036         .ndo_fix_features       = bnxt_fix_features,
10037         .ndo_set_features       = bnxt_set_features,
10038         .ndo_tx_timeout         = bnxt_tx_timeout,
10039 #ifdef CONFIG_BNXT_SRIOV
10040         .ndo_get_vf_config      = bnxt_get_vf_config,
10041         .ndo_set_vf_mac         = bnxt_set_vf_mac,
10042         .ndo_set_vf_vlan        = bnxt_set_vf_vlan,
10043         .ndo_set_vf_rate        = bnxt_set_vf_bw,
10044         .ndo_set_vf_link_state  = bnxt_set_vf_link_state,
10045         .ndo_set_vf_spoofchk    = bnxt_set_vf_spoofchk,
10046         .ndo_set_vf_trust       = bnxt_set_vf_trust,
10047 #endif
10048         .ndo_setup_tc           = bnxt_setup_tc,
10049 #ifdef CONFIG_RFS_ACCEL
10050         .ndo_rx_flow_steer      = bnxt_rx_flow_steer,
10051 #endif
10052         .ndo_udp_tunnel_add     = bnxt_udp_tunnel_add,
10053         .ndo_udp_tunnel_del     = bnxt_udp_tunnel_del,
10054         .ndo_bpf                = bnxt_xdp,
10055         .ndo_bridge_getlink     = bnxt_bridge_getlink,
10056         .ndo_bridge_setlink     = bnxt_bridge_setlink,
10057         .ndo_get_phys_port_name = bnxt_get_phys_port_name
10058 };
10059
10060 static void bnxt_remove_one(struct pci_dev *pdev)
10061 {
10062         struct net_device *dev = pci_get_drvdata(pdev);
10063         struct bnxt *bp = netdev_priv(dev);
10064
10065         if (BNXT_PF(bp)) {
10066                 bnxt_sriov_disable(bp);
10067                 bnxt_dl_unregister(bp);
10068         }
10069
10070         pci_disable_pcie_error_reporting(pdev);
10071         unregister_netdev(dev);
10072         bnxt_shutdown_tc(bp);
10073         bnxt_cancel_sp_work(bp);
10074         bp->sp_event = 0;
10075
10076         bnxt_clear_int_mode(bp);
10077         bnxt_hwrm_func_drv_unrgtr(bp);
10078         bnxt_free_hwrm_resources(bp);
10079         bnxt_free_hwrm_short_cmd_req(bp);
10080         bnxt_ethtool_free(bp);
10081         bnxt_dcb_free(bp);
10082         kfree(bp->edev);
10083         bp->edev = NULL;
10084         bnxt_free_ctx_mem(bp);
10085         kfree(bp->ctx);
10086         bp->ctx = NULL;
10087         bnxt_cleanup_pci(bp);
10088         bnxt_free_port_stats(bp);
10089         free_netdev(dev);
10090 }
10091
10092 static int bnxt_probe_phy(struct bnxt *bp)
10093 {
10094         int rc = 0;
10095         struct bnxt_link_info *link_info = &bp->link_info;
10096
10097         rc = bnxt_hwrm_phy_qcaps(bp);
10098         if (rc) {
10099                 netdev_err(bp->dev, "Probe phy can't get phy capabilities (rc: %x)\n",
10100                            rc);
10101                 return rc;
10102         }
10103         mutex_init(&bp->link_lock);
10104
10105         rc = bnxt_update_link(bp, false);
10106         if (rc) {
10107                 netdev_err(bp->dev, "Probe phy can't update link (rc: %x)\n",
10108                            rc);
10109                 return rc;
10110         }
10111
10112         /* Older firmware does not have supported_auto_speeds, so assume
10113          * that all supported speeds can be autonegotiated.
10114          */
10115         if (link_info->auto_link_speeds && !link_info->support_auto_speeds)
10116                 link_info->support_auto_speeds = link_info->support_speeds;
10117
10118         /* Initialize the ethtool settings copy with NVM settings */
10119         if (BNXT_AUTO_MODE(link_info->auto_mode)) {
10120                 link_info->autoneg = BNXT_AUTONEG_SPEED;
10121                 if (bp->hwrm_spec_code >= 0x10201) {
10122                         if (link_info->auto_pause_setting &
10123                             PORT_PHY_CFG_REQ_AUTO_PAUSE_AUTONEG_PAUSE)
10124                                 link_info->autoneg |= BNXT_AUTONEG_FLOW_CTRL;
10125                 } else {
10126                         link_info->autoneg |= BNXT_AUTONEG_FLOW_CTRL;
10127                 }
10128                 link_info->advertising = link_info->auto_link_speeds;
10129         } else {
10130                 link_info->req_link_speed = link_info->force_link_speed;
10131                 link_info->req_duplex = link_info->duplex_setting;
10132         }
10133         if (link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL)
10134                 link_info->req_flow_ctrl =
10135                         link_info->auto_pause_setting & BNXT_LINK_PAUSE_BOTH;
10136         else
10137                 link_info->req_flow_ctrl = link_info->force_pause_setting;
10138         return rc;
10139 }
10140
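/* Read the MSI-X table size from PCI config space.  The QSIZE field is
 * encoded as the table size minus one, hence the +1; a device without an
 * MSI-X capability is limited to a single (INTx/MSI) vector.
 */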
10141 static int bnxt_get_max_irq(struct pci_dev *pdev)
10142 {
10143         u16 ctrl;
10144
10145         if (!pdev->msix_cap)
10146                 return 1;
10147
10148         pci_read_config_word(pdev, pdev->msix_cap + PCI_MSIX_FLAGS, &ctrl);
10149         return (ctrl & PCI_MSIX_FLAGS_QSIZE) + 1;
10150 }
10151
10152 static void _bnxt_get_max_rings(struct bnxt *bp, int *max_rx, int *max_tx,
10153                                 int *max_cp)
10154 {
10155         struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
10156         int max_ring_grps = 0, max_irq;
10157
10158         *max_tx = hw_resc->max_tx_rings;
10159         *max_rx = hw_resc->max_rx_rings;
10160         *max_cp = bnxt_get_max_func_cp_rings_for_en(bp);
10161         max_irq = min_t(int, bnxt_get_max_func_irqs(bp) -
10162                         bnxt_get_ulp_msix_num(bp),
10163                         hw_resc->max_stat_ctxs - bnxt_get_ulp_stat_ctxs(bp));
10164         if (!(bp->flags & BNXT_FLAG_CHIP_P5))
10165                 *max_cp = min_t(int, *max_cp, max_irq);
10166         max_ring_grps = hw_resc->max_hw_ring_grps;
10167         if (BNXT_CHIP_TYPE_NITRO_A0(bp) && BNXT_PF(bp)) {
10168                 *max_cp -= 1;
10169                 *max_rx -= 2;
10170         }
10171         if (bp->flags & BNXT_FLAG_AGG_RINGS)
10172                 *max_rx >>= 1;
10173         if (bp->flags & BNXT_FLAG_CHIP_P5) {
10174                 bnxt_trim_rings(bp, max_rx, max_tx, *max_cp, false);
10175                 /* On P5 chips, the max_cp output parameter means available NQs */
10176                 *max_cp = max_irq;
10177         }
10178         *max_rx = min_t(int, *max_rx, max_ring_grps);
10179 }
10180
10181 int bnxt_get_max_rings(struct bnxt *bp, int *max_rx, int *max_tx, bool shared)
10182 {
10183         int rx, tx, cp;
10184
10185         _bnxt_get_max_rings(bp, &rx, &tx, &cp);
10186         *max_rx = rx;
10187         *max_tx = tx;
10188         if (!rx || !tx || !cp)
10189                 return -ENOMEM;
10190
10191         return bnxt_trim_rings(bp, max_rx, max_tx, cp, shared);
10192 }
10193
10194 static int bnxt_get_dflt_rings(struct bnxt *bp, int *max_rx, int *max_tx,
10195                                bool shared)
10196 {
10197         int rc;
10198
10199         rc = bnxt_get_max_rings(bp, max_rx, max_tx, shared);
10200         if (rc && (bp->flags & BNXT_FLAG_AGG_RINGS)) {
10201                 /* Not enough rings, try disabling agg rings. */
10202                 bp->flags &= ~BNXT_FLAG_AGG_RINGS;
10203                 rc = bnxt_get_max_rings(bp, max_rx, max_tx, shared);
10204                 if (rc) {
10205                         /* set BNXT_FLAG_AGG_RINGS back for consistency */
10206                         bp->flags |= BNXT_FLAG_AGG_RINGS;
10207                         return rc;
10208                 }
10209                 bp->flags |= BNXT_FLAG_NO_AGG_RINGS;
10210                 bp->dev->hw_features &= ~(NETIF_F_LRO | NETIF_F_GRO_HW);
10211                 bp->dev->features &= ~(NETIF_F_LRO | NETIF_F_GRO_HW);
10212                 bnxt_set_ring_params(bp);
10213         }
10214
10215         if (bp->flags & BNXT_FLAG_ROCE_CAP) {
10216                 int max_cp, max_stat, max_irq;
10217
10218                 /* Reserve minimum resources for RoCE */
10219                 max_cp = bnxt_get_max_func_cp_rings(bp);
10220                 max_stat = bnxt_get_max_func_stat_ctxs(bp);
10221                 max_irq = bnxt_get_max_func_irqs(bp);
10222                 if (max_cp <= BNXT_MIN_ROCE_CP_RINGS ||
10223                     max_irq <= BNXT_MIN_ROCE_CP_RINGS ||
10224                     max_stat <= BNXT_MIN_ROCE_STAT_CTXS)
10225                         return 0;
10226
10227                 max_cp -= BNXT_MIN_ROCE_CP_RINGS;
10228                 max_irq -= BNXT_MIN_ROCE_CP_RINGS;
10229                 max_stat -= BNXT_MIN_ROCE_STAT_CTXS;
10230                 max_cp = min_t(int, max_cp, max_irq);
10231                 max_cp = min_t(int, max_cp, max_stat);
10232                 rc = bnxt_trim_rings(bp, max_rx, max_tx, max_cp, shared);
10233                 if (rc)
10234                         rc = 0;
10235         }
10236         return rc;
10237 }
10238
10239 /* In initial default shared ring setting, each shared ring must have a
10240  * RX/TX ring pair.
10241  */
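/* For example, a default of 8 TX rings per TC with only 4 RX rings
 * available is trimmed to cp = rx = tx = 4, one RX/TX ring pair per
 * completion ring.
 */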
10242 static void bnxt_trim_dflt_sh_rings(struct bnxt *bp)
10243 {
10244         bp->cp_nr_rings = min_t(int, bp->tx_nr_rings_per_tc, bp->rx_nr_rings);
10245         bp->rx_nr_rings = bp->cp_nr_rings;
10246         bp->tx_nr_rings_per_tc = bp->cp_nr_rings;
10247         bp->tx_nr_rings = bp->tx_nr_rings_per_tc;
10248 }
10249
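/* Choose the initial ring counts: start from the default RSS queue
 * count, scale it down on multi-port cards, clamp to what the hardware
 * supports, then reserve the rings with firmware and re-trim if the
 * reservation returned fewer rings than requested.
 */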
10250 static int bnxt_set_dflt_rings(struct bnxt *bp, bool sh)
10251 {
10252         int dflt_rings, max_rx_rings, max_tx_rings, rc;
10253
10254         if (!bnxt_can_reserve_rings(bp))
10255                 return 0;
10256
10257         if (sh)
10258                 bp->flags |= BNXT_FLAG_SHARED_RINGS;
10259         dflt_rings = netif_get_num_default_rss_queues();
10260         /* Reduce default rings on multi-port cards so that total default
10261          * rings do not exceed CPU count.
10262          */
10263         if (bp->port_count > 1) {
10264                 int max_rings =
10265                         max_t(int, num_online_cpus() / bp->port_count, 1);
10266
10267                 dflt_rings = min_t(int, dflt_rings, max_rings);
10268         }
10269         rc = bnxt_get_dflt_rings(bp, &max_rx_rings, &max_tx_rings, sh);
10270         if (rc)
10271                 return rc;
10272         bp->rx_nr_rings = min_t(int, dflt_rings, max_rx_rings);
10273         bp->tx_nr_rings_per_tc = min_t(int, dflt_rings, max_tx_rings);
10274         if (sh)
10275                 bnxt_trim_dflt_sh_rings(bp);
10276         else
10277                 bp->cp_nr_rings = bp->tx_nr_rings_per_tc + bp->rx_nr_rings;
10278         bp->tx_nr_rings = bp->tx_nr_rings_per_tc;
10279
10280         rc = __bnxt_reserve_rings(bp);
10281         if (rc)
10282                 netdev_warn(bp->dev, "Unable to reserve tx rings\n");
10283         bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
10284         if (sh)
10285                 bnxt_trim_dflt_sh_rings(bp);
10286
10287         /* Rings may have been trimmed, re-reserve the trimmed rings. */
10288         if (bnxt_need_reserve_rings(bp)) {
10289                 rc = __bnxt_reserve_rings(bp);
10290                 if (rc)
10291                         netdev_warn(bp->dev, "2nd ring reservation failed.\n");
10292                 bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
10293         }
10294         if (BNXT_CHIP_TYPE_NITRO_A0(bp)) {
10295                 bp->rx_nr_rings++;
10296                 bp->cp_nr_rings++;
10297         }
10298         return rc;
10299 }
10300
10301 static int bnxt_init_dflt_ring_mode(struct bnxt *bp)
10302 {
10303         int rc;
10304
10305         if (bp->tx_nr_rings)
10306                 return 0;
10307
10308         bnxt_ulp_irq_stop(bp);
10309         bnxt_clear_int_mode(bp);
10310         rc = bnxt_set_dflt_rings(bp, true);
10311         if (rc) {
10312                 netdev_err(bp->dev, "Not enough rings available.\n");
10313                 goto init_dflt_ring_err;
10314         }
10315         rc = bnxt_init_int_mode(bp);
10316         if (rc)
10317                 goto init_dflt_ring_err;
10318
10319         bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
10320         if (bnxt_rfs_supported(bp) && bnxt_rfs_capable(bp)) {
10321                 bp->flags |= BNXT_FLAG_RFS;
10322                 bp->dev->features |= NETIF_F_NTUPLE;
10323         }
10324 init_dflt_ring_err:
10325         bnxt_ulp_irq_restart(bp, rc);
10326         return rc;
10327 }
10328
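/* Re-query function resources and re-initialize interrupts after they
 * may have been redistributed, e.g. when SR-IOV is reconfigured.  The
 * NIC is closed across the transition and reopened if it was running.
 */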
10329 int bnxt_restore_pf_fw_resources(struct bnxt *bp)
10330 {
10331         int rc;
10332
10333         ASSERT_RTNL();
10334         bnxt_hwrm_func_qcaps(bp);
10335
10336         if (netif_running(bp->dev))
10337                 __bnxt_close_nic(bp, true, false);
10338
10339         bnxt_ulp_irq_stop(bp);
10340         bnxt_clear_int_mode(bp);
10341         rc = bnxt_init_int_mode(bp);
10342         bnxt_ulp_irq_restart(bp, rc);
10343
10344         if (netif_running(bp->dev)) {
10345                 if (rc)
10346                         dev_close(bp->dev);
10347                 else
10348                         rc = bnxt_open_nic(bp, true, false);
10349         }
10350
10351         return rc;
10352 }
10353
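/* Set the initial MAC address.  The PF uses the address provided by
 * firmware.  A VF prefers the administratively assigned address, if any,
 * falling back to a random one, and then asks the PF to approve it;
 * approval is relaxed for admin-assigned MACs because older PF drivers
 * or firmware may not handle the check correctly.
 */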
10354 static int bnxt_init_mac_addr(struct bnxt *bp)
10355 {
10356         int rc = 0;
10357
10358         if (BNXT_PF(bp)) {
10359                 memcpy(bp->dev->dev_addr, bp->pf.mac_addr, ETH_ALEN);
10360         } else {
10361 #ifdef CONFIG_BNXT_SRIOV
10362                 struct bnxt_vf_info *vf = &bp->vf;
10363                 bool strict_approval = true;
10364
10365                 if (is_valid_ether_addr(vf->mac_addr)) {
10366                         /* overwrite netdev dev_addr with admin VF MAC */
10367                         memcpy(bp->dev->dev_addr, vf->mac_addr, ETH_ALEN);
10368                         /* Older PF driver or firmware may not approve this
10369                          * correctly.
10370                          */
10371                         strict_approval = false;
10372                 } else {
10373                         eth_hw_addr_random(bp->dev);
10374                 }
10375                 rc = bnxt_approve_mac(bp, bp->dev->dev_addr, strict_approval);
10376 #endif
10377         }
10378         return rc;
10379 }
10380
10381 static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
10382 {
10383         static int version_printed;
10384         struct net_device *dev;
10385         struct bnxt *bp;
10386         int rc, max_irqs;
10387
10388         if (pci_is_bridge(pdev))
10389                 return -ENODEV;
10390
10391         if (version_printed++ == 0)
10392                 pr_info("%s", version);
10393
10394         max_irqs = bnxt_get_max_irq(pdev);
10395         dev = alloc_etherdev_mq(sizeof(*bp), max_irqs);
10396         if (!dev)
10397                 return -ENOMEM;
10398
10399         bp = netdev_priv(dev);
10400         bnxt_set_max_func_irqs(bp, max_irqs);
10401
10402         if (bnxt_vf_pciid(ent->driver_data))
10403                 bp->flags |= BNXT_FLAG_VF;
10404
10405         if (pdev->msix_cap)
10406                 bp->flags |= BNXT_FLAG_MSIX_CAP;
10407
10408         rc = bnxt_init_board(pdev, dev);
10409         if (rc < 0)
10410                 goto init_err_free;
10411
10412         dev->netdev_ops = &bnxt_netdev_ops;
10413         dev->watchdog_timeo = BNXT_TX_TIMEOUT;
10414         dev->ethtool_ops = &bnxt_ethtool_ops;
10415         SWITCHDEV_SET_OPS(dev, &bnxt_switchdev_ops);
10416         pci_set_drvdata(pdev, dev);
10417
10418         rc = bnxt_alloc_hwrm_resources(bp);
10419         if (rc)
10420                 goto init_err_pci_clean;
10421
10422         mutex_init(&bp->hwrm_cmd_lock);
10423         rc = bnxt_hwrm_ver_get(bp);
10424         if (rc)
10425                 goto init_err_pci_clean;
10426
10427         if (bp->fw_cap & BNXT_FW_CAP_KONG_MB_CHNL) {
10428                 rc = bnxt_alloc_kong_hwrm_resources(bp);
10429                 if (rc)
10430                         bp->fw_cap &= ~BNXT_FW_CAP_KONG_MB_CHNL;
10431         }
10432
10433         if ((bp->fw_cap & BNXT_FW_CAP_SHORT_CMD) ||
10434             bp->hwrm_max_ext_req_len > BNXT_HWRM_MAX_REQ_LEN) {
10435                 rc = bnxt_alloc_hwrm_short_cmd_req(bp);
10436                 if (rc)
10437                         goto init_err_pci_clean;
10438         }
10439
10440         if (BNXT_CHIP_P5(bp))
10441                 bp->flags |= BNXT_FLAG_CHIP_P5;
10442
10443         rc = bnxt_hwrm_func_reset(bp);
10444         if (rc)
10445                 goto init_err_pci_clean;
10446
10447         bnxt_hwrm_fw_set_time(bp);
10448
10449         dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_SG |
10450                            NETIF_F_TSO | NETIF_F_TSO6 |
10451                            NETIF_F_GSO_UDP_TUNNEL | NETIF_F_GSO_GRE |
10452                            NETIF_F_GSO_IPXIP4 |
10453                            NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_GSO_GRE_CSUM |
10454                            NETIF_F_GSO_PARTIAL | NETIF_F_RXHASH |
10455                            NETIF_F_RXCSUM | NETIF_F_GRO;
10456
10457         if (BNXT_SUPPORTS_TPA(bp))
10458                 dev->hw_features |= NETIF_F_LRO;
10459
10460         dev->hw_enc_features =
10461                         NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_SG |
10462                         NETIF_F_TSO | NETIF_F_TSO6 |
10463                         NETIF_F_GSO_UDP_TUNNEL | NETIF_F_GSO_GRE |
10464                         NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_GSO_GRE_CSUM |
10465                         NETIF_F_GSO_IPXIP4 | NETIF_F_GSO_PARTIAL;
10466         dev->gso_partial_features = NETIF_F_GSO_UDP_TUNNEL_CSUM |
10467                                     NETIF_F_GSO_GRE_CSUM;
10468         dev->vlan_features = dev->hw_features | NETIF_F_HIGHDMA;
10469         dev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_TX |
10470                             NETIF_F_HW_VLAN_STAG_RX | NETIF_F_HW_VLAN_STAG_TX;
10471         if (BNXT_SUPPORTS_TPA(bp))
10472                 dev->hw_features |= NETIF_F_GRO_HW;
10473         dev->features |= dev->hw_features | NETIF_F_HIGHDMA;
10474         if (dev->features & NETIF_F_GRO_HW)
10475                 dev->features &= ~NETIF_F_LRO;
10476         dev->priv_flags |= IFF_UNICAST_FLT;
10477
10478 #ifdef CONFIG_BNXT_SRIOV
10479         init_waitqueue_head(&bp->sriov_cfg_wait);
10480         mutex_init(&bp->sriov_lock);
10481 #endif
10482         if (BNXT_SUPPORTS_TPA(bp)) {
10483                 bp->gro_func = bnxt_gro_func_5730x;
10484                 if (BNXT_CHIP_P4(bp))
10485                         bp->gro_func = bnxt_gro_func_5731x;
10486         }
10487         if (!BNXT_CHIP_P4_PLUS(bp))
10488                 bp->flags |= BNXT_FLAG_DOUBLE_DB;
10489
10490         rc = bnxt_hwrm_func_drv_rgtr(bp);
10491         if (rc)
10492                 goto init_err_pci_clean;
10493
10494         rc = bnxt_hwrm_func_rgtr_async_events(bp, NULL, 0);
10495         if (rc)
10496                 goto init_err_pci_clean;
10497
10498         bp->ulp_probe = bnxt_ulp_probe;
10499
10500         rc = bnxt_hwrm_queue_qportcfg(bp);
10501         if (rc) {
10502                 netdev_err(bp->dev, "hwrm query qportcfg failure rc: %x\n",
10503                            rc);
10504                 rc = -1;
10505                 goto init_err_pci_clean;
10506         }
10507         /* Get the MAX capabilities for this function */
10508         rc = bnxt_hwrm_func_qcaps(bp);
10509         if (rc) {
10510                 netdev_err(bp->dev, "hwrm query capability failure rc: %x\n",
10511                            rc);
10512                 rc = -1;
10513                 goto init_err_pci_clean;
10514         }
10515         rc = bnxt_init_mac_addr(bp);
10516         if (rc) {
10517                 dev_err(&pdev->dev, "Unable to initialize MAC address.\n");
10518                 rc = -EADDRNOTAVAIL;
10519                 goto init_err_pci_clean;
10520         }
10521
10522         bnxt_hwrm_func_qcfg(bp);
10523         bnxt_hwrm_vnic_qcaps(bp);
10524         bnxt_hwrm_port_led_qcaps(bp);
10525         bnxt_ethtool_init(bp);
10526         bnxt_dcb_init(bp);
10527
10528         /* MTU range: 60 - FW defined max */
10529         dev->min_mtu = ETH_ZLEN;
10530         dev->max_mtu = bp->max_mtu;
10531
10532         rc = bnxt_probe_phy(bp);
10533         if (rc)
10534                 goto init_err_pci_clean;
10535
10536         bnxt_set_rx_skb_mode(bp, false);
10537         bnxt_set_tpa_flags(bp);
10538         bnxt_set_ring_params(bp);
10539         rc = bnxt_set_dflt_rings(bp, true);
10540         if (rc) {
10541                 netdev_err(bp->dev, "Not enough rings available.\n");
10542                 rc = -ENOMEM;
10543                 goto init_err_pci_clean;
10544         }
10545
10546         /* Default RSS hash cfg. */
10547         bp->rss_hash_cfg = VNIC_RSS_CFG_REQ_HASH_TYPE_IPV4 |
10548                            VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV4 |
10549                            VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6 |
10550                            VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV6;
10551         if (BNXT_CHIP_P4(bp) && bp->hwrm_spec_code >= 0x10501) {
10552                 bp->flags |= BNXT_FLAG_UDP_RSS_CAP;
10553                 bp->rss_hash_cfg |= VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV4 |
10554                                     VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV6;
10555         }
10556
10557         if (bnxt_rfs_supported(bp)) {
10558                 dev->hw_features |= NETIF_F_NTUPLE;
10559                 if (bnxt_rfs_capable(bp)) {
10560                         bp->flags |= BNXT_FLAG_RFS;
10561                         dev->features |= NETIF_F_NTUPLE;
10562                 }
10563         }
10564
10565         if (dev->hw_features & NETIF_F_HW_VLAN_CTAG_RX)
10566                 bp->flags |= BNXT_FLAG_STRIP_VLAN;
10567
10568         rc = bnxt_init_int_mode(bp);
10569         if (rc)
10570                 goto init_err_pci_clean;
10571
10572         /* No TC has been set yet and rings may have been trimmed due to
10573          * limited MSIX, so we re-initialize the TX rings per TC.
10574          */
10575         bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
10576
10577         bnxt_get_wol_settings(bp);
10578         if (bp->flags & BNXT_FLAG_WOL_CAP)
10579                 device_set_wakeup_enable(&pdev->dev, bp->wol);
10580         else
10581                 device_set_wakeup_capable(&pdev->dev, false);
10582
10583         bnxt_hwrm_set_cache_line_size(bp, cache_line_size());
10584
10585         bnxt_hwrm_coal_params_qcaps(bp);
10586
10587         if (BNXT_PF(bp)) {
10588                 if (!bnxt_pf_wq) {
10589                         bnxt_pf_wq =
10590                                 create_singlethread_workqueue("bnxt_pf_wq");
10591                         if (!bnxt_pf_wq) {
10592                                 dev_err(&pdev->dev, "Unable to create workqueue.\n");
10593                                 rc = -ENOMEM;
10594                                 goto init_err_pci_clean;
10594                         }
10595                 }
10596                 bnxt_init_tc(bp);
10597         }
10598
10599         rc = register_netdev(dev);
10600         if (rc)
10601                 goto init_err_cleanup_tc;
10602
10603         if (BNXT_PF(bp))
10604                 bnxt_dl_register(bp);
10605
10606         netdev_info(dev, "%s found at mem %lx, node addr %pM\n",
10607                     board_info[ent->driver_data].name,
10608                     (long)pci_resource_start(pdev, 0), dev->dev_addr);
10609         pcie_print_link_status(pdev);
10610
10611         return 0;
10612
10613 init_err_cleanup_tc:
10614         bnxt_shutdown_tc(bp);
10615         bnxt_clear_int_mode(bp);
10616
10617 init_err_pci_clean:
10618         bnxt_free_hwrm_resources(bp);
10619         bnxt_free_ctx_mem(bp);
10620         kfree(bp->ctx);
10621         bp->ctx = NULL;
10622         bnxt_cleanup_pci(bp);
10623
10624 init_err_free:
10625         free_netdev(dev);
10626         return rc;
10627 }
10628
10629 static void bnxt_shutdown(struct pci_dev *pdev)
10630 {
10631         struct net_device *dev = pci_get_drvdata(pdev);
10632         struct bnxt *bp;
10633
10634         if (!dev)
10635                 return;
10636
10637         rtnl_lock();
10638         bp = netdev_priv(dev);
10639         if (!bp)
10640                 goto shutdown_exit;
10641
10642         if (netif_running(dev))
10643                 dev_close(dev);
10644
10645         bnxt_ulp_shutdown(bp);
10646
10647         if (system_state == SYSTEM_POWER_OFF) {
10648                 bnxt_clear_int_mode(bp);
10649                 pci_wake_from_d3(pdev, bp->wol);
10650                 pci_set_power_state(pdev, PCI_D3hot);
10651         }
10652
10653 shutdown_exit:
10654         rtnl_unlock();
10655 }
10656
10657 #ifdef CONFIG_PM_SLEEP
10658 static int bnxt_suspend(struct device *device)
10659 {
10660         struct pci_dev *pdev = to_pci_dev(device);
10661         struct net_device *dev = pci_get_drvdata(pdev);
10662         struct bnxt *bp = netdev_priv(dev);
10663         int rc = 0;
10664
10665         rtnl_lock();
10666         if (netif_running(dev)) {
10667                 netif_device_detach(dev);
10668                 rc = bnxt_close(dev);
10669         }
10670         bnxt_hwrm_func_drv_unrgtr(bp);
10671         rtnl_unlock();
10672         return rc;
10673 }
10674
10675 static int bnxt_resume(struct device *device)
10676 {
10677         struct pci_dev *pdev = to_pci_dev(device);
10678         struct net_device *dev = pci_get_drvdata(pdev);
10679         struct bnxt *bp = netdev_priv(dev);
10680         int rc = 0;
10681
10682         rtnl_lock();
10683         if (bnxt_hwrm_ver_get(bp) || bnxt_hwrm_func_drv_rgtr(bp)) {
10684                 rc = -ENODEV;
10685                 goto resume_exit;
10686         }
10687         rc = bnxt_hwrm_func_reset(bp);
10688         if (rc) {
10689                 rc = -EBUSY;
10690                 goto resume_exit;
10691         }
10692         bnxt_get_wol_settings(bp);
10693         if (netif_running(dev)) {
10694                 rc = bnxt_open(dev);
10695                 if (!rc)
10696                         netif_device_attach(dev);
10697         }
10698
10699 resume_exit:
10700         rtnl_unlock();
10701         return rc;
10702 }
10703
10704 static SIMPLE_DEV_PM_OPS(bnxt_pm_ops, bnxt_suspend, bnxt_resume);
10705 #define BNXT_PM_OPS (&bnxt_pm_ops)
10706
10707 #else
10708
10709 #define BNXT_PM_OPS NULL
10710
10711 #endif /* CONFIG_PM_SLEEP */
10712
10713 /**
10714  * bnxt_io_error_detected - called when PCI error is detected
10715  * @pdev: Pointer to PCI device
10716  * @state: The current pci connection state
10717  *
10718  * This function is called after a PCI bus error affecting
10719  * this device has been detected.
10720  */
10721 static pci_ers_result_t bnxt_io_error_detected(struct pci_dev *pdev,
10722                                                pci_channel_state_t state)
10723 {
10724         struct net_device *netdev = pci_get_drvdata(pdev);
10725         struct bnxt *bp = netdev_priv(netdev);
10726
10727         netdev_info(netdev, "PCI I/O error detected\n");
10728
10729         rtnl_lock();
10730         netif_device_detach(netdev);
10731
10732         bnxt_ulp_stop(bp);
10733
10734         if (state == pci_channel_io_perm_failure) {
10735                 rtnl_unlock();
10736                 return PCI_ERS_RESULT_DISCONNECT;
10737         }
10738
10739         if (netif_running(netdev))
10740                 bnxt_close(netdev);
10741
10742         pci_disable_device(pdev);
10743         rtnl_unlock();
10744
10745         /* Request a slot reset. */
10746         return PCI_ERS_RESULT_NEED_RESET;
10747 }
10748
10749 /**
10750  * bnxt_io_slot_reset - called after the pci bus has been reset.
10751  * @pdev: Pointer to PCI device
10752  *
10753  * Restart the card from scratch, as if from a cold-boot.
10754  * At this point, the card has experienced a hard reset,
10755  * followed by fixups by BIOS, and has its config space
10756  * set up identically to what it was at cold boot.
10757  */
10758 static pci_ers_result_t bnxt_io_slot_reset(struct pci_dev *pdev)
10759 {
10760         struct net_device *netdev = pci_get_drvdata(pdev);
10761         struct bnxt *bp = netdev_priv(netdev);
10762         int err = 0;
10763         pci_ers_result_t result = PCI_ERS_RESULT_DISCONNECT;
10764
10765         netdev_info(bp->dev, "PCI Slot Reset\n");
10766
10767         rtnl_lock();
10768
10769         if (pci_enable_device(pdev)) {
10770                 dev_err(&pdev->dev,
10771                         "Cannot re-enable PCI device after reset.\n");
10772         } else {
10773                 pci_set_master(pdev);
10774
10775                 err = bnxt_hwrm_func_reset(bp);
10776                 if (!err && netif_running(netdev))
10777                         err = bnxt_open(netdev);
10778
10779                 if (!err) {
10780                         result = PCI_ERS_RESULT_RECOVERED;
10781                         bnxt_ulp_start(bp);
10782                 }
10783         }
10784
10785         if (result != PCI_ERS_RESULT_RECOVERED && netif_running(netdev))
10786                 dev_close(netdev);
10787
10788         rtnl_unlock();
10789
10790         return result;
10791 }
10792
10793 /**
10794  * bnxt_io_resume - called when traffic can start flowing again.
10795  * @pdev: Pointer to PCI device
10796  *
10797  * This callback is called when the error recovery driver tells
10798  * us that it's OK to resume normal operation.
10799  */
10800 static void bnxt_io_resume(struct pci_dev *pdev)
10801 {
10802         struct net_device *netdev = pci_get_drvdata(pdev);
10803
10804         rtnl_lock();
10805
10806         netif_device_attach(netdev);
10807
10808         rtnl_unlock();
10809 }
10810
10811 static const struct pci_error_handlers bnxt_err_handler = {
10812         .error_detected = bnxt_io_error_detected,
10813         .slot_reset     = bnxt_io_slot_reset,
10814         .resume         = bnxt_io_resume
10815 };
10816
10817 static struct pci_driver bnxt_pci_driver = {
10818         .name           = DRV_MODULE_NAME,
10819         .id_table       = bnxt_pci_tbl,
10820         .probe          = bnxt_init_one,
10821         .remove         = bnxt_remove_one,
10822         .shutdown       = bnxt_shutdown,
10823         .driver.pm      = BNXT_PM_OPS,
10824         .err_handler    = &bnxt_err_handler,
10825 #if defined(CONFIG_BNXT_SRIOV)
10826         .sriov_configure = bnxt_sriov_configure,
10827 #endif
10828 };
10829
10830 static int __init bnxt_init(void)
10831 {
10832         bnxt_debug_init();
10833         return pci_register_driver(&bnxt_pci_driver);
10834 }
10835
10836 static void __exit bnxt_exit(void)
10837 {
10838         pci_unregister_driver(&bnxt_pci_driver);
10839         if (bnxt_pf_wq)
10840                 destroy_workqueue(bnxt_pf_wq);
10841         bnxt_debug_exit();
10842 }
10843
10844 module_init(bnxt_init);
10845 module_exit(bnxt_exit);