bnxt_en: Wait longer for the firmware message response to complete.
1 /* Broadcom NetXtreme-C/E network driver.
2  *
3  * Copyright (c) 2014-2016 Broadcom Corporation
4  * Copyright (c) 2016-2018 Broadcom Limited
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License as published by
8  * the Free Software Foundation.
9  */
10
11 #include <linux/module.h>
12
13 #include <linux/stringify.h>
14 #include <linux/kernel.h>
15 #include <linux/timer.h>
16 #include <linux/errno.h>
17 #include <linux/ioport.h>
18 #include <linux/slab.h>
19 #include <linux/vmalloc.h>
20 #include <linux/interrupt.h>
21 #include <linux/pci.h>
22 #include <linux/netdevice.h>
23 #include <linux/etherdevice.h>
24 #include <linux/skbuff.h>
25 #include <linux/dma-mapping.h>
26 #include <linux/bitops.h>
27 #include <linux/io.h>
28 #include <linux/irq.h>
29 #include <linux/delay.h>
30 #include <asm/byteorder.h>
31 #include <asm/page.h>
32 #include <linux/time.h>
33 #include <linux/mii.h>
34 #include <linux/if.h>
35 #include <linux/if_vlan.h>
36 #include <linux/if_bridge.h>
37 #include <linux/rtc.h>
38 #include <linux/bpf.h>
39 #include <net/ip.h>
40 #include <net/tcp.h>
41 #include <net/udp.h>
42 #include <net/checksum.h>
43 #include <net/ip6_checksum.h>
44 #include <net/udp_tunnel.h>
45 #include <linux/workqueue.h>
46 #include <linux/prefetch.h>
47 #include <linux/cache.h>
48 #include <linux/log2.h>
49 #include <linux/aer.h>
50 #include <linux/bitmap.h>
51 #include <linux/cpu_rmap.h>
52 #include <linux/cpumask.h>
53 #include <net/pkt_cls.h>
54 #include <linux/hwmon.h>
55 #include <linux/hwmon-sysfs.h>
56
57 #include "bnxt_hsi.h"
58 #include "bnxt.h"
59 #include "bnxt_ulp.h"
60 #include "bnxt_sriov.h"
61 #include "bnxt_ethtool.h"
62 #include "bnxt_dcb.h"
63 #include "bnxt_xdp.h"
64 #include "bnxt_vfr.h"
65 #include "bnxt_tc.h"
66 #include "bnxt_devlink.h"
67 #include "bnxt_debugfs.h"
68
69 #define BNXT_TX_TIMEOUT         (5 * HZ)
70
71 static const char version[] =
72         "Broadcom NetXtreme-C/E driver " DRV_MODULE_NAME " v" DRV_MODULE_VERSION "\n";
73
74 MODULE_LICENSE("GPL");
75 MODULE_DESCRIPTION("Broadcom BCM573xx network driver");
76 MODULE_VERSION(DRV_MODULE_VERSION);
77
78 #define BNXT_RX_OFFSET (NET_SKB_PAD + NET_IP_ALIGN)
79 #define BNXT_RX_DMA_OFFSET NET_SKB_PAD
80 #define BNXT_RX_COPY_THRESH 256
81
82 #define BNXT_TX_PUSH_THRESH 164
83
84 enum board_idx {
85         BCM57301,
86         BCM57302,
87         BCM57304,
88         BCM57417_NPAR,
89         BCM58700,
90         BCM57311,
91         BCM57312,
92         BCM57402,
93         BCM57404,
94         BCM57406,
95         BCM57402_NPAR,
96         BCM57407,
97         BCM57412,
98         BCM57414,
99         BCM57416,
100         BCM57417,
101         BCM57412_NPAR,
102         BCM57314,
103         BCM57417_SFP,
104         BCM57416_SFP,
105         BCM57404_NPAR,
106         BCM57406_NPAR,
107         BCM57407_SFP,
108         BCM57407_NPAR,
109         BCM57414_NPAR,
110         BCM57416_NPAR,
111         BCM57452,
112         BCM57454,
113         BCM5745x_NPAR,
114         BCM57508,
115         BCM58802,
116         BCM58804,
117         BCM58808,
118         NETXTREME_E_VF,
119         NETXTREME_C_VF,
120         NETXTREME_S_VF,
121         NETXTREME_E_P5_VF,
122 };
123
124 /* indexed by enum above */
125 static const struct {
126         char *name;
127 } board_info[] = {
128         [BCM57301] = { "Broadcom BCM57301 NetXtreme-C 10Gb Ethernet" },
129         [BCM57302] = { "Broadcom BCM57302 NetXtreme-C 10Gb/25Gb Ethernet" },
130         [BCM57304] = { "Broadcom BCM57304 NetXtreme-C 10Gb/25Gb/40Gb/50Gb Ethernet" },
131         [BCM57417_NPAR] = { "Broadcom BCM57417 NetXtreme-E Ethernet Partition" },
132         [BCM58700] = { "Broadcom BCM58700 Nitro 1Gb/2.5Gb/10Gb Ethernet" },
133         [BCM57311] = { "Broadcom BCM57311 NetXtreme-C 10Gb Ethernet" },
134         [BCM57312] = { "Broadcom BCM57312 NetXtreme-C 10Gb/25Gb Ethernet" },
135         [BCM57402] = { "Broadcom BCM57402 NetXtreme-E 10Gb Ethernet" },
136         [BCM57404] = { "Broadcom BCM57404 NetXtreme-E 10Gb/25Gb Ethernet" },
137         [BCM57406] = { "Broadcom BCM57406 NetXtreme-E 10GBase-T Ethernet" },
138         [BCM57402_NPAR] = { "Broadcom BCM57402 NetXtreme-E Ethernet Partition" },
139         [BCM57407] = { "Broadcom BCM57407 NetXtreme-E 10GBase-T Ethernet" },
140         [BCM57412] = { "Broadcom BCM57412 NetXtreme-E 10Gb Ethernet" },
141         [BCM57414] = { "Broadcom BCM57414 NetXtreme-E 10Gb/25Gb Ethernet" },
142         [BCM57416] = { "Broadcom BCM57416 NetXtreme-E 10GBase-T Ethernet" },
143         [BCM57417] = { "Broadcom BCM57417 NetXtreme-E 10GBase-T Ethernet" },
144         [BCM57412_NPAR] = { "Broadcom BCM57412 NetXtreme-E Ethernet Partition" },
145         [BCM57314] = { "Broadcom BCM57314 NetXtreme-C 10Gb/25Gb/40Gb/50Gb Ethernet" },
146         [BCM57417_SFP] = { "Broadcom BCM57417 NetXtreme-E 10Gb/25Gb Ethernet" },
147         [BCM57416_SFP] = { "Broadcom BCM57416 NetXtreme-E 10Gb Ethernet" },
148         [BCM57404_NPAR] = { "Broadcom BCM57404 NetXtreme-E Ethernet Partition" },
149         [BCM57406_NPAR] = { "Broadcom BCM57406 NetXtreme-E Ethernet Partition" },
150         [BCM57407_SFP] = { "Broadcom BCM57407 NetXtreme-E 25Gb Ethernet" },
151         [BCM57407_NPAR] = { "Broadcom BCM57407 NetXtreme-E Ethernet Partition" },
152         [BCM57414_NPAR] = { "Broadcom BCM57414 NetXtreme-E Ethernet Partition" },
153         [BCM57416_NPAR] = { "Broadcom BCM57416 NetXtreme-E Ethernet Partition" },
154         [BCM57452] = { "Broadcom BCM57452 NetXtreme-E 10Gb/25Gb/40Gb/50Gb Ethernet" },
155         [BCM57454] = { "Broadcom BCM57454 NetXtreme-E 10Gb/25Gb/40Gb/50Gb/100Gb Ethernet" },
156         [BCM5745x_NPAR] = { "Broadcom BCM5745x NetXtreme-E Ethernet Partition" },
157         [BCM57508] = { "Broadcom BCM57508 NetXtreme-E 10Gb/25Gb/50Gb/100Gb/200Gb Ethernet" },
158         [BCM58802] = { "Broadcom BCM58802 NetXtreme-S 10Gb/25Gb/40Gb/50Gb Ethernet" },
159         [BCM58804] = { "Broadcom BCM58804 NetXtreme-S 10Gb/25Gb/40Gb/50Gb/100Gb Ethernet" },
160         [BCM58808] = { "Broadcom BCM58808 NetXtreme-S 10Gb/25Gb/40Gb/50Gb/100Gb Ethernet" },
161         [NETXTREME_E_VF] = { "Broadcom NetXtreme-E Ethernet Virtual Function" },
162         [NETXTREME_C_VF] = { "Broadcom NetXtreme-C Ethernet Virtual Function" },
163         [NETXTREME_S_VF] = { "Broadcom NetXtreme-S Ethernet Virtual Function" },
164         [NETXTREME_E_P5_VF] = { "Broadcom BCM5750X NetXtreme-E Ethernet Virtual Function" },
165 };
166
167 static const struct pci_device_id bnxt_pci_tbl[] = {
168         { PCI_VDEVICE(BROADCOM, 0x1604), .driver_data = BCM5745x_NPAR },
169         { PCI_VDEVICE(BROADCOM, 0x1605), .driver_data = BCM5745x_NPAR },
170         { PCI_VDEVICE(BROADCOM, 0x1614), .driver_data = BCM57454 },
171         { PCI_VDEVICE(BROADCOM, 0x16c0), .driver_data = BCM57417_NPAR },
172         { PCI_VDEVICE(BROADCOM, 0x16c8), .driver_data = BCM57301 },
173         { PCI_VDEVICE(BROADCOM, 0x16c9), .driver_data = BCM57302 },
174         { PCI_VDEVICE(BROADCOM, 0x16ca), .driver_data = BCM57304 },
175         { PCI_VDEVICE(BROADCOM, 0x16cc), .driver_data = BCM57417_NPAR },
176         { PCI_VDEVICE(BROADCOM, 0x16cd), .driver_data = BCM58700 },
177         { PCI_VDEVICE(BROADCOM, 0x16ce), .driver_data = BCM57311 },
178         { PCI_VDEVICE(BROADCOM, 0x16cf), .driver_data = BCM57312 },
179         { PCI_VDEVICE(BROADCOM, 0x16d0), .driver_data = BCM57402 },
180         { PCI_VDEVICE(BROADCOM, 0x16d1), .driver_data = BCM57404 },
181         { PCI_VDEVICE(BROADCOM, 0x16d2), .driver_data = BCM57406 },
182         { PCI_VDEVICE(BROADCOM, 0x16d4), .driver_data = BCM57402_NPAR },
183         { PCI_VDEVICE(BROADCOM, 0x16d5), .driver_data = BCM57407 },
184         { PCI_VDEVICE(BROADCOM, 0x16d6), .driver_data = BCM57412 },
185         { PCI_VDEVICE(BROADCOM, 0x16d7), .driver_data = BCM57414 },
186         { PCI_VDEVICE(BROADCOM, 0x16d8), .driver_data = BCM57416 },
187         { PCI_VDEVICE(BROADCOM, 0x16d9), .driver_data = BCM57417 },
188         { PCI_VDEVICE(BROADCOM, 0x16de), .driver_data = BCM57412_NPAR },
189         { PCI_VDEVICE(BROADCOM, 0x16df), .driver_data = BCM57314 },
190         { PCI_VDEVICE(BROADCOM, 0x16e2), .driver_data = BCM57417_SFP },
191         { PCI_VDEVICE(BROADCOM, 0x16e3), .driver_data = BCM57416_SFP },
192         { PCI_VDEVICE(BROADCOM, 0x16e7), .driver_data = BCM57404_NPAR },
193         { PCI_VDEVICE(BROADCOM, 0x16e8), .driver_data = BCM57406_NPAR },
194         { PCI_VDEVICE(BROADCOM, 0x16e9), .driver_data = BCM57407_SFP },
195         { PCI_VDEVICE(BROADCOM, 0x16ea), .driver_data = BCM57407_NPAR },
196         { PCI_VDEVICE(BROADCOM, 0x16eb), .driver_data = BCM57412_NPAR },
197         { PCI_VDEVICE(BROADCOM, 0x16ec), .driver_data = BCM57414_NPAR },
198         { PCI_VDEVICE(BROADCOM, 0x16ed), .driver_data = BCM57414_NPAR },
199         { PCI_VDEVICE(BROADCOM, 0x16ee), .driver_data = BCM57416_NPAR },
200         { PCI_VDEVICE(BROADCOM, 0x16ef), .driver_data = BCM57416_NPAR },
201         { PCI_VDEVICE(BROADCOM, 0x16f0), .driver_data = BCM58808 },
202         { PCI_VDEVICE(BROADCOM, 0x16f1), .driver_data = BCM57452 },
203         { PCI_VDEVICE(BROADCOM, 0x1750), .driver_data = BCM57508 },
204         { PCI_VDEVICE(BROADCOM, 0xd802), .driver_data = BCM58802 },
205         { PCI_VDEVICE(BROADCOM, 0xd804), .driver_data = BCM58804 },
206 #ifdef CONFIG_BNXT_SRIOV
207         { PCI_VDEVICE(BROADCOM, 0x1606), .driver_data = NETXTREME_E_VF },
208         { PCI_VDEVICE(BROADCOM, 0x1609), .driver_data = NETXTREME_E_VF },
209         { PCI_VDEVICE(BROADCOM, 0x16c1), .driver_data = NETXTREME_E_VF },
210         { PCI_VDEVICE(BROADCOM, 0x16cb), .driver_data = NETXTREME_C_VF },
211         { PCI_VDEVICE(BROADCOM, 0x16d3), .driver_data = NETXTREME_E_VF },
212         { PCI_VDEVICE(BROADCOM, 0x16dc), .driver_data = NETXTREME_E_VF },
213         { PCI_VDEVICE(BROADCOM, 0x16e1), .driver_data = NETXTREME_C_VF },
214         { PCI_VDEVICE(BROADCOM, 0x16e5), .driver_data = NETXTREME_C_VF },
215         { PCI_VDEVICE(BROADCOM, 0x1807), .driver_data = NETXTREME_E_P5_VF },
216         { PCI_VDEVICE(BROADCOM, 0xd800), .driver_data = NETXTREME_S_VF },
217 #endif
218         { 0 }
219 };
220
221 MODULE_DEVICE_TABLE(pci, bnxt_pci_tbl);
222
223 static const u16 bnxt_vf_req_snif[] = {
224         HWRM_FUNC_CFG,
225         HWRM_FUNC_VF_CFG,
226         HWRM_PORT_PHY_QCFG,
227         HWRM_CFA_L2_FILTER_ALLOC,
228 };
229
230 static const u16 bnxt_async_events_arr[] = {
231         ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE,
232         ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD,
233         ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED,
234         ASYNC_EVENT_CMPL_EVENT_ID_VF_CFG_CHANGE,
235         ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE,
236 };
237
238 static struct workqueue_struct *bnxt_pf_wq;
239
240 static bool bnxt_vf_pciid(enum board_idx idx)
241 {
242         return (idx == NETXTREME_C_VF || idx == NETXTREME_E_VF ||
243                 idx == NETXTREME_S_VF || idx == NETXTREME_E_P5_VF);
244 }
245
246 #define DB_CP_REARM_FLAGS       (DB_KEY_CP | DB_IDX_VALID)
247 #define DB_CP_FLAGS             (DB_KEY_CP | DB_IDX_VALID | DB_IRQ_DIS)
248 #define DB_CP_IRQ_DIS_FLAGS     (DB_KEY_CP | DB_IRQ_DIS)
249
250 #define BNXT_CP_DB_IRQ_DIS(db)                                          \
251                 writel(DB_CP_IRQ_DIS_FLAGS, db)
252
253 #define BNXT_DB_CQ(db, idx)                                             \
254         writel(DB_CP_FLAGS | RING_CMP(idx), (db)->doorbell)
255
256 #define BNXT_DB_NQ_P5(db, idx)                                          \
257         writeq((db)->db_key64 | DBR_TYPE_NQ | RING_CMP(idx), (db)->doorbell)
258
259 #define BNXT_DB_CQ_ARM(db, idx)                                         \
260         writel(DB_CP_REARM_FLAGS | RING_CMP(idx), (db)->doorbell)
261
262 #define BNXT_DB_NQ_ARM_P5(db, idx)                                      \
263         writeq((db)->db_key64 | DBR_TYPE_NQ_ARM | RING_CMP(idx), (db)->doorbell)
264
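/* Ring the NQ doorbell for the given notification/completion ring.  P5
 * chips use the 64-bit NQ doorbell format (writeq with db_key64); older
 * chips fall back to the legacy 32-bit CQ doorbell (writel).
 */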
265 static void bnxt_db_nq(struct bnxt *bp, struct bnxt_db_info *db, u32 idx)
266 {
267         if (bp->flags & BNXT_FLAG_CHIP_P5)
268                 BNXT_DB_NQ_P5(db, idx);
269         else
270                 BNXT_DB_CQ(db, idx);
271 }
272
273 static void bnxt_db_nq_arm(struct bnxt *bp, struct bnxt_db_info *db, u32 idx)
274 {
275         if (bp->flags & BNXT_FLAG_CHIP_P5)
276                 BNXT_DB_NQ_ARM_P5(db, idx);
277         else
278                 BNXT_DB_CQ_ARM(db, idx);
279 }
280
281 static void bnxt_db_cq(struct bnxt *bp, struct bnxt_db_info *db, u32 idx)
282 {
283         if (bp->flags & BNXT_FLAG_CHIP_P5)
284                 writeq(db->db_key64 | DBR_TYPE_CQ_ARMALL | RING_CMP(idx),
285                        db->doorbell);
286         else
287                 BNXT_DB_CQ(db, idx);
288 }
289
290 const u16 bnxt_lhint_arr[] = {
291         TX_BD_FLAGS_LHINT_512_AND_SMALLER,
292         TX_BD_FLAGS_LHINT_512_TO_1023,
293         TX_BD_FLAGS_LHINT_1024_TO_2047,
294         TX_BD_FLAGS_LHINT_1024_TO_2047,
295         TX_BD_FLAGS_LHINT_2048_AND_LARGER,
296         TX_BD_FLAGS_LHINT_2048_AND_LARGER,
297         TX_BD_FLAGS_LHINT_2048_AND_LARGER,
298         TX_BD_FLAGS_LHINT_2048_AND_LARGER,
299         TX_BD_FLAGS_LHINT_2048_AND_LARGER,
300         TX_BD_FLAGS_LHINT_2048_AND_LARGER,
301         TX_BD_FLAGS_LHINT_2048_AND_LARGER,
302         TX_BD_FLAGS_LHINT_2048_AND_LARGER,
303         TX_BD_FLAGS_LHINT_2048_AND_LARGER,
304         TX_BD_FLAGS_LHINT_2048_AND_LARGER,
305         TX_BD_FLAGS_LHINT_2048_AND_LARGER,
306         TX_BD_FLAGS_LHINT_2048_AND_LARGER,
307         TX_BD_FLAGS_LHINT_2048_AND_LARGER,
308         TX_BD_FLAGS_LHINT_2048_AND_LARGER,
309         TX_BD_FLAGS_LHINT_2048_AND_LARGER,
310 };
311
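/* Return the CFA action for a transmitted packet: the switch port_id
 * carried in the skb's HW_PORT_MUX metadata dst, or 0 if none.  This is
 * what the VF representor TX path uses to steer the packet to the right
 * function.
 */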
312 static u16 bnxt_xmit_get_cfa_action(struct sk_buff *skb)
313 {
314         struct metadata_dst *md_dst = skb_metadata_dst(skb);
315
316         if (!md_dst || md_dst->type != METADATA_HW_PORT_MUX)
317                 return 0;
318
319         return md_dst->u.port_info.port_id;
320 }
321
322 static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev)
323 {
324         struct bnxt *bp = netdev_priv(dev);
325         struct tx_bd *txbd;
326         struct tx_bd_ext *txbd1;
327         struct netdev_queue *txq;
328         int i;
329         dma_addr_t mapping;
330         unsigned int length, pad = 0;
331         u32 len, free_size, vlan_tag_flags, cfa_action, flags;
332         u16 prod, last_frag;
333         struct pci_dev *pdev = bp->pdev;
334         struct bnxt_tx_ring_info *txr;
335         struct bnxt_sw_tx_bd *tx_buf;
336
337         i = skb_get_queue_mapping(skb);
338         if (unlikely(i >= bp->tx_nr_rings)) {
339                 dev_kfree_skb_any(skb);
340                 return NETDEV_TX_OK;
341         }
342
343         txq = netdev_get_tx_queue(dev, i);
344         txr = &bp->tx_ring[bp->tx_ring_map[i]];
345         prod = txr->tx_prod;
346
347         free_size = bnxt_tx_avail(bp, txr);
348         if (unlikely(free_size < skb_shinfo(skb)->nr_frags + 2)) {
349                 netif_tx_stop_queue(txq);
350                 return NETDEV_TX_BUSY;
351         }
352
353         length = skb->len;
354         len = skb_headlen(skb);
355         last_frag = skb_shinfo(skb)->nr_frags;
356
357         txbd = &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];
358
359         txbd->tx_bd_opaque = prod;
360
361         tx_buf = &txr->tx_buf_ring[prod];
362         tx_buf->skb = skb;
363         tx_buf->nr_frags = last_frag;
364
365         vlan_tag_flags = 0;
366         cfa_action = bnxt_xmit_get_cfa_action(skb);
367         if (skb_vlan_tag_present(skb)) {
368                 vlan_tag_flags = TX_BD_CFA_META_KEY_VLAN |
369                                  skb_vlan_tag_get(skb);
370                 /* Currently supports 802.1Q and 802.1ad VLAN offloads;
371                  * the QINQ1, QINQ2, QINQ3 VLAN headers are deprecated.
372                  */
373                 if (skb->vlan_proto == htons(ETH_P_8021Q))
374                         vlan_tag_flags |= 1 << TX_BD_CFA_META_TPID_SHIFT;
375         }
376
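        /* TX push: if the ring is currently empty and the packet is small
         * enough, copy the BDs and the packet data into the push buffer
         * and write it all through the doorbell BAR, instead of making
         * the NIC fetch the payload from host memory with DMA.
         */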
377         if (free_size == bp->tx_ring_size && length <= bp->tx_push_thresh) {
378                 struct tx_push_buffer *tx_push_buf = txr->tx_push;
379                 struct tx_push_bd *tx_push = &tx_push_buf->push_bd;
380                 struct tx_bd_ext *tx_push1 = &tx_push->txbd2;
381                 void __iomem *db = txr->tx_db.doorbell;
382                 void *pdata = tx_push_buf->data;
383                 u64 *end;
384                 int j, push_len;
385
386                 /* Set COAL_NOW to be ready quickly for the next push */
387                 tx_push->tx_bd_len_flags_type =
388                         cpu_to_le32((length << TX_BD_LEN_SHIFT) |
389                                         TX_BD_TYPE_LONG_TX_BD |
390                                         TX_BD_FLAGS_LHINT_512_AND_SMALLER |
391                                         TX_BD_FLAGS_COAL_NOW |
392                                         TX_BD_FLAGS_PACKET_END |
393                                         (2 << TX_BD_FLAGS_BD_CNT_SHIFT));
394
395                 if (skb->ip_summed == CHECKSUM_PARTIAL)
396                         tx_push1->tx_bd_hsize_lflags =
397                                         cpu_to_le32(TX_BD_FLAGS_TCP_UDP_CHKSUM);
398                 else
399                         tx_push1->tx_bd_hsize_lflags = 0;
400
401                 tx_push1->tx_bd_cfa_meta = cpu_to_le32(vlan_tag_flags);
402                 tx_push1->tx_bd_cfa_action =
403                         cpu_to_le32(cfa_action << TX_BD_CFA_ACTION_SHIFT);
404
405                 end = pdata + length;
406                 end = PTR_ALIGN(end, 8) - 1;
407                 *end = 0;
408
409                 skb_copy_from_linear_data(skb, pdata, len);
410                 pdata += len;
411                 for (j = 0; j < last_frag; j++) {
412                         skb_frag_t *frag = &skb_shinfo(skb)->frags[j];
413                         void *fptr;
414
415                         fptr = skb_frag_address_safe(frag);
416                         if (!fptr)
417                                 goto normal_tx;
418
419                         memcpy(pdata, fptr, skb_frag_size(frag));
420                         pdata += skb_frag_size(frag);
421                 }
422
423                 txbd->tx_bd_len_flags_type = tx_push->tx_bd_len_flags_type;
424                 txbd->tx_bd_haddr = txr->data_mapping;
425                 prod = NEXT_TX(prod);
426                 txbd = &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];
427                 memcpy(txbd, tx_push1, sizeof(*txbd));
428                 prod = NEXT_TX(prod);
429                 tx_push->doorbell =
430                         cpu_to_le32(DB_KEY_TX_PUSH | DB_LONG_TX_PUSH | prod);
431                 txr->tx_prod = prod;
432
433                 tx_buf->is_push = 1;
434                 netdev_tx_sent_queue(txq, skb->len);
435                 wmb();  /* Sync is_push and byte queue before pushing data */
436
437                 push_len = (length + sizeof(*tx_push) + 7) / 8;
438                 if (push_len > 16) {
439                         __iowrite64_copy(db, tx_push_buf, 16);
440                         __iowrite32_copy(db + 4, tx_push_buf + 1,
441                                          (push_len - 16) << 1);
442                 } else {
443                         __iowrite64_copy(db, tx_push_buf, push_len);
444                 }
445
446                 goto tx_done;
447         }
448
449 normal_tx:
450         if (length < BNXT_MIN_PKT_SIZE) {
451                 pad = BNXT_MIN_PKT_SIZE - length;
452                 if (skb_pad(skb, pad)) {
453                         /* SKB already freed. */
454                         tx_buf->skb = NULL;
455                         return NETDEV_TX_OK;
456                 }
457                 length = BNXT_MIN_PKT_SIZE;
458         }
459
460         mapping = dma_map_single(&pdev->dev, skb->data, len, DMA_TO_DEVICE);
461
462         if (unlikely(dma_mapping_error(&pdev->dev, mapping))) {
463                 dev_kfree_skb_any(skb);
464                 tx_buf->skb = NULL;
465                 return NETDEV_TX_OK;
466         }
467
468         dma_unmap_addr_set(tx_buf, mapping, mapping);
469         flags = (len << TX_BD_LEN_SHIFT) | TX_BD_TYPE_LONG_TX_BD |
470                 ((last_frag + 2) << TX_BD_FLAGS_BD_CNT_SHIFT);
471
472         txbd->tx_bd_haddr = cpu_to_le64(mapping);
473
474         prod = NEXT_TX(prod);
475         txbd1 = (struct tx_bd_ext *)
476                 &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];
477
478         txbd1->tx_bd_hsize_lflags = 0;
479         if (skb_is_gso(skb)) {
480                 u32 hdr_len;
481
482                 if (skb->encapsulation)
483                         hdr_len = skb_inner_network_offset(skb) +
484                                 skb_inner_network_header_len(skb) +
485                                 inner_tcp_hdrlen(skb);
486                 else
487                         hdr_len = skb_transport_offset(skb) +
488                                 tcp_hdrlen(skb);
489
490                 txbd1->tx_bd_hsize_lflags = cpu_to_le32(TX_BD_FLAGS_LSO |
491                                         TX_BD_FLAGS_T_IPID |
492                                         (hdr_len << (TX_BD_HSIZE_SHIFT - 1)));
493                 length = skb_shinfo(skb)->gso_size;
494                 txbd1->tx_bd_mss = cpu_to_le32(length);
495                 length += hdr_len;
496         } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
497                 txbd1->tx_bd_hsize_lflags =
498                         cpu_to_le32(TX_BD_FLAGS_TCP_UDP_CHKSUM);
499                 txbd1->tx_bd_mss = 0;
500         }
501
502         length >>= 9;
503         flags |= bnxt_lhint_arr[length];
504         txbd->tx_bd_len_flags_type = cpu_to_le32(flags);
505
506         txbd1->tx_bd_cfa_meta = cpu_to_le32(vlan_tag_flags);
507         txbd1->tx_bd_cfa_action =
508                         cpu_to_le32(cfa_action << TX_BD_CFA_ACTION_SHIFT);
509         for (i = 0; i < last_frag; i++) {
510                 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
511
512                 prod = NEXT_TX(prod);
513                 txbd = &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];
514
515                 len = skb_frag_size(frag);
516                 mapping = skb_frag_dma_map(&pdev->dev, frag, 0, len,
517                                            DMA_TO_DEVICE);
518
519                 if (unlikely(dma_mapping_error(&pdev->dev, mapping)))
520                         goto tx_dma_error;
521
522                 tx_buf = &txr->tx_buf_ring[prod];
523                 dma_unmap_addr_set(tx_buf, mapping, mapping);
524
525                 txbd->tx_bd_haddr = cpu_to_le64(mapping);
526
527                 flags = len << TX_BD_LEN_SHIFT;
528                 txbd->tx_bd_len_flags_type = cpu_to_le32(flags);
529         }
530
531         flags &= ~TX_BD_LEN;
532         txbd->tx_bd_len_flags_type =
533                 cpu_to_le32(((len + pad) << TX_BD_LEN_SHIFT) | flags |
534                             TX_BD_FLAGS_PACKET_END);
535
536         netdev_tx_sent_queue(txq, skb->len);
537
538         /* Sync BD data before updating doorbell */
539         wmb();
540
541         prod = NEXT_TX(prod);
542         txr->tx_prod = prod;
543
544         if (!skb->xmit_more || netif_xmit_stopped(txq))
545                 bnxt_db_write(bp, &txr->tx_db, prod);
546
547 tx_done:
548
549         mmiowb();
550
551         if (unlikely(bnxt_tx_avail(bp, txr) <= MAX_SKB_FRAGS + 1)) {
552                 if (skb->xmit_more && !tx_buf->is_push)
553                         bnxt_db_write(bp, &txr->tx_db, prod);
554
555                 netif_tx_stop_queue(txq);
556
557                 /* netif_tx_stop_queue() must be done before checking
558                  * tx index in bnxt_tx_avail() below, because in
559                  * bnxt_tx_int(), we update tx index before checking for
560                  * netif_tx_queue_stopped().
561                  */
562                 smp_mb();
563                 if (bnxt_tx_avail(bp, txr) > bp->tx_wake_thresh)
564                         netif_tx_wake_queue(txq);
565         }
566         return NETDEV_TX_OK;
567
568 tx_dma_error:
569         last_frag = i;
570
571         /* start back at beginning and unmap skb */
572         prod = txr->tx_prod;
573         tx_buf = &txr->tx_buf_ring[prod];
574         tx_buf->skb = NULL;
575         dma_unmap_single(&pdev->dev, dma_unmap_addr(tx_buf, mapping),
576                          skb_headlen(skb), PCI_DMA_TODEVICE);
577         prod = NEXT_TX(prod);
578
579         /* unmap remaining mapped pages */
580         for (i = 0; i < last_frag; i++) {
581                 prod = NEXT_TX(prod);
582                 tx_buf = &txr->tx_buf_ring[prod];
583                 dma_unmap_page(&pdev->dev, dma_unmap_addr(tx_buf, mapping),
584                                skb_frag_size(&skb_shinfo(skb)->frags[i]),
585                                PCI_DMA_TODEVICE);
586         }
587
588         dev_kfree_skb_any(skb);
589         return NETDEV_TX_OK;
590 }
591
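/* Reclaim nr_pkts completed TX packets: unmap their DMA buffers, free the
 * skbs, advance the consumer index and, if the queue was stopped and
 * enough descriptors are free again, wake it up.
 */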
592 static void bnxt_tx_int(struct bnxt *bp, struct bnxt_napi *bnapi, int nr_pkts)
593 {
594         struct bnxt_tx_ring_info *txr = bnapi->tx_ring;
595         struct netdev_queue *txq = netdev_get_tx_queue(bp->dev, txr->txq_index);
596         u16 cons = txr->tx_cons;
597         struct pci_dev *pdev = bp->pdev;
598         int i;
599         unsigned int tx_bytes = 0;
600
601         for (i = 0; i < nr_pkts; i++) {
602                 struct bnxt_sw_tx_bd *tx_buf;
603                 struct sk_buff *skb;
604                 int j, last;
605
606                 tx_buf = &txr->tx_buf_ring[cons];
607                 cons = NEXT_TX(cons);
608                 skb = tx_buf->skb;
609                 tx_buf->skb = NULL;
610
611                 if (tx_buf->is_push) {
612                         tx_buf->is_push = 0;
613                         goto next_tx_int;
614                 }
615
616                 dma_unmap_single(&pdev->dev, dma_unmap_addr(tx_buf, mapping),
617                                  skb_headlen(skb), PCI_DMA_TODEVICE);
618                 last = tx_buf->nr_frags;
619
620                 for (j = 0; j < last; j++) {
621                         cons = NEXT_TX(cons);
622                         tx_buf = &txr->tx_buf_ring[cons];
623                         dma_unmap_page(
624                                 &pdev->dev,
625                                 dma_unmap_addr(tx_buf, mapping),
626                                 skb_frag_size(&skb_shinfo(skb)->frags[j]),
627                                 PCI_DMA_TODEVICE);
628                 }
629
630 next_tx_int:
631                 cons = NEXT_TX(cons);
632
633                 tx_bytes += skb->len;
634                 dev_kfree_skb_any(skb);
635         }
636
637         netdev_tx_completed_queue(txq, nr_pkts, tx_bytes);
638         txr->tx_cons = cons;
639
640         /* Need to make the tx_cons update visible to bnxt_start_xmit()
641          * before checking for netif_tx_queue_stopped().  Without the
642          * memory barrier, there is a small possibility that bnxt_start_xmit()
643          * will miss it and cause the queue to be stopped forever.
644          */
645         smp_mb();
646
647         if (unlikely(netif_tx_queue_stopped(txq)) &&
648             (bnxt_tx_avail(bp, txr) > bp->tx_wake_thresh)) {
649                 __netif_tx_lock(txq, smp_processor_id());
650                 if (netif_tx_queue_stopped(txq) &&
651                     bnxt_tx_avail(bp, txr) > bp->tx_wake_thresh &&
652                     txr->dev_state != BNXT_DEV_STATE_CLOSING)
653                         netif_tx_wake_queue(txq);
654                 __netif_tx_unlock(txq);
655         }
656 }
657
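/* Allocate and DMA-map a full page for page-mode RX.  The returned
 * mapping is advanced by rx_dma_offset; NULL is returned on failure.
 */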
658 static struct page *__bnxt_alloc_rx_page(struct bnxt *bp, dma_addr_t *mapping,
659                                          gfp_t gfp)
660 {
661         struct device *dev = &bp->pdev->dev;
662         struct page *page;
663
664         page = alloc_page(gfp);
665         if (!page)
666                 return NULL;
667
668         *mapping = dma_map_page_attrs(dev, page, 0, PAGE_SIZE, bp->rx_dir,
669                                       DMA_ATTR_WEAK_ORDERING);
670         if (dma_mapping_error(dev, *mapping)) {
671                 __free_page(page);
672                 return NULL;
673         }
674         *mapping += bp->rx_dma_offset;
675         return page;
676 }
677
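/* Allocate and DMA-map a normal (non page-mode) RX data buffer. */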
678 static inline u8 *__bnxt_alloc_rx_data(struct bnxt *bp, dma_addr_t *mapping,
679                                        gfp_t gfp)
680 {
681         u8 *data;
682         struct pci_dev *pdev = bp->pdev;
683
684         data = kmalloc(bp->rx_buf_size, gfp);
685         if (!data)
686                 return NULL;
687
688         *mapping = dma_map_single_attrs(&pdev->dev, data + bp->rx_dma_offset,
689                                         bp->rx_buf_use_size, bp->rx_dir,
690                                         DMA_ATTR_WEAK_ORDERING);
691
692         if (dma_mapping_error(&pdev->dev, *mapping)) {
693                 kfree(data);
694                 data = NULL;
695         }
696         return data;
697 }
698
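/* Allocate a fresh buffer (page or data, depending on the RX mode) for
 * the RX ring slot at @prod and point its descriptor at the new mapping.
 */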
699 int bnxt_alloc_rx_data(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
700                        u16 prod, gfp_t gfp)
701 {
702         struct rx_bd *rxbd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
703         struct bnxt_sw_rx_bd *rx_buf = &rxr->rx_buf_ring[prod];
704         dma_addr_t mapping;
705
706         if (BNXT_RX_PAGE_MODE(bp)) {
707                 struct page *page = __bnxt_alloc_rx_page(bp, &mapping, gfp);
708
709                 if (!page)
710                         return -ENOMEM;
711
712                 rx_buf->data = page;
713                 rx_buf->data_ptr = page_address(page) + bp->rx_offset;
714         } else {
715                 u8 *data = __bnxt_alloc_rx_data(bp, &mapping, gfp);
716
717                 if (!data)
718                         return -ENOMEM;
719
720                 rx_buf->data = data;
721                 rx_buf->data_ptr = data + bp->rx_offset;
722         }
723         rx_buf->mapping = mapping;
724
725         rxbd->rx_bd_haddr = cpu_to_le64(mapping);
726         return 0;
727 }
728
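/* Recycle the buffer at consumer index @cons into the current producer
 * slot, typically because a replacement buffer could not be allocated.
 */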
729 void bnxt_reuse_rx_data(struct bnxt_rx_ring_info *rxr, u16 cons, void *data)
730 {
731         u16 prod = rxr->rx_prod;
732         struct bnxt_sw_rx_bd *cons_rx_buf, *prod_rx_buf;
733         struct rx_bd *cons_bd, *prod_bd;
734
735         prod_rx_buf = &rxr->rx_buf_ring[prod];
736         cons_rx_buf = &rxr->rx_buf_ring[cons];
737
738         prod_rx_buf->data = data;
739         prod_rx_buf->data_ptr = cons_rx_buf->data_ptr;
740
741         prod_rx_buf->mapping = cons_rx_buf->mapping;
742
743         prod_bd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
744         cons_bd = &rxr->rx_desc_ring[RX_RING(cons)][RX_IDX(cons)];
745
746         prod_bd->rx_bd_haddr = cons_bd->rx_bd_haddr;
747 }
748
749 static inline u16 bnxt_find_next_agg_idx(struct bnxt_rx_ring_info *rxr, u16 idx)
750 {
751         u16 next, max = rxr->rx_agg_bmap_size;
752
753         next = find_next_zero_bit(rxr->rx_agg_bmap, max, idx);
754         if (next >= max)
755                 next = find_first_zero_bit(rxr->rx_agg_bmap, max);
756         return next;
757 }
758
759 static inline int bnxt_alloc_rx_page(struct bnxt *bp,
760                                      struct bnxt_rx_ring_info *rxr,
761                                      u16 prod, gfp_t gfp)
762 {
763         struct rx_bd *rxbd =
764                 &rxr->rx_agg_desc_ring[RX_RING(prod)][RX_IDX(prod)];
765         struct bnxt_sw_rx_agg_bd *rx_agg_buf;
766         struct pci_dev *pdev = bp->pdev;
767         struct page *page;
768         dma_addr_t mapping;
769         u16 sw_prod = rxr->rx_sw_agg_prod;
770         unsigned int offset = 0;
771
772         if (PAGE_SIZE > BNXT_RX_PAGE_SIZE) {
773                 page = rxr->rx_page;
774                 if (!page) {
775                         page = alloc_page(gfp);
776                         if (!page)
777                                 return -ENOMEM;
778                         rxr->rx_page = page;
779                         rxr->rx_page_offset = 0;
780                 }
781                 offset = rxr->rx_page_offset;
782                 rxr->rx_page_offset += BNXT_RX_PAGE_SIZE;
783                 if (rxr->rx_page_offset == PAGE_SIZE)
784                         rxr->rx_page = NULL;
785                 else
786                         get_page(page);
787         } else {
788                 page = alloc_page(gfp);
789                 if (!page)
790                         return -ENOMEM;
791         }
792
793         mapping = dma_map_page_attrs(&pdev->dev, page, offset,
794                                      BNXT_RX_PAGE_SIZE, PCI_DMA_FROMDEVICE,
795                                      DMA_ATTR_WEAK_ORDERING);
796         if (dma_mapping_error(&pdev->dev, mapping)) {
797                 __free_page(page);
798                 return -EIO;
799         }
800
801         if (unlikely(test_bit(sw_prod, rxr->rx_agg_bmap)))
802                 sw_prod = bnxt_find_next_agg_idx(rxr, sw_prod);
803
804         __set_bit(sw_prod, rxr->rx_agg_bmap);
805         rx_agg_buf = &rxr->rx_agg_ring[sw_prod];
806         rxr->rx_sw_agg_prod = NEXT_RX_AGG(sw_prod);
807
808         rx_agg_buf->page = page;
809         rx_agg_buf->offset = offset;
810         rx_agg_buf->mapping = mapping;
811         rxbd->rx_bd_haddr = cpu_to_le64(mapping);
812         rxbd->rx_bd_opaque = sw_prod;
813         return 0;
814 }
815
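/* Put agg_bufs aggregation buffers referenced by the completion ring back
 * on the RX aggregation ring, fixing up the bitmap and both producer
 * indices.  Used when the corresponding packet is being dropped.
 */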
816 static void bnxt_reuse_rx_agg_bufs(struct bnxt_cp_ring_info *cpr, u16 cp_cons,
817                                    u32 agg_bufs)
818 {
819         struct bnxt_napi *bnapi = cpr->bnapi;
820         struct bnxt *bp = bnapi->bp;
821         struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
822         u16 prod = rxr->rx_agg_prod;
823         u16 sw_prod = rxr->rx_sw_agg_prod;
824         u32 i;
825
826         for (i = 0; i < agg_bufs; i++) {
827                 u16 cons;
828                 struct rx_agg_cmp *agg;
829                 struct bnxt_sw_rx_agg_bd *cons_rx_buf, *prod_rx_buf;
830                 struct rx_bd *prod_bd;
831                 struct page *page;
832
833                 agg = (struct rx_agg_cmp *)
834                         &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
835                 cons = agg->rx_agg_cmp_opaque;
836                 __clear_bit(cons, rxr->rx_agg_bmap);
837
838                 if (unlikely(test_bit(sw_prod, rxr->rx_agg_bmap)))
839                         sw_prod = bnxt_find_next_agg_idx(rxr, sw_prod);
840
841                 __set_bit(sw_prod, rxr->rx_agg_bmap);
842                 prod_rx_buf = &rxr->rx_agg_ring[sw_prod];
843                 cons_rx_buf = &rxr->rx_agg_ring[cons];
844
845                 /* It is possible for sw_prod to be equal to cons, so
846                  * set cons_rx_buf->page to NULL first.
847                  */
848                 page = cons_rx_buf->page;
849                 cons_rx_buf->page = NULL;
850                 prod_rx_buf->page = page;
851                 prod_rx_buf->offset = cons_rx_buf->offset;
852
853                 prod_rx_buf->mapping = cons_rx_buf->mapping;
854
855                 prod_bd = &rxr->rx_agg_desc_ring[RX_RING(prod)][RX_IDX(prod)];
856
857                 prod_bd->rx_bd_haddr = cpu_to_le64(cons_rx_buf->mapping);
858                 prod_bd->rx_bd_opaque = sw_prod;
859
860                 prod = NEXT_RX_AGG(prod);
861                 sw_prod = NEXT_RX_AGG(sw_prod);
862                 cp_cons = NEXT_CMP(cp_cons);
863         }
864         rxr->rx_agg_prod = prod;
865         rxr->rx_sw_agg_prod = sw_prod;
866 }
867
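/* Build an skb for a page-mode RX buffer: the header bytes are copied
 * into the skb head and the remainder of the page is attached as a page
 * fragment.  The ring slot is refilled first; if that fails, the buffer
 * is recycled and NULL is returned.
 */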
868 static struct sk_buff *bnxt_rx_page_skb(struct bnxt *bp,
869                                         struct bnxt_rx_ring_info *rxr,
870                                         u16 cons, void *data, u8 *data_ptr,
871                                         dma_addr_t dma_addr,
872                                         unsigned int offset_and_len)
873 {
874         unsigned int payload = offset_and_len >> 16;
875         unsigned int len = offset_and_len & 0xffff;
876         struct skb_frag_struct *frag;
877         struct page *page = data;
878         u16 prod = rxr->rx_prod;
879         struct sk_buff *skb;
880         int off, err;
881
882         err = bnxt_alloc_rx_data(bp, rxr, prod, GFP_ATOMIC);
883         if (unlikely(err)) {
884                 bnxt_reuse_rx_data(rxr, cons, data);
885                 return NULL;
886         }
887         dma_addr -= bp->rx_dma_offset;
888         dma_unmap_page_attrs(&bp->pdev->dev, dma_addr, PAGE_SIZE, bp->rx_dir,
889                              DMA_ATTR_WEAK_ORDERING);
890
891         if (unlikely(!payload))
892                 payload = eth_get_headlen(data_ptr, len);
893
894         skb = napi_alloc_skb(&rxr->bnapi->napi, payload);
895         if (!skb) {
896                 __free_page(page);
897                 return NULL;
898         }
899
900         off = (void *)data_ptr - page_address(page);
901         skb_add_rx_frag(skb, 0, page, off, len, PAGE_SIZE);
902         memcpy(skb->data - NET_IP_ALIGN, data_ptr - NET_IP_ALIGN,
903                payload + NET_IP_ALIGN);
904
905         frag = &skb_shinfo(skb)->frags[0];
906         skb_frag_size_sub(frag, payload);
907         frag->page_offset += payload;
908         skb->data_len -= payload;
909         skb->tail += payload;
910
911         return skb;
912 }
913
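/* Build an skb directly around a normal RX data buffer with build_skb().
 * A replacement buffer is allocated for the ring slot first; if that
 * fails, the old buffer is recycled and NULL is returned.
 */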
914 static struct sk_buff *bnxt_rx_skb(struct bnxt *bp,
915                                    struct bnxt_rx_ring_info *rxr, u16 cons,
916                                    void *data, u8 *data_ptr,
917                                    dma_addr_t dma_addr,
918                                    unsigned int offset_and_len)
919 {
920         u16 prod = rxr->rx_prod;
921         struct sk_buff *skb;
922         int err;
923
924         err = bnxt_alloc_rx_data(bp, rxr, prod, GFP_ATOMIC);
925         if (unlikely(err)) {
926                 bnxt_reuse_rx_data(rxr, cons, data);
927                 return NULL;
928         }
929
930         skb = build_skb(data, 0);
931         dma_unmap_single_attrs(&bp->pdev->dev, dma_addr, bp->rx_buf_use_size,
932                                bp->rx_dir, DMA_ATTR_WEAK_ORDERING);
933         if (!skb) {
934                 kfree(data);
935                 return NULL;
936         }
937
938         skb_reserve(skb, bp->rx_offset);
939         skb_put(skb, offset_and_len & 0xffff);
940         return skb;
941 }
942
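/* Attach agg_bufs aggregation pages from the completion ring to @skb as
 * page frags, refilling the aggregation ring as we go.  On allocation
 * failure the skb is freed, the remaining buffers are recycled and NULL
 * is returned.
 */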
943 static struct sk_buff *bnxt_rx_pages(struct bnxt *bp,
944                                      struct bnxt_cp_ring_info *cpr,
945                                      struct sk_buff *skb, u16 cp_cons,
946                                      u32 agg_bufs)
947 {
948         struct bnxt_napi *bnapi = cpr->bnapi;
949         struct pci_dev *pdev = bp->pdev;
950         struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
951         u16 prod = rxr->rx_agg_prod;
952         u32 i;
953
954         for (i = 0; i < agg_bufs; i++) {
955                 u16 cons, frag_len;
956                 struct rx_agg_cmp *agg;
957                 struct bnxt_sw_rx_agg_bd *cons_rx_buf;
958                 struct page *page;
959                 dma_addr_t mapping;
960
961                 agg = (struct rx_agg_cmp *)
962                         &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
963                 cons = agg->rx_agg_cmp_opaque;
964                 frag_len = (le32_to_cpu(agg->rx_agg_cmp_len_flags_type) &
965                             RX_AGG_CMP_LEN) >> RX_AGG_CMP_LEN_SHIFT;
966
967                 cons_rx_buf = &rxr->rx_agg_ring[cons];
968                 skb_fill_page_desc(skb, i, cons_rx_buf->page,
969                                    cons_rx_buf->offset, frag_len);
970                 __clear_bit(cons, rxr->rx_agg_bmap);
971
972                 /* It is possible for bnxt_alloc_rx_page() to allocate
973                  * a sw_prod index that equals the cons index, so we
974                  * need to clear the cons entry now.
975                  */
976                 mapping = cons_rx_buf->mapping;
977                 page = cons_rx_buf->page;
978                 cons_rx_buf->page = NULL;
979
980                 if (bnxt_alloc_rx_page(bp, rxr, prod, GFP_ATOMIC) != 0) {
981                         struct skb_shared_info *shinfo;
982                         unsigned int nr_frags;
983
984                         shinfo = skb_shinfo(skb);
985                         nr_frags = --shinfo->nr_frags;
986                         __skb_frag_set_page(&shinfo->frags[nr_frags], NULL);
987
988                         dev_kfree_skb(skb);
989
990                         cons_rx_buf->page = page;
991
992                         /* Update prod since possibly some pages have been
993                          * allocated already.
994                          */
995                         rxr->rx_agg_prod = prod;
996                         bnxt_reuse_rx_agg_bufs(cpr, cp_cons, agg_bufs - i);
997                         return NULL;
998                 }
999
1000                 dma_unmap_page_attrs(&pdev->dev, mapping, BNXT_RX_PAGE_SIZE,
1001                                      PCI_DMA_FROMDEVICE,
1002                                      DMA_ATTR_WEAK_ORDERING);
1003
1004                 skb->data_len += frag_len;
1005                 skb->len += frag_len;
1006                 skb->truesize += PAGE_SIZE;
1007
1008                 prod = NEXT_RX_AGG(prod);
1009                 cp_cons = NEXT_CMP(cp_cons);
1010         }
1011         rxr->rx_agg_prod = prod;
1012         return skb;
1013 }
1014
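/* Return true if all agg_bufs aggregation completions have been written
 * by the hardware, by checking the valid bit of the last expected entry.
 */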
1015 static int bnxt_agg_bufs_valid(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
1016                                u8 agg_bufs, u32 *raw_cons)
1017 {
1018         u16 last;
1019         struct rx_agg_cmp *agg;
1020
1021         *raw_cons = ADV_RAW_CMP(*raw_cons, agg_bufs);
1022         last = RING_CMP(*raw_cons);
1023         agg = (struct rx_agg_cmp *)
1024                 &cpr->cp_desc_ring[CP_RING(last)][CP_IDX(last)];
1025         return RX_AGG_CMP_VALID(agg, *raw_cons);
1026 }
1027
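/* Copy-break path: copy a small received packet into a newly allocated
 * skb so that the original RX buffer can be reused.
 */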
1028 static inline struct sk_buff *bnxt_copy_skb(struct bnxt_napi *bnapi, u8 *data,
1029                                             unsigned int len,
1030                                             dma_addr_t mapping)
1031 {
1032         struct bnxt *bp = bnapi->bp;
1033         struct pci_dev *pdev = bp->pdev;
1034         struct sk_buff *skb;
1035
1036         skb = napi_alloc_skb(&bnapi->napi, len);
1037         if (!skb)
1038                 return NULL;
1039
1040         dma_sync_single_for_cpu(&pdev->dev, mapping, bp->rx_copy_thresh,
1041                                 bp->rx_dir);
1042
1043         memcpy(skb->data - NET_IP_ALIGN, data - NET_IP_ALIGN,
1044                len + NET_IP_ALIGN);
1045
1046         dma_sync_single_for_device(&pdev->dev, mapping, bp->rx_copy_thresh,
1047                                    bp->rx_dir);
1048
1049         skb_put(skb, len);
1050         return skb;
1051 }
1052
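/* Advance the raw consumer index past an RX (or TPA end) completion that
 * is being dropped, including any aggregation entries.  Returns -EBUSY if
 * those entries have not all been written yet.
 */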
1053 static int bnxt_discard_rx(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
1054                            u32 *raw_cons, void *cmp)
1055 {
1056         struct rx_cmp *rxcmp = cmp;
1057         u32 tmp_raw_cons = *raw_cons;
1058         u8 cmp_type, agg_bufs = 0;
1059
1060         cmp_type = RX_CMP_TYPE(rxcmp);
1061
1062         if (cmp_type == CMP_TYPE_RX_L2_CMP) {
1063                 agg_bufs = (le32_to_cpu(rxcmp->rx_cmp_misc_v1) &
1064                             RX_CMP_AGG_BUFS) >>
1065                            RX_CMP_AGG_BUFS_SHIFT;
1066         } else if (cmp_type == CMP_TYPE_RX_L2_TPA_END_CMP) {
1067                 struct rx_tpa_end_cmp *tpa_end = cmp;
1068
1069                 agg_bufs = (le32_to_cpu(tpa_end->rx_tpa_end_cmp_misc_v1) &
1070                             RX_TPA_END_CMP_AGG_BUFS) >>
1071                            RX_TPA_END_CMP_AGG_BUFS_SHIFT;
1072         }
1073
1074         if (agg_bufs) {
1075                 if (!bnxt_agg_bufs_valid(bp, cpr, agg_bufs, &tmp_raw_cons))
1076                         return -EBUSY;
1077         }
1078         *raw_cons = tmp_raw_cons;
1079         return 0;
1080 }
1081
1082 static void bnxt_queue_sp_work(struct bnxt *bp)
1083 {
1084         if (BNXT_PF(bp))
1085                 queue_work(bnxt_pf_wq, &bp->sp_task);
1086         else
1087                 schedule_work(&bp->sp_task);
1088 }
1089
1090 static void bnxt_cancel_sp_work(struct bnxt *bp)
1091 {
1092         if (BNXT_PF(bp))
1093                 flush_workqueue(bnxt_pf_wq);
1094         else
1095                 cancel_work_sync(&bp->sp_task);
1096 }
1097
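/* Schedule an RX ring reset via the slow-path task when the ring state
 * looks inconsistent (e.g. an unexpected consumer index).
 */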
1098 static void bnxt_sched_reset(struct bnxt *bp, struct bnxt_rx_ring_info *rxr)
1099 {
1100         if (!rxr->bnapi->in_reset) {
1101                 rxr->bnapi->in_reset = true;
1102                 set_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event);
1103                 bnxt_queue_sp_work(bp);
1104         }
1105         rxr->rx_next_cons = 0xffff;
1106 }
1107
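/* Handle a TPA_START completion: the buffer at the consumer index becomes
 * the aggregation buffer for rx_tpa[agg_id], and the producer slot
 * inherits the previous aggregation buffer so normal RX can continue.  If
 * the consumer index is not the one we expect, a reset is scheduled
 * instead.
 */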
1108 static void bnxt_tpa_start(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
1109                            struct rx_tpa_start_cmp *tpa_start,
1110                            struct rx_tpa_start_cmp_ext *tpa_start1)
1111 {
1112         u8 agg_id = TPA_START_AGG_ID(tpa_start);
1113         u16 cons, prod;
1114         struct bnxt_tpa_info *tpa_info;
1115         struct bnxt_sw_rx_bd *cons_rx_buf, *prod_rx_buf;
1116         struct rx_bd *prod_bd;
1117         dma_addr_t mapping;
1118
1119         cons = tpa_start->rx_tpa_start_cmp_opaque;
1120         prod = rxr->rx_prod;
1121         cons_rx_buf = &rxr->rx_buf_ring[cons];
1122         prod_rx_buf = &rxr->rx_buf_ring[prod];
1123         tpa_info = &rxr->rx_tpa[agg_id];
1124
1125         if (unlikely(cons != rxr->rx_next_cons)) {
1126                 bnxt_sched_reset(bp, rxr);
1127                 return;
1128         }
1129         /* Store cfa_code in tpa_info to use in tpa_end
1130          * completion processing.
1131          */
1132         tpa_info->cfa_code = TPA_START_CFA_CODE(tpa_start1);
1133         prod_rx_buf->data = tpa_info->data;
1134         prod_rx_buf->data_ptr = tpa_info->data_ptr;
1135
1136         mapping = tpa_info->mapping;
1137         prod_rx_buf->mapping = mapping;
1138
1139         prod_bd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
1140
1141         prod_bd->rx_bd_haddr = cpu_to_le64(mapping);
1142
1143         tpa_info->data = cons_rx_buf->data;
1144         tpa_info->data_ptr = cons_rx_buf->data_ptr;
1145         cons_rx_buf->data = NULL;
1146         tpa_info->mapping = cons_rx_buf->mapping;
1147
1148         tpa_info->len =
1149                 le32_to_cpu(tpa_start->rx_tpa_start_cmp_len_flags_type) >>
1150                                 RX_TPA_START_CMP_LEN_SHIFT;
1151         if (likely(TPA_START_HASH_VALID(tpa_start))) {
1152                 u32 hash_type = TPA_START_HASH_TYPE(tpa_start);
1153
1154                 tpa_info->hash_type = PKT_HASH_TYPE_L4;
1155                 tpa_info->gso_type = SKB_GSO_TCPV4;
1156                 /* RSS profiles 1 and 3 with extract code 0 for inner 4-tuple */
1157                 if (hash_type == 3 || TPA_START_IS_IPV6(tpa_start1))
1158                         tpa_info->gso_type = SKB_GSO_TCPV6;
1159                 tpa_info->rss_hash =
1160                         le32_to_cpu(tpa_start->rx_tpa_start_cmp_rss_hash);
1161         } else {
1162                 tpa_info->hash_type = PKT_HASH_TYPE_NONE;
1163                 tpa_info->gso_type = 0;
1164                 if (netif_msg_rx_err(bp))
1165                         netdev_warn(bp->dev, "TPA packet without valid hash\n");
1166         }
1167         tpa_info->flags2 = le32_to_cpu(tpa_start1->rx_tpa_start_cmp_flags2);
1168         tpa_info->metadata = le32_to_cpu(tpa_start1->rx_tpa_start_cmp_metadata);
1169         tpa_info->hdr_info = le32_to_cpu(tpa_start1->rx_tpa_start_cmp_hdr_info);
1170
1171         rxr->rx_prod = NEXT_RX(prod);
1172         cons = NEXT_RX(cons);
1173         rxr->rx_next_cons = NEXT_RX(cons);
1174         cons_rx_buf = &rxr->rx_buf_ring[cons];
1175
1176         bnxt_reuse_rx_data(rxr, cons, cons_rx_buf->data);
1177         rxr->rx_prod = NEXT_RX(rxr->rx_prod);
1178         cons_rx_buf->data = NULL;
1179 }
1180
1181 static void bnxt_abort_tpa(struct bnxt_cp_ring_info *cpr, u16 cp_cons,
1182                            u32 agg_bufs)
1183 {
1184         if (agg_bufs)
1185                 bnxt_reuse_rx_agg_bufs(cpr, cp_cons, agg_bufs);
1186 }
1187
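/* GRO fixup for 5731x-class chips: recover the network/transport header
 * offsets from the TPA hdr_info, set the headers on the skb and rewrite
 * the TCP pseudo-header checksum so the packet can be completed with
 * tcp_gro_complete().
 */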
1188 static struct sk_buff *bnxt_gro_func_5731x(struct bnxt_tpa_info *tpa_info,
1189                                            int payload_off, int tcp_ts,
1190                                            struct sk_buff *skb)
1191 {
1192 #ifdef CONFIG_INET
1193         struct tcphdr *th;
1194         int len, nw_off;
1195         u16 outer_ip_off, inner_ip_off, inner_mac_off;
1196         u32 hdr_info = tpa_info->hdr_info;
1197         bool loopback = false;
1198
1199         inner_ip_off = BNXT_TPA_INNER_L3_OFF(hdr_info);
1200         inner_mac_off = BNXT_TPA_INNER_L2_OFF(hdr_info);
1201         outer_ip_off = BNXT_TPA_OUTER_L3_OFF(hdr_info);
1202
1203         /* If the packet is an internal loopback packet, the offsets will
1204          * have an extra 4 bytes.
1205          */
1206         if (inner_mac_off == 4) {
1207                 loopback = true;
1208         } else if (inner_mac_off > 4) {
1209                 __be16 proto = *((__be16 *)(skb->data + inner_ip_off -
1210                                             ETH_HLEN - 2));
1211
1212                 /* We only support inner IPv4/IPv6.  If we don't see the
1213                  * correct protocol ID, it must be a loopback packet where
1214                  * the offsets are off by 4.
1215                  */
1216                 if (proto != htons(ETH_P_IP) && proto != htons(ETH_P_IPV6))
1217                         loopback = true;
1218         }
1219         if (loopback) {
1220                 /* internal loopback packet, reduce all offsets by 4 */
1221                 inner_ip_off -= 4;
1222                 inner_mac_off -= 4;
1223                 outer_ip_off -= 4;
1224         }
1225
1226         nw_off = inner_ip_off - ETH_HLEN;
1227         skb_set_network_header(skb, nw_off);
1228         if (tpa_info->flags2 & RX_TPA_START_CMP_FLAGS2_IP_TYPE) {
1229                 struct ipv6hdr *iph = ipv6_hdr(skb);
1230
1231                 skb_set_transport_header(skb, nw_off + sizeof(struct ipv6hdr));
1232                 len = skb->len - skb_transport_offset(skb);
1233                 th = tcp_hdr(skb);
1234                 th->check = ~tcp_v6_check(len, &iph->saddr, &iph->daddr, 0);
1235         } else {
1236                 struct iphdr *iph = ip_hdr(skb);
1237
1238                 skb_set_transport_header(skb, nw_off + sizeof(struct iphdr));
1239                 len = skb->len - skb_transport_offset(skb);
1240                 th = tcp_hdr(skb);
1241                 th->check = ~tcp_v4_check(len, iph->saddr, iph->daddr, 0);
1242         }
1243
1244         if (inner_mac_off) { /* tunnel */
1245                 struct udphdr *uh = NULL;
1246                 __be16 proto = *((__be16 *)(skb->data + outer_ip_off -
1247                                             ETH_HLEN - 2));
1248
1249                 if (proto == htons(ETH_P_IP)) {
1250                         struct iphdr *iph = (struct iphdr *)skb->data;
1251
1252                         if (iph->protocol == IPPROTO_UDP)
1253                                 uh = (struct udphdr *)(iph + 1);
1254                 } else {
1255                         struct ipv6hdr *iph = (struct ipv6hdr *)skb->data;
1256
1257                         if (iph->nexthdr == IPPROTO_UDP)
1258                                 uh = (struct udphdr *)(iph + 1);
1259                 }
1260                 if (uh) {
1261                         if (uh->check)
1262                                 skb_shinfo(skb)->gso_type |=
1263                                         SKB_GSO_UDP_TUNNEL_CSUM;
1264                         else
1265                                 skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_TUNNEL;
1266                 }
1267         }
1268 #endif
1269         return skb;
1270 }
1271
1272 #define BNXT_IPV4_HDR_SIZE      (sizeof(struct iphdr) + sizeof(struct tcphdr))
1273 #define BNXT_IPV6_HDR_SIZE      (sizeof(struct ipv6hdr) + sizeof(struct tcphdr))
1274
1275 static struct sk_buff *bnxt_gro_func_5730x(struct bnxt_tpa_info *tpa_info,
1276                                            int payload_off, int tcp_ts,
1277                                            struct sk_buff *skb)
1278 {
1279 #ifdef CONFIG_INET
1280         struct tcphdr *th;
1281         int len, nw_off, tcp_opt_len = 0;
1282
1283         if (tcp_ts)
1284                 tcp_opt_len = 12;
1285
1286         if (tpa_info->gso_type == SKB_GSO_TCPV4) {
1287                 struct iphdr *iph;
1288
1289                 nw_off = payload_off - BNXT_IPV4_HDR_SIZE - tcp_opt_len -
1290                          ETH_HLEN;
1291                 skb_set_network_header(skb, nw_off);
1292                 iph = ip_hdr(skb);
1293                 skb_set_transport_header(skb, nw_off + sizeof(struct iphdr));
1294                 len = skb->len - skb_transport_offset(skb);
1295                 th = tcp_hdr(skb);
1296                 th->check = ~tcp_v4_check(len, iph->saddr, iph->daddr, 0);
1297         } else if (tpa_info->gso_type == SKB_GSO_TCPV6) {
1298                 struct ipv6hdr *iph;
1299
1300                 nw_off = payload_off - BNXT_IPV6_HDR_SIZE - tcp_opt_len -
1301                          ETH_HLEN;
1302                 skb_set_network_header(skb, nw_off);
1303                 iph = ipv6_hdr(skb);
1304                 skb_set_transport_header(skb, nw_off + sizeof(struct ipv6hdr));
1305                 len = skb->len - skb_transport_offset(skb);
1306                 th = tcp_hdr(skb);
1307                 th->check = ~tcp_v6_check(len, &iph->saddr, &iph->daddr, 0);
1308         } else {
1309                 dev_kfree_skb_any(skb);
1310                 return NULL;
1311         }
1312
1313         if (nw_off) { /* tunnel */
1314                 struct udphdr *uh = NULL;
1315
1316                 if (skb->protocol == htons(ETH_P_IP)) {
1317                         struct iphdr *iph = (struct iphdr *)skb->data;
1318
1319                         if (iph->protocol == IPPROTO_UDP)
1320                                 uh = (struct udphdr *)(iph + 1);
1321                 } else {
1322                         struct ipv6hdr *iph = (struct ipv6hdr *)skb->data;
1323
1324                         if (iph->nexthdr == IPPROTO_UDP)
1325                                 uh = (struct udphdr *)(iph + 1);
1326                 }
1327                 if (uh) {
1328                         if (uh->check)
1329                                 skb_shinfo(skb)->gso_type |=
1330                                         SKB_GSO_UDP_TUNNEL_CSUM;
1331                         else
1332                                 skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_TUNNEL;
1333                 }
1334         }
1335 #endif
1336         return skb;
1337 }
1338
1339 static inline struct sk_buff *bnxt_gro_skb(struct bnxt *bp,
1340                                            struct bnxt_tpa_info *tpa_info,
1341                                            struct rx_tpa_end_cmp *tpa_end,
1342                                            struct rx_tpa_end_cmp_ext *tpa_end1,
1343                                            struct sk_buff *skb)
1344 {
1345 #ifdef CONFIG_INET
1346         int payload_off;
1347         u16 segs;
1348
1349         segs = TPA_END_TPA_SEGS(tpa_end);
1350         if (segs == 1)
1351                 return skb;
1352
1353         NAPI_GRO_CB(skb)->count = segs;
1354         skb_shinfo(skb)->gso_size =
1355                 le32_to_cpu(tpa_end1->rx_tpa_end_cmp_seg_len);
1356         skb_shinfo(skb)->gso_type = tpa_info->gso_type;
1357         payload_off = (le32_to_cpu(tpa_end->rx_tpa_end_cmp_misc_v1) &
1358                        RX_TPA_END_CMP_PAYLOAD_OFFSET) >>
1359                       RX_TPA_END_CMP_PAYLOAD_OFFSET_SHIFT;
1360         skb = bp->gro_func(tpa_info, payload_off, TPA_END_GRO_TS(tpa_end), skb);
1361         if (likely(skb))
1362                 tcp_gro_complete(skb);
1363 #endif
1364         return skb;
1365 }
1366
1367 /* Given the cfa_code of a received packet, determine which
1368  * netdev (vf-rep or PF) the packet is destined for.
1369  */
1370 static struct net_device *bnxt_get_pkt_dev(struct bnxt *bp, u16 cfa_code)
1371 {
1372         struct net_device *dev = bnxt_get_vf_rep(bp, cfa_code);
1373
1374         /* if vf-rep dev is NULL, the packet must belong to the PF */
1375         return dev ? dev : bp->dev;
1376 }
1377
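/* Handle a TPA_END completion: reclaim the buffer tracked for this
 * aggregation ID, copy or rebuild the skb, attach any aggregation pages,
 * and apply RSS hash, VLAN and checksum metadata.  Returns ERR_PTR(-EBUSY)
 * if the completion ring does not yet hold all the aggregation buffers.
 */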
1378 static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp,
1379                                            struct bnxt_cp_ring_info *cpr,
1380                                            u32 *raw_cons,
1381                                            struct rx_tpa_end_cmp *tpa_end,
1382                                            struct rx_tpa_end_cmp_ext *tpa_end1,
1383                                            u8 *event)
1384 {
1385         struct bnxt_napi *bnapi = cpr->bnapi;
1386         struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
1387         u8 agg_id = TPA_END_AGG_ID(tpa_end);
1388         u8 *data_ptr, agg_bufs;
1389         u16 cp_cons = RING_CMP(*raw_cons);
1390         unsigned int len;
1391         struct bnxt_tpa_info *tpa_info;
1392         dma_addr_t mapping;
1393         struct sk_buff *skb;
1394         void *data;
1395
1396         if (unlikely(bnapi->in_reset)) {
1397                 int rc = bnxt_discard_rx(bp, cpr, raw_cons, tpa_end);
1398
1399                 if (rc < 0)
1400                         return ERR_PTR(-EBUSY);
1401                 return NULL;
1402         }
1403
1404         tpa_info = &rxr->rx_tpa[agg_id];
1405         data = tpa_info->data;
1406         data_ptr = tpa_info->data_ptr;
1407         prefetch(data_ptr);
1408         len = tpa_info->len;
1409         mapping = tpa_info->mapping;
1410
1411         agg_bufs = (le32_to_cpu(tpa_end->rx_tpa_end_cmp_misc_v1) &
1412                     RX_TPA_END_CMP_AGG_BUFS) >> RX_TPA_END_CMP_AGG_BUFS_SHIFT;
1413
1414         if (agg_bufs) {
1415                 if (!bnxt_agg_bufs_valid(bp, cpr, agg_bufs, raw_cons))
1416                         return ERR_PTR(-EBUSY);
1417
1418                 *event |= BNXT_AGG_EVENT;
1419                 cp_cons = NEXT_CMP(cp_cons);
1420         }
1421
1422         if (unlikely(agg_bufs > MAX_SKB_FRAGS || TPA_END_ERRORS(tpa_end1))) {
1423                 bnxt_abort_tpa(cpr, cp_cons, agg_bufs);
1424                 if (agg_bufs > MAX_SKB_FRAGS)
1425                         netdev_warn(bp->dev, "TPA frags %d exceeded MAX_SKB_FRAGS %d\n",
1426                                     agg_bufs, (int)MAX_SKB_FRAGS);
1427                 return NULL;
1428         }
1429
1430         if (len <= bp->rx_copy_thresh) {
1431                 skb = bnxt_copy_skb(bnapi, data_ptr, len, mapping);
1432                 if (!skb) {
1433                         bnxt_abort_tpa(cpr, cp_cons, agg_bufs);
1434                         return NULL;
1435                 }
1436         } else {
1437                 u8 *new_data;
1438                 dma_addr_t new_mapping;
1439
1440                 new_data = __bnxt_alloc_rx_data(bp, &new_mapping, GFP_ATOMIC);
1441                 if (!new_data) {
1442                         bnxt_abort_tpa(cpr, cp_cons, agg_bufs);
1443                         return NULL;
1444                 }
1445
1446                 tpa_info->data = new_data;
1447                 tpa_info->data_ptr = new_data + bp->rx_offset;
1448                 tpa_info->mapping = new_mapping;
1449
1450                 skb = build_skb(data, 0);
1451                 dma_unmap_single_attrs(&bp->pdev->dev, mapping,
1452                                        bp->rx_buf_use_size, bp->rx_dir,
1453                                        DMA_ATTR_WEAK_ORDERING);
1454
1455                 if (!skb) {
1456                         kfree(data);
1457                         bnxt_abort_tpa(cpr, cp_cons, agg_bufs);
1458                         return NULL;
1459                 }
1460                 skb_reserve(skb, bp->rx_offset);
1461                 skb_put(skb, len);
1462         }
1463
1464         if (agg_bufs) {
1465                 skb = bnxt_rx_pages(bp, cpr, skb, cp_cons, agg_bufs);
1466                 if (!skb) {
1467                         /* Page reuse already handled by bnxt_rx_pages(). */
1468                         return NULL;
1469                 }
1470         }
1471
1472         skb->protocol =
1473                 eth_type_trans(skb, bnxt_get_pkt_dev(bp, tpa_info->cfa_code));
1474
1475         if (tpa_info->hash_type != PKT_HASH_TYPE_NONE)
1476                 skb_set_hash(skb, tpa_info->rss_hash, tpa_info->hash_type);
1477
1478         if ((tpa_info->flags2 & RX_CMP_FLAGS2_META_FORMAT_VLAN) &&
1479             (skb->dev->features & NETIF_F_HW_VLAN_CTAG_RX)) {
1480                 u16 vlan_proto = tpa_info->metadata >>
1481                         RX_CMP_FLAGS2_METADATA_TPID_SFT;
1482                 u16 vtag = tpa_info->metadata & RX_CMP_FLAGS2_METADATA_TCI_MASK;
1483
1484                 __vlan_hwaccel_put_tag(skb, htons(vlan_proto), vtag);
1485         }
1486
1487         skb_checksum_none_assert(skb);
1488         if (likely(tpa_info->flags2 & RX_TPA_START_CMP_FLAGS2_L4_CS_CALC)) {
1489                 skb->ip_summed = CHECKSUM_UNNECESSARY;
1490                 skb->csum_level =
1491                         (tpa_info->flags2 & RX_CMP_FLAGS2_T_L4_CS_CALC) >> 3;
1492         }
1493
1494         if (TPA_END_GRO(tpa_end))
1495                 skb = bnxt_gro_skb(bp, tpa_info, tpa_end, tpa_end1, skb);
1496
1497         return skb;
1498 }
1499
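/* Deliver a completed skb either to its VF representor or to the stack
 * through NAPI GRO on the PF device.
 */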
1500 static void bnxt_deliver_skb(struct bnxt *bp, struct bnxt_napi *bnapi,
1501                              struct sk_buff *skb)
1502 {
1503         if (skb->dev != bp->dev) {
1504                 /* this packet belongs to a vf-rep */
1505                 bnxt_vf_rep_rx(bp, skb);
1506                 return;
1507         }
1508         skb_record_rx_queue(skb, bnapi->index);
1509         napi_gro_receive(&bnapi->napi, skb);
1510 }
1511
1512 /* returns the following:
1513  * 1       - 1 packet successfully received
1514  * 0       - successful TPA_START, packet not completed yet
1515  * -EBUSY  - completion ring does not have all the agg buffers yet
1516  * -ENOMEM - packet aborted due to out of memory
1517  * -EIO    - packet aborted due to hw error indicated in BD
1518  */
1519 static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
1520                        u32 *raw_cons, u8 *event)
1521 {
1522         struct bnxt_napi *bnapi = cpr->bnapi;
1523         struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
1524         struct net_device *dev = bp->dev;
1525         struct rx_cmp *rxcmp;
1526         struct rx_cmp_ext *rxcmp1;
1527         u32 tmp_raw_cons = *raw_cons;
1528         u16 cfa_code, cons, prod, cp_cons = RING_CMP(tmp_raw_cons);
1529         struct bnxt_sw_rx_bd *rx_buf;
1530         unsigned int len;
1531         u8 *data_ptr, agg_bufs, cmp_type;
1532         dma_addr_t dma_addr;
1533         struct sk_buff *skb;
1534         void *data;
1535         int rc = 0;
1536         u32 misc;
1537
1538         rxcmp = (struct rx_cmp *)
1539                         &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
1540
1541         tmp_raw_cons = NEXT_RAW_CMP(tmp_raw_cons);
1542         cp_cons = RING_CMP(tmp_raw_cons);
1543         rxcmp1 = (struct rx_cmp_ext *)
1544                         &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
1545
1546         if (!RX_CMP_VALID(rxcmp1, tmp_raw_cons))
1547                 return -EBUSY;
1548
1549         cmp_type = RX_CMP_TYPE(rxcmp);
1550
1551         prod = rxr->rx_prod;
1552
1553         if (cmp_type == CMP_TYPE_RX_L2_TPA_START_CMP) {
1554                 bnxt_tpa_start(bp, rxr, (struct rx_tpa_start_cmp *)rxcmp,
1555                                (struct rx_tpa_start_cmp_ext *)rxcmp1);
1556
1557                 *event |= BNXT_RX_EVENT;
1558                 goto next_rx_no_prod_no_len;
1559
1560         } else if (cmp_type == CMP_TYPE_RX_L2_TPA_END_CMP) {
1561                 skb = bnxt_tpa_end(bp, cpr, &tmp_raw_cons,
1562                                    (struct rx_tpa_end_cmp *)rxcmp,
1563                                    (struct rx_tpa_end_cmp_ext *)rxcmp1, event);
1564
1565                 if (IS_ERR(skb))
1566                         return -EBUSY;
1567
1568                 rc = -ENOMEM;
1569                 if (likely(skb)) {
1570                         bnxt_deliver_skb(bp, bnapi, skb);
1571                         rc = 1;
1572                 }
1573                 *event |= BNXT_RX_EVENT;
1574                 goto next_rx_no_prod_no_len;
1575         }
1576
1577         cons = rxcmp->rx_cmp_opaque;
1578         rx_buf = &rxr->rx_buf_ring[cons];
1579         data = rx_buf->data;
1580         data_ptr = rx_buf->data_ptr;
1581         if (unlikely(cons != rxr->rx_next_cons)) {
1582                 int rc1 = bnxt_discard_rx(bp, cpr, raw_cons, rxcmp);
1583
1584                 bnxt_sched_reset(bp, rxr);
1585                 return rc1;
1586         }
1587         prefetch(data_ptr);
1588
1589         misc = le32_to_cpu(rxcmp->rx_cmp_misc_v1);
1590         agg_bufs = (misc & RX_CMP_AGG_BUFS) >> RX_CMP_AGG_BUFS_SHIFT;
1591
1592         if (agg_bufs) {
1593                 if (!bnxt_agg_bufs_valid(bp, cpr, agg_bufs, &tmp_raw_cons))
1594                         return -EBUSY;
1595
1596                 cp_cons = NEXT_CMP(cp_cons);
1597                 *event |= BNXT_AGG_EVENT;
1598         }
1599         *event |= BNXT_RX_EVENT;
1600
1601         rx_buf->data = NULL;
1602         if (rxcmp1->rx_cmp_cfa_code_errors_v2 & RX_CMP_L2_ERRORS) {
1603                 bnxt_reuse_rx_data(rxr, cons, data);
1604                 if (agg_bufs)
1605                         bnxt_reuse_rx_agg_bufs(cpr, cp_cons, agg_bufs);
1606
1607                 rc = -EIO;
1608                 goto next_rx;
1609         }
1610
1611         len = le32_to_cpu(rxcmp->rx_cmp_len_flags_type) >> RX_CMP_LEN_SHIFT;
1612         dma_addr = rx_buf->mapping;
1613
1614         if (bnxt_rx_xdp(bp, rxr, cons, data, &data_ptr, &len, event)) {
1615                 rc = 1;
1616                 goto next_rx;
1617         }
1618
1619         if (len <= bp->rx_copy_thresh) {
1620                 skb = bnxt_copy_skb(bnapi, data_ptr, len, dma_addr);
1621                 bnxt_reuse_rx_data(rxr, cons, data);
1622                 if (!skb) {
1623                         rc = -ENOMEM;
1624                         goto next_rx;
1625                 }
1626         } else {
1627                 u32 payload;
1628
1629                 if (rx_buf->data_ptr == data_ptr)
1630                         payload = misc & RX_CMP_PAYLOAD_OFFSET;
1631                 else
1632                         payload = 0;
1633                 skb = bp->rx_skb_func(bp, rxr, cons, data, data_ptr, dma_addr,
1634                                       payload | len);
1635                 if (!skb) {
1636                         rc = -ENOMEM;
1637                         goto next_rx;
1638                 }
1639         }
1640
1641         if (agg_bufs) {
1642                 skb = bnxt_rx_pages(bp, cpr, skb, cp_cons, agg_bufs);
1643                 if (!skb) {
1644                         rc = -ENOMEM;
1645                         goto next_rx;
1646                 }
1647         }
1648
1649         if (RX_CMP_HASH_VALID(rxcmp)) {
1650                 u32 hash_type = RX_CMP_HASH_TYPE(rxcmp);
1651                 enum pkt_hash_types type = PKT_HASH_TYPE_L4;
1652
1653                 /* RSS profiles 1 and 3 with extract code 0 for inner 4-tuple */
1654                 if (hash_type != 1 && hash_type != 3)
1655                         type = PKT_HASH_TYPE_L3;
1656                 skb_set_hash(skb, le32_to_cpu(rxcmp->rx_cmp_rss_hash), type);
1657         }
1658
1659         cfa_code = RX_CMP_CFA_CODE(rxcmp1);
1660         skb->protocol = eth_type_trans(skb, bnxt_get_pkt_dev(bp, cfa_code));
1661
1662         if ((rxcmp1->rx_cmp_flags2 &
1663              cpu_to_le32(RX_CMP_FLAGS2_META_FORMAT_VLAN)) &&
1664             (skb->dev->features & NETIF_F_HW_VLAN_CTAG_RX)) {
1665                 u32 meta_data = le32_to_cpu(rxcmp1->rx_cmp_meta_data);
1666                 u16 vtag = meta_data & RX_CMP_FLAGS2_METADATA_TCI_MASK;
1667                 u16 vlan_proto = meta_data >> RX_CMP_FLAGS2_METADATA_TPID_SFT;
1668
1669                 __vlan_hwaccel_put_tag(skb, htons(vlan_proto), vtag);
1670         }
1671
1672         skb_checksum_none_assert(skb);
1673         if (RX_CMP_L4_CS_OK(rxcmp1)) {
1674                 if (dev->features & NETIF_F_RXCSUM) {
1675                         skb->ip_summed = CHECKSUM_UNNECESSARY;
1676                         skb->csum_level = RX_CMP_ENCAP(rxcmp1);
1677                 }
1678         } else {
1679                 if (rxcmp1->rx_cmp_cfa_code_errors_v2 & RX_CMP_L4_CS_ERR_BITS) {
1680                         if (dev->features & NETIF_F_RXCSUM)
1681                                 bnapi->cp_ring.rx_l4_csum_errors++;
1682                 }
1683         }
1684
1685         bnxt_deliver_skb(bp, bnapi, skb);
1686         rc = 1;
1687
1688 next_rx:
1689         rxr->rx_prod = NEXT_RX(prod);
1690         rxr->rx_next_cons = NEXT_RX(cons);
1691
1692         cpr->rx_packets += 1;
1693         cpr->rx_bytes += len;
1694
1695 next_rx_no_prod_no_len:
1696         *raw_cons = tmp_raw_cons;
1697
1698         return rc;
1699 }
1700
1701 /* In netpoll mode, if we are using a combined completion ring, we need to
1702  * discard the rx packets and recycle the buffers.
1703  */
1704 static int bnxt_force_rx_discard(struct bnxt *bp,
1705                                  struct bnxt_cp_ring_info *cpr,
1706                                  u32 *raw_cons, u8 *event)
1707 {
1708         u32 tmp_raw_cons = *raw_cons;
1709         struct rx_cmp_ext *rxcmp1;
1710         struct rx_cmp *rxcmp;
1711         u16 cp_cons;
1712         u8 cmp_type;
1713
1714         cp_cons = RING_CMP(tmp_raw_cons);
1715         rxcmp = (struct rx_cmp *)
1716                         &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
1717
1718         tmp_raw_cons = NEXT_RAW_CMP(tmp_raw_cons);
1719         cp_cons = RING_CMP(tmp_raw_cons);
1720         rxcmp1 = (struct rx_cmp_ext *)
1721                         &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
1722
1723         if (!RX_CMP_VALID(rxcmp1, tmp_raw_cons))
1724                 return -EBUSY;
1725
1726         cmp_type = RX_CMP_TYPE(rxcmp);
1727         if (cmp_type == CMP_TYPE_RX_L2_CMP) {
1728                 rxcmp1->rx_cmp_cfa_code_errors_v2 |=
1729                         cpu_to_le32(RX_CMPL_ERRORS_CRC_ERROR);
1730         } else if (cmp_type == CMP_TYPE_RX_L2_TPA_END_CMP) {
1731                 struct rx_tpa_end_cmp_ext *tpa_end1;
1732
1733                 tpa_end1 = (struct rx_tpa_end_cmp_ext *)rxcmp1;
1734                 tpa_end1->rx_tpa_end_cmp_errors_v2 |=
1735                         cpu_to_le32(RX_TPA_END_CMP_ERRORS);
1736         }
1737         return bnxt_rx_pkt(bp, cpr, raw_cons, event);
1738 }
1739
1740 #define BNXT_GET_EVENT_PORT(data)       \
1741         ((data) &                       \
1742          ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_PORT_ID_MASK)
1743
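/* Decode a firmware async event completion and set the matching sp_event
 * bit so that the slow path workqueue can act on it.
 */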
1744 static int bnxt_async_event_process(struct bnxt *bp,
1745                                     struct hwrm_async_event_cmpl *cmpl)
1746 {
1747         u16 event_id = le16_to_cpu(cmpl->event_id);
1748
1749         /* TODO CHIMP_FW: Define event IDs for link change, error, etc. */
1750         switch (event_id) {
1751         case ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE: {
1752                 u32 data1 = le32_to_cpu(cmpl->event_data1);
1753                 struct bnxt_link_info *link_info = &bp->link_info;
1754
1755                 if (BNXT_VF(bp))
1756                         goto async_event_process_exit;
1757
1758                 /* print unsupported speed warning in forced speed mode only */
1759                 if (!(link_info->autoneg & BNXT_AUTONEG_SPEED) &&
1760                     (data1 & 0x20000)) {
1761                         u16 fw_speed = link_info->force_link_speed;
1762                         u32 speed = bnxt_fw_to_ethtool_speed(fw_speed);
1763
1764                         if (speed != SPEED_UNKNOWN)
1765                                 netdev_warn(bp->dev, "Link speed %d no longer supported\n",
1766                                             speed);
1767                 }
1768                 set_bit(BNXT_LINK_SPEED_CHNG_SP_EVENT, &bp->sp_event);
1769         }
1770         /* fall through */
1771         case ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE:
1772                 set_bit(BNXT_LINK_CHNG_SP_EVENT, &bp->sp_event);
1773                 break;
1774         case ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD:
1775                 set_bit(BNXT_HWRM_PF_UNLOAD_SP_EVENT, &bp->sp_event);
1776                 break;
1777         case ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED: {
1778                 u32 data1 = le32_to_cpu(cmpl->event_data1);
1779                 u16 port_id = BNXT_GET_EVENT_PORT(data1);
1780
1781                 if (BNXT_VF(bp))
1782                         break;
1783
1784                 if (bp->pf.port_id != port_id)
1785                         break;
1786
1787                 set_bit(BNXT_HWRM_PORT_MODULE_SP_EVENT, &bp->sp_event);
1788                 break;
1789         }
1790         case ASYNC_EVENT_CMPL_EVENT_ID_VF_CFG_CHANGE:
1791                 if (BNXT_PF(bp))
1792                         goto async_event_process_exit;
1793                 set_bit(BNXT_RESET_TASK_SILENT_SP_EVENT, &bp->sp_event);
1794                 break;
1795         default:
1796                 goto async_event_process_exit;
1797         }
1798         bnxt_queue_sp_work(bp);
1799 async_event_process_exit:
1800         bnxt_ulp_async_events(bp, cmpl);
1801         return 0;
1802 }
1803
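/* Dispatch HWRM completions found on the completion ring: match DONE
 * completions against the outstanding interrupt sequence ID, queue
 * forwarded VF requests for the PF, and process async events.
 */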
1804 static int bnxt_hwrm_handler(struct bnxt *bp, struct tx_cmp *txcmp)
1805 {
1806         u16 cmpl_type = TX_CMP_TYPE(txcmp), vf_id, seq_id;
1807         struct hwrm_cmpl *h_cmpl = (struct hwrm_cmpl *)txcmp;
1808         struct hwrm_fwd_req_cmpl *fwd_req_cmpl =
1809                                 (struct hwrm_fwd_req_cmpl *)txcmp;
1810
1811         switch (cmpl_type) {
1812         case CMPL_BASE_TYPE_HWRM_DONE:
1813                 seq_id = le16_to_cpu(h_cmpl->sequence_id);
1814                 if (seq_id == bp->hwrm_intr_seq_id)
1815                         bp->hwrm_intr_seq_id = (u16)~bp->hwrm_intr_seq_id;
1816                 else
1817                         netdev_err(bp->dev, "Invalid hwrm seq id %d\n", seq_id);
1818                 break;
1819
1820         case CMPL_BASE_TYPE_HWRM_FWD_REQ:
1821                 vf_id = le16_to_cpu(fwd_req_cmpl->source_id);
1822
1823                 if ((vf_id < bp->pf.first_vf_id) ||
1824                     (vf_id >= bp->pf.first_vf_id + bp->pf.active_vfs)) {
1825                         netdev_err(bp->dev, "Msg contains invalid VF id %x\n",
1826                                    vf_id);
1827                         return -EINVAL;
1828                 }
1829
1830                 set_bit(vf_id - bp->pf.first_vf_id, bp->pf.vf_event_bmap);
1831                 set_bit(BNXT_HWRM_EXEC_FWD_REQ_SP_EVENT, &bp->sp_event);
1832                 bnxt_queue_sp_work(bp);
1833                 break;
1834
1835         case CMPL_BASE_TYPE_HWRM_ASYNC_EVENT:
1836                 bnxt_async_event_process(bp,
1837                                          (struct hwrm_async_event_cmpl *)txcmp);
1838
1839         default:
1840                 break;
1841         }
1842
1843         return 0;
1844 }
1845
1846 static irqreturn_t bnxt_msix(int irq, void *dev_instance)
1847 {
1848         struct bnxt_napi *bnapi = dev_instance;
1849         struct bnxt *bp = bnapi->bp;
1850         struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
1851         u32 cons = RING_CMP(cpr->cp_raw_cons);
1852
1853         cpr->event_ctr++;
1854         prefetch(&cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)]);
1855         napi_schedule(&bnapi->napi);
1856         return IRQ_HANDLED;
1857 }
1858
1859 static inline int bnxt_has_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr)
1860 {
1861         u32 raw_cons = cpr->cp_raw_cons;
1862         u16 cons = RING_CMP(raw_cons);
1863         struct tx_cmp *txcmp;
1864
1865         txcmp = &cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)];
1866
1867         return TX_CMP_VALID(txcmp, raw_cons);
1868 }
1869
1870 static irqreturn_t bnxt_inta(int irq, void *dev_instance)
1871 {
1872         struct bnxt_napi *bnapi = dev_instance;
1873         struct bnxt *bp = bnapi->bp;
1874         struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
1875         u32 cons = RING_CMP(cpr->cp_raw_cons);
1876         u32 int_status;
1877
1878         prefetch(&cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)]);
1879
1880         if (!bnxt_has_work(bp, cpr)) {
1881                 int_status = readl(bp->bar0 + BNXT_CAG_REG_LEGACY_INT_STATUS);
1882                 /* return if erroneous interrupt */
1883                 if (!(int_status & (0x10000 << cpr->cp_ring_struct.fw_ring_id)))
1884                         return IRQ_NONE;
1885         }
1886
1887         /* disable ring IRQ */
1888         BNXT_CP_DB_IRQ_DIS(cpr->cp_db.doorbell);
1889
1890         /* Return here if interrupt is shared and is disabled. */
1891         if (unlikely(atomic_read(&bp->intr_sem) != 0))
1892                 return IRQ_HANDLED;
1893
1894         napi_schedule(&bnapi->napi);
1895         return IRQ_HANDLED;
1896 }
1897
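/* Core completion ring processing: walk valid entries up to the NAPI
 * budget, counting TX completions, handing RX completions to
 * bnxt_rx_pkt() (or discarding them when the budget is zero) and passing
 * HWRM completions to bnxt_hwrm_handler().  TX reclaim and RX doorbell
 * updates are deferred to __bnxt_poll_work_done().
 */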
1898 static int __bnxt_poll_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
1899                             int budget)
1900 {
1901         struct bnxt_napi *bnapi = cpr->bnapi;
1902         u32 raw_cons = cpr->cp_raw_cons;
1903         u32 cons;
1904         int tx_pkts = 0;
1905         int rx_pkts = 0;
1906         u8 event = 0;
1907         struct tx_cmp *txcmp;
1908
1909         cpr->has_more_work = 0;
1910         while (1) {
1911                 int rc;
1912
1913                 cons = RING_CMP(raw_cons);
1914                 txcmp = &cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)];
1915
1916                 if (!TX_CMP_VALID(txcmp, raw_cons))
1917                         break;
1918
1919                 /* The valid test of the entry must be done before
1920                  * reading any further.
1921                  */
1922                 dma_rmb();
1923                 cpr->had_work_done = 1;
1924                 if (TX_CMP_TYPE(txcmp) == CMP_TYPE_TX_L2_CMP) {
1925                         tx_pkts++;
1926                         /* return full budget so NAPI will complete. */
1927                         if (unlikely(tx_pkts > bp->tx_wake_thresh)) {
1928                                 rx_pkts = budget;
1929                                 raw_cons = NEXT_RAW_CMP(raw_cons);
1930                                 if (budget)
1931                                         cpr->has_more_work = 1;
1932                                 break;
1933                         }
1934                 } else if ((TX_CMP_TYPE(txcmp) & 0x30) == 0x10) {
1935                         if (likely(budget))
1936                                 rc = bnxt_rx_pkt(bp, cpr, &raw_cons, &event);
1937                         else
1938                                 rc = bnxt_force_rx_discard(bp, cpr, &raw_cons,
1939                                                            &event);
1940                         if (likely(rc >= 0))
1941                                 rx_pkts += rc;
1942                         /* Increment rx_pkts when rc is -ENOMEM to count towards
1943                          * the NAPI budget.  Otherwise, we may potentially loop
1944                          * here forever if we consistently cannot allocate
1945                          * buffers.
1946                          */
1947                         else if (rc == -ENOMEM && budget)
1948                                 rx_pkts++;
1949                         else if (rc == -EBUSY)  /* partial completion */
1950                                 break;
1951                 } else if (unlikely((TX_CMP_TYPE(txcmp) ==
1952                                      CMPL_BASE_TYPE_HWRM_DONE) ||
1953                                     (TX_CMP_TYPE(txcmp) ==
1954                                      CMPL_BASE_TYPE_HWRM_FWD_REQ) ||
1955                                     (TX_CMP_TYPE(txcmp) ==
1956                                      CMPL_BASE_TYPE_HWRM_ASYNC_EVENT))) {
1957                         bnxt_hwrm_handler(bp, txcmp);
1958                 }
1959                 raw_cons = NEXT_RAW_CMP(raw_cons);
1960
1961                 if (rx_pkts && rx_pkts == budget) {
1962                         cpr->has_more_work = 1;
1963                         break;
1964                 }
1965         }
1966
1967         if (event & BNXT_TX_EVENT) {
1968                 struct bnxt_tx_ring_info *txr = bnapi->tx_ring;
1969                 u16 prod = txr->tx_prod;
1970
1971                 /* Sync BD data before updating doorbell */
1972                 wmb();
1973
1974                 bnxt_db_write_relaxed(bp, &txr->tx_db, prod);
1975         }
1976
1977         cpr->cp_raw_cons = raw_cons;
1978         bnapi->tx_pkts += tx_pkts;
1979         bnapi->events |= event;
1980         return rx_pkts;
1981 }
1982
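/* Finish the work accumulated by __bnxt_poll_work(): reclaim TX buffers
 * and ring the RX (and aggregation) producer doorbells.
 */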
1983 static void __bnxt_poll_work_done(struct bnxt *bp, struct bnxt_napi *bnapi)
1984 {
1985         if (bnapi->tx_pkts) {
1986                 bnapi->tx_int(bp, bnapi, bnapi->tx_pkts);
1987                 bnapi->tx_pkts = 0;
1988         }
1989
1990         if (bnapi->events & BNXT_RX_EVENT) {
1991                 struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
1992
1993                 bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
1994                 if (bnapi->events & BNXT_AGG_EVENT)
1995                         bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod);
1996         }
1997         bnapi->events = 0;
1998 }
1999
2000 static int bnxt_poll_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
2001                           int budget)
2002 {
2003         struct bnxt_napi *bnapi = cpr->bnapi;
2004         int rx_pkts;
2005
2006         rx_pkts = __bnxt_poll_work(bp, cpr, budget);
2007
2008         /* ACK completion ring before freeing tx ring and producing new
2009          * buffers in rx/agg rings to prevent overflowing the completion
2010          * ring.
2011          */
2012         bnxt_db_cq(bp, &cpr->cp_db, cpr->cp_raw_cons);
2013
2014         __bnxt_poll_work_done(bp, bnapi);
2015         return rx_pkts;
2016 }
2017
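/* NAPI poll for the special Nitro A0 completion ring.  RX completions on
 * this ring are always recycled by forcing a CRC error status before
 * processing.
 */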
2018 static int bnxt_poll_nitroa0(struct napi_struct *napi, int budget)
2019 {
2020         struct bnxt_napi *bnapi = container_of(napi, struct bnxt_napi, napi);
2021         struct bnxt *bp = bnapi->bp;
2022         struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
2023         struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
2024         struct tx_cmp *txcmp;
2025         struct rx_cmp_ext *rxcmp1;
2026         u32 cp_cons, tmp_raw_cons;
2027         u32 raw_cons = cpr->cp_raw_cons;
2028         u32 rx_pkts = 0;
2029         u8 event = 0;
2030
2031         while (1) {
2032                 int rc;
2033
2034                 cp_cons = RING_CMP(raw_cons);
2035                 txcmp = &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
2036
2037                 if (!TX_CMP_VALID(txcmp, raw_cons))
2038                         break;
2039
2040                 if ((TX_CMP_TYPE(txcmp) & 0x30) == 0x10) {
2041                         tmp_raw_cons = NEXT_RAW_CMP(raw_cons);
2042                         cp_cons = RING_CMP(tmp_raw_cons);
2043                         rxcmp1 = (struct rx_cmp_ext *)
2044                           &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
2045
2046                         if (!RX_CMP_VALID(rxcmp1, tmp_raw_cons))
2047                                 break;
2048
2049                         /* force an error to recycle the buffer */
2050                         rxcmp1->rx_cmp_cfa_code_errors_v2 |=
2051                                 cpu_to_le32(RX_CMPL_ERRORS_CRC_ERROR);
2052
2053                         rc = bnxt_rx_pkt(bp, cpr, &raw_cons, &event);
2054                         if (likely(rc == -EIO) && budget)
2055                                 rx_pkts++;
2056                         else if (rc == -EBUSY)  /* partial completion */
2057                                 break;
2058                 } else if (unlikely(TX_CMP_TYPE(txcmp) ==
2059                                     CMPL_BASE_TYPE_HWRM_DONE)) {
2060                         bnxt_hwrm_handler(bp, txcmp);
2061                 } else {
2062                         netdev_err(bp->dev,
2063                                    "Invalid completion received on special ring\n");
2064                 }
2065                 raw_cons = NEXT_RAW_CMP(raw_cons);
2066
2067                 if (rx_pkts == budget)
2068                         break;
2069         }
2070
2071         cpr->cp_raw_cons = raw_cons;
2072         BNXT_DB_CQ(&cpr->cp_db, cpr->cp_raw_cons);
2073         bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
2074
2075         if (event & BNXT_AGG_EVENT)
2076                 bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod);
2077
2078         if (!bnxt_has_work(bp, cpr) && rx_pkts < budget) {
2079                 napi_complete_done(napi, rx_pkts);
2080                 BNXT_DB_CQ_ARM(&cpr->cp_db, cpr->cp_raw_cons);
2081         }
2082         return rx_pkts;
2083 }
2084
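/* Main NAPI poll for legacy (non-P5) chips: process completions until the
 * budget is exhausted or no work remains, then feed a DIM sample if
 * adaptive interrupt moderation is enabled.
 */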
2085 static int bnxt_poll(struct napi_struct *napi, int budget)
2086 {
2087         struct bnxt_napi *bnapi = container_of(napi, struct bnxt_napi, napi);
2088         struct bnxt *bp = bnapi->bp;
2089         struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
2090         int work_done = 0;
2091
2092         while (1) {
2093                 work_done += bnxt_poll_work(bp, cpr, budget - work_done);
2094
2095                 if (work_done >= budget) {
2096                         if (!budget)
2097                                 BNXT_DB_CQ_ARM(&cpr->cp_db, cpr->cp_raw_cons);
2098                         break;
2099                 }
2100
2101                 if (!bnxt_has_work(bp, cpr)) {
2102                         if (napi_complete_done(napi, work_done))
2103                                 BNXT_DB_CQ_ARM(&cpr->cp_db, cpr->cp_raw_cons);
2104                         break;
2105                 }
2106         }
2107         if (bp->flags & BNXT_FLAG_DIM) {
2108                 struct net_dim_sample dim_sample;
2109
2110                 net_dim_sample(cpr->event_ctr,
2111                                cpr->rx_packets,
2112                                cpr->rx_bytes,
2113                                &dim_sample);
2114                 net_dim(&cpr->dim, dim_sample);
2115         }
2116         mmiowb();
2117         return work_done;
2118 }
2119
2120 static int __bnxt_poll_cqs(struct bnxt *bp, struct bnxt_napi *bnapi, int budget)
2121 {
2122         struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
2123         int i, work_done = 0;
2124
2125         for (i = 0; i < 2; i++) {
2126                 struct bnxt_cp_ring_info *cpr2 = cpr->cp_ring_arr[i];
2127
2128                 if (cpr2) {
2129                         work_done += __bnxt_poll_work(bp, cpr2,
2130                                                       budget - work_done);
2131                         cpr->has_more_work |= cpr2->has_more_work;
2132                 }
2133         }
2134         return work_done;
2135 }
2136
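/* Write a doorbell of the given type (CQ or CQ_ARMALL) for each child
 * completion ring that had work done, then finish the deferred TX/RX work.
 */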
2137 static void __bnxt_poll_cqs_done(struct bnxt *bp, struct bnxt_napi *bnapi,
2138                                  u64 dbr_type, bool all)
2139 {
2140         struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
2141         int i;
2142
2143         for (i = 0; i < 2; i++) {
2144                 struct bnxt_cp_ring_info *cpr2 = cpr->cp_ring_arr[i];
2145                 struct bnxt_db_info *db;
2146
2147                 if (cpr2 && (all || cpr2->had_work_done)) {
2148                         db = &cpr2->cp_db;
2149                         writeq(db->db_key64 | dbr_type |
2150                                RING_CMP(cpr2->cp_raw_cons), db->doorbell);
2151                         cpr2->had_work_done = 0;
2152                 }
2153         }
2154         __bnxt_poll_work_done(bp, bnapi);
2155 }
2156
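/* NAPI poll for P5 chips: service the notification queue (NQ) and poll
 * the child completion rings it points to, re-arming the NQ only when
 * all outstanding work has been completed.
 */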
2157 static int bnxt_poll_p5(struct napi_struct *napi, int budget)
2158 {
2159         struct bnxt_napi *bnapi = container_of(napi, struct bnxt_napi, napi);
2160         struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
2161         u32 raw_cons = cpr->cp_raw_cons;
2162         struct bnxt *bp = bnapi->bp;
2163         struct nqe_cn *nqcmp;
2164         int work_done = 0;
2165         u32 cons;
2166
2167         if (cpr->has_more_work) {
2168                 cpr->has_more_work = 0;
2169                 work_done = __bnxt_poll_cqs(bp, bnapi, budget);
2170                 if (cpr->has_more_work) {
2171                         __bnxt_poll_cqs_done(bp, bnapi, DBR_TYPE_CQ, false);
2172                         return work_done;
2173                 }
2174                 __bnxt_poll_cqs_done(bp, bnapi, DBR_TYPE_CQ_ARMALL, true);
2175                 if (napi_complete_done(napi, work_done))
2176                         BNXT_DB_NQ_ARM_P5(&cpr->cp_db, cpr->cp_raw_cons);
2177                 return work_done;
2178         }
2179         while (1) {
2180                 cons = RING_CMP(raw_cons);
2181                 nqcmp = &cpr->nq_desc_ring[CP_RING(cons)][CP_IDX(cons)];
2182
2183                 if (!NQ_CMP_VALID(nqcmp, raw_cons)) {
2184                         __bnxt_poll_cqs_done(bp, bnapi, DBR_TYPE_CQ_ARMALL,
2185                                              false);
2186                         cpr->cp_raw_cons = raw_cons;
2187                         if (napi_complete_done(napi, work_done))
2188                                 BNXT_DB_NQ_ARM_P5(&cpr->cp_db,
2189                                                   cpr->cp_raw_cons);
2190                         return work_done;
2191                 }
2192
2193                 /* The valid test of the entry must be done before
2194                  * reading any further.
2195                  */
2196                 dma_rmb();
2197
2198                 if (nqcmp->type == cpu_to_le16(NQ_CN_TYPE_CQ_NOTIFICATION)) {
2199                         u32 idx = le32_to_cpu(nqcmp->cq_handle_low);
2200                         struct bnxt_cp_ring_info *cpr2;
2201
2202                         cpr2 = cpr->cp_ring_arr[idx];
2203                         work_done += __bnxt_poll_work(bp, cpr2,
2204                                                       budget - work_done);
2205                         cpr->has_more_work = cpr2->has_more_work;
2206                 } else {
2207                         bnxt_hwrm_handler(bp, (struct tx_cmp *)nqcmp);
2208                 }
2209                 raw_cons = NEXT_RAW_CMP(raw_cons);
2210                 if (cpr->has_more_work)
2211                         break;
2212         }
2213         __bnxt_poll_cqs_done(bp, bnapi, DBR_TYPE_CQ, true);
2214         cpr->cp_raw_cons = raw_cons;
2215         return work_done;
2216 }
2217
2218 static void bnxt_free_tx_skbs(struct bnxt *bp)
2219 {
2220         int i, max_idx;
2221         struct pci_dev *pdev = bp->pdev;
2222
2223         if (!bp->tx_ring)
2224                 return;
2225
2226         max_idx = bp->tx_nr_pages * TX_DESC_CNT;
2227         for (i = 0; i < bp->tx_nr_rings; i++) {
2228                 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
2229                 int j;
2230
2231                 for (j = 0; j < max_idx;) {
2232                         struct bnxt_sw_tx_bd *tx_buf = &txr->tx_buf_ring[j];
2233                         struct sk_buff *skb = tx_buf->skb;
2234                         int k, last;
2235
2236                         if (!skb) {
2237                                 j++;
2238                                 continue;
2239                         }
2240
2241                         tx_buf->skb = NULL;
2242
2243                         if (tx_buf->is_push) {
2244                                 dev_kfree_skb(skb);
2245                                 j += 2;
2246                                 continue;
2247                         }
2248
2249                         dma_unmap_single(&pdev->dev,
2250                                          dma_unmap_addr(tx_buf, mapping),
2251                                          skb_headlen(skb),
2252                                          PCI_DMA_TODEVICE);
2253
2254                         last = tx_buf->nr_frags;
2255                         j += 2;
2256                         for (k = 0; k < last; k++, j++) {
2257                                 int ring_idx = j & bp->tx_ring_mask;
2258                                 skb_frag_t *frag = &skb_shinfo(skb)->frags[k];
2259
2260                                 tx_buf = &txr->tx_buf_ring[ring_idx];
2261                                 dma_unmap_page(
2262                                         &pdev->dev,
2263                                         dma_unmap_addr(tx_buf, mapping),
2264                                         skb_frag_size(frag), PCI_DMA_TODEVICE);
2265                         }
2266                         dev_kfree_skb(skb);
2267                 }
2268                 netdev_tx_reset_queue(netdev_get_tx_queue(bp->dev, i));
2269         }
2270 }
2271
2272 static void bnxt_free_rx_skbs(struct bnxt *bp)
2273 {
2274         int i, max_idx, max_agg_idx;
2275         struct pci_dev *pdev = bp->pdev;
2276
2277         if (!bp->rx_ring)
2278                 return;
2279
2280         max_idx = bp->rx_nr_pages * RX_DESC_CNT;
2281         max_agg_idx = bp->rx_agg_nr_pages * RX_DESC_CNT;
2282         for (i = 0; i < bp->rx_nr_rings; i++) {
2283                 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
2284                 int j;
2285
2286                 if (rxr->rx_tpa) {
2287                         for (j = 0; j < MAX_TPA; j++) {
2288                                 struct bnxt_tpa_info *tpa_info =
2289                                                         &rxr->rx_tpa[j];
2290                                 u8 *data = tpa_info->data;
2291
2292                                 if (!data)
2293                                         continue;
2294
2295                                 dma_unmap_single_attrs(&pdev->dev,
2296                                                        tpa_info->mapping,
2297                                                        bp->rx_buf_use_size,
2298                                                        bp->rx_dir,
2299                                                        DMA_ATTR_WEAK_ORDERING);
2300
2301                                 tpa_info->data = NULL;
2302
2303                                 kfree(data);
2304                         }
2305                 }
2306
2307                 for (j = 0; j < max_idx; j++) {
2308                         struct bnxt_sw_rx_bd *rx_buf = &rxr->rx_buf_ring[j];
2309                         dma_addr_t mapping = rx_buf->mapping;
2310                         void *data = rx_buf->data;
2311
2312                         if (!data)
2313                                 continue;
2314
2315                         rx_buf->data = NULL;
2316
2317                         if (BNXT_RX_PAGE_MODE(bp)) {
2318                                 mapping -= bp->rx_dma_offset;
2319                                 dma_unmap_page_attrs(&pdev->dev, mapping,
2320                                                      PAGE_SIZE, bp->rx_dir,
2321                                                      DMA_ATTR_WEAK_ORDERING);
2322                                 __free_page(data);
2323                         } else {
2324                                 dma_unmap_single_attrs(&pdev->dev, mapping,
2325                                                        bp->rx_buf_use_size,
2326                                                        bp->rx_dir,
2327                                                        DMA_ATTR_WEAK_ORDERING);
2328                                 kfree(data);
2329                         }
2330                 }
2331
2332                 for (j = 0; j < max_agg_idx; j++) {
2333                         struct bnxt_sw_rx_agg_bd *rx_agg_buf =
2334                                 &rxr->rx_agg_ring[j];
2335                         struct page *page = rx_agg_buf->page;
2336
2337                         if (!page)
2338                                 continue;
2339
2340                         dma_unmap_page_attrs(&pdev->dev, rx_agg_buf->mapping,
2341                                              BNXT_RX_PAGE_SIZE,
2342                                              PCI_DMA_FROMDEVICE,
2343                                              DMA_ATTR_WEAK_ORDERING);
2344
2345                         rx_agg_buf->page = NULL;
2346                         __clear_bit(j, rxr->rx_agg_bmap);
2347
2348                         __free_page(page);
2349                 }
2350                 if (rxr->rx_page) {
2351                         __free_page(rxr->rx_page);
2352                         rxr->rx_page = NULL;
2353                 }
2354         }
2355 }
2356
2357 static void bnxt_free_skbs(struct bnxt *bp)
2358 {
2359         bnxt_free_tx_skbs(bp);
2360         bnxt_free_rx_skbs(bp);
2361 }
2362
2363 static void bnxt_free_ring(struct bnxt *bp, struct bnxt_ring_mem_info *rmem)
2364 {
2365         struct pci_dev *pdev = bp->pdev;
2366         int i;
2367
2368         for (i = 0; i < rmem->nr_pages; i++) {
2369                 if (!rmem->pg_arr[i])
2370                         continue;
2371
2372                 dma_free_coherent(&pdev->dev, rmem->page_size,
2373                                   rmem->pg_arr[i], rmem->dma_arr[i]);
2374
2375                 rmem->pg_arr[i] = NULL;
2376         }
2377         if (rmem->pg_tbl) {
2378                 size_t pg_tbl_size = rmem->nr_pages * 8;
2379
2380                 if (rmem->flags & BNXT_RMEM_USE_FULL_PAGE_FLAG)
2381                         pg_tbl_size = rmem->page_size;
2382                 dma_free_coherent(&pdev->dev, pg_tbl_size,
2383                                   rmem->pg_tbl, rmem->pg_tbl_map);
2384                 rmem->pg_tbl = NULL;
2385         }
2386         if (rmem->vmem_size && *rmem->vmem) {
2387                 vfree(*rmem->vmem);
2388                 *rmem->vmem = NULL;
2389         }
2390 }
2391
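/* Allocate the coherent DMA pages backing a ring, an optional page table
 * whose entries carry the PTU valid/next-to-last/last bits, and any
 * software vmem shadow area for the ring.
 */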
2392 static int bnxt_alloc_ring(struct bnxt *bp, struct bnxt_ring_mem_info *rmem)
2393 {
2394         struct pci_dev *pdev = bp->pdev;
2395         u64 valid_bit = 0;
2396         int i;
2397
2398         if (rmem->flags & (BNXT_RMEM_VALID_PTE_FLAG | BNXT_RMEM_RING_PTE_FLAG))
2399                 valid_bit = PTU_PTE_VALID;
2400         if ((rmem->nr_pages > 1 || rmem->depth > 0) && !rmem->pg_tbl) {
2401                 size_t pg_tbl_size = rmem->nr_pages * 8;
2402
2403                 if (rmem->flags & BNXT_RMEM_USE_FULL_PAGE_FLAG)
2404                         pg_tbl_size = rmem->page_size;
2405                 rmem->pg_tbl = dma_alloc_coherent(&pdev->dev, pg_tbl_size,
2406                                                   &rmem->pg_tbl_map,
2407                                                   GFP_KERNEL);
2408                 if (!rmem->pg_tbl)
2409                         return -ENOMEM;
2410         }
2411
2412         for (i = 0; i < rmem->nr_pages; i++) {
2413                 u64 extra_bits = valid_bit;
2414
2415                 rmem->pg_arr[i] = dma_alloc_coherent(&pdev->dev,
2416                                                      rmem->page_size,
2417                                                      &rmem->dma_arr[i],
2418                                                      GFP_KERNEL);
2419                 if (!rmem->pg_arr[i])
2420                         return -ENOMEM;
2421
2422                 if (rmem->nr_pages > 1 || rmem->depth > 0) {
2423                         if (i == rmem->nr_pages - 2 &&
2424                             (rmem->flags & BNXT_RMEM_RING_PTE_FLAG))
2425                                 extra_bits |= PTU_PTE_NEXT_TO_LAST;
2426                         else if (i == rmem->nr_pages - 1 &&
2427                                  (rmem->flags & BNXT_RMEM_RING_PTE_FLAG))
2428                                 extra_bits |= PTU_PTE_LAST;
2429                         rmem->pg_tbl[i] =
2430                                 cpu_to_le64(rmem->dma_arr[i] | extra_bits);
2431                 }
2432         }
2433
2434         if (rmem->vmem_size) {
2435                 *rmem->vmem = vzalloc(rmem->vmem_size);
2436                 if (!(*rmem->vmem))
2437                         return -ENOMEM;
2438         }
2439         return 0;
2440 }
2441
2442 static void bnxt_free_rx_rings(struct bnxt *bp)
2443 {
2444         int i;
2445
2446         if (!bp->rx_ring)
2447                 return;
2448
2449         for (i = 0; i < bp->rx_nr_rings; i++) {
2450                 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
2451                 struct bnxt_ring_struct *ring;
2452
2453                 if (rxr->xdp_prog)
2454                         bpf_prog_put(rxr->xdp_prog);
2455
2456                 if (xdp_rxq_info_is_reg(&rxr->xdp_rxq))
2457                         xdp_rxq_info_unreg(&rxr->xdp_rxq);
2458
2459                 kfree(rxr->rx_tpa);
2460                 rxr->rx_tpa = NULL;
2461
2462                 kfree(rxr->rx_agg_bmap);
2463                 rxr->rx_agg_bmap = NULL;
2464
2465                 ring = &rxr->rx_ring_struct;
2466                 bnxt_free_ring(bp, &ring->ring_mem);
2467
2468                 ring = &rxr->rx_agg_ring_struct;
2469                 bnxt_free_ring(bp, &ring->ring_mem);
2470         }
2471 }
2472
2473 static int bnxt_alloc_rx_rings(struct bnxt *bp)
2474 {
2475         int i, rc, agg_rings = 0, tpa_rings = 0;
2476
2477         if (!bp->rx_ring)
2478                 return -ENOMEM;
2479
2480         if (bp->flags & BNXT_FLAG_AGG_RINGS)
2481                 agg_rings = 1;
2482
2483         if (bp->flags & BNXT_FLAG_TPA)
2484                 tpa_rings = 1;
2485
2486         for (i = 0; i < bp->rx_nr_rings; i++) {
2487                 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
2488                 struct bnxt_ring_struct *ring;
2489
2490                 ring = &rxr->rx_ring_struct;
2491
2492                 rc = xdp_rxq_info_reg(&rxr->xdp_rxq, bp->dev, i);
2493                 if (rc < 0)
2494                         return rc;
2495
2496                 rc = bnxt_alloc_ring(bp, &ring->ring_mem);
2497                 if (rc)
2498                         return rc;
2499
2500                 ring->grp_idx = i;
2501                 if (agg_rings) {
2502                         u16 mem_size;
2503
2504                         ring = &rxr->rx_agg_ring_struct;
2505                         rc = bnxt_alloc_ring(bp, &ring->ring_mem);
2506                         if (rc)
2507                                 return rc;
2508
2509                         ring->grp_idx = i;
2510                         rxr->rx_agg_bmap_size = bp->rx_agg_ring_mask + 1;
2511                         mem_size = rxr->rx_agg_bmap_size / 8;
2512                         rxr->rx_agg_bmap = kzalloc(mem_size, GFP_KERNEL);
2513                         if (!rxr->rx_agg_bmap)
2514                                 return -ENOMEM;
2515
2516                         if (tpa_rings) {
2517                                 rxr->rx_tpa = kcalloc(MAX_TPA,
2518                                                 sizeof(struct bnxt_tpa_info),
2519                                                 GFP_KERNEL);
2520                                 if (!rxr->rx_tpa)
2521                                         return -ENOMEM;
2522                         }
2523                 }
2524         }
2525         return 0;
2526 }
2527
2528 static void bnxt_free_tx_rings(struct bnxt *bp)
2529 {
2530         int i;
2531         struct pci_dev *pdev = bp->pdev;
2532
2533         if (!bp->tx_ring)
2534                 return;
2535
2536         for (i = 0; i < bp->tx_nr_rings; i++) {
2537                 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
2538                 struct bnxt_ring_struct *ring;
2539
2540                 if (txr->tx_push) {
2541                         dma_free_coherent(&pdev->dev, bp->tx_push_size,
2542                                           txr->tx_push, txr->tx_push_mapping);
2543                         txr->tx_push = NULL;
2544                 }
2545
2546                 ring = &txr->tx_ring_struct;
2547
2548                 bnxt_free_ring(bp, &ring->ring_mem);
2549         }
2550 }
2551
2552 static int bnxt_alloc_tx_rings(struct bnxt *bp)
2553 {
2554         int i, j, rc;
2555         struct pci_dev *pdev = bp->pdev;
2556
2557         bp->tx_push_size = 0;
2558         if (bp->tx_push_thresh) {
2559                 int push_size;
2560
2561                 push_size  = L1_CACHE_ALIGN(sizeof(struct tx_push_bd) +
2562                                         bp->tx_push_thresh);
2563
2564                 if (push_size > 256) {
2565                         push_size = 0;
2566                         bp->tx_push_thresh = 0;
2567                 }
2568
2569                 bp->tx_push_size = push_size;
2570         }
2571
2572         for (i = 0, j = 0; i < bp->tx_nr_rings; i++) {
2573                 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
2574                 struct bnxt_ring_struct *ring;
2575                 u8 qidx;
2576
2577                 ring = &txr->tx_ring_struct;
2578
2579                 rc = bnxt_alloc_ring(bp, &ring->ring_mem);
2580                 if (rc)
2581                         return rc;
2582
2583                 ring->grp_idx = txr->bnapi->index;
2584                 if (bp->tx_push_size) {
2585                         dma_addr_t mapping;
2586
2587                         /* One pre-allocated DMA buffer to back up
2588                          * the TX push operation
2589                          */
2590                         txr->tx_push = dma_alloc_coherent(&pdev->dev,
2591                                                 bp->tx_push_size,
2592                                                 &txr->tx_push_mapping,
2593                                                 GFP_KERNEL);
2594
2595                         if (!txr->tx_push)
2596                                 return -ENOMEM;
2597
2598                         mapping = txr->tx_push_mapping +
2599                                 sizeof(struct tx_push_bd);
2600                         txr->data_mapping = cpu_to_le64(mapping);
2601
2602                         memset(txr->tx_push, 0, sizeof(struct tx_push_bd));
2603                 }
2604                 qidx = bp->tc_to_qidx[j];
2605                 ring->queue_id = bp->q_info[qidx].queue_id;
2606                 if (i < bp->tx_nr_rings_xdp)
2607                         continue;
2608                 if (i % bp->tx_nr_rings_per_tc == (bp->tx_nr_rings_per_tc - 1))
2609                         j++;
2610         }
2611         return 0;
2612 }
2613
2614 static void bnxt_free_cp_rings(struct bnxt *bp)
2615 {
2616         int i;
2617
2618         if (!bp->bnapi)
2619                 return;
2620
2621         for (i = 0; i < bp->cp_nr_rings; i++) {
2622                 struct bnxt_napi *bnapi = bp->bnapi[i];
2623                 struct bnxt_cp_ring_info *cpr;
2624                 struct bnxt_ring_struct *ring;
2625                 int j;
2626
2627                 if (!bnapi)
2628                         continue;
2629
2630                 cpr = &bnapi->cp_ring;
2631                 ring = &cpr->cp_ring_struct;
2632
2633                 bnxt_free_ring(bp, &ring->ring_mem);
2634
2635                 for (j = 0; j < 2; j++) {
2636                         struct bnxt_cp_ring_info *cpr2 = cpr->cp_ring_arr[j];
2637
2638                         if (cpr2) {
2639                                 ring = &cpr2->cp_ring_struct;
2640                                 bnxt_free_ring(bp, &ring->ring_mem);
2641                                 kfree(cpr2);
2642                                 cpr->cp_ring_arr[j] = NULL;
2643                         }
2644                 }
2645         }
2646 }
2647
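/* Allocate a child completion ring.  On P5 chips each NQ vector owns
 * separate RX and TX completion rings kept in cp_ring_arr[].
 */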
2648 static struct bnxt_cp_ring_info *bnxt_alloc_cp_sub_ring(struct bnxt *bp)
2649 {
2650         struct bnxt_ring_mem_info *rmem;
2651         struct bnxt_ring_struct *ring;
2652         struct bnxt_cp_ring_info *cpr;
2653         int rc;
2654
2655         cpr = kzalloc(sizeof(*cpr), GFP_KERNEL);
2656         if (!cpr)
2657                 return NULL;
2658
2659         ring = &cpr->cp_ring_struct;
2660         rmem = &ring->ring_mem;
2661         rmem->nr_pages = bp->cp_nr_pages;
2662         rmem->page_size = HW_CMPD_RING_SIZE;
2663         rmem->pg_arr = (void **)cpr->cp_desc_ring;
2664         rmem->dma_arr = cpr->cp_desc_mapping;
2665         rmem->flags = BNXT_RMEM_RING_PTE_FLAG;
2666         rc = bnxt_alloc_ring(bp, rmem);
2667         if (rc) {
2668                 bnxt_free_ring(bp, rmem);
2669                 kfree(cpr);
2670                 cpr = NULL;
2671         }
2672         return cpr;
2673 }
2674
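/* Allocate the per-vector completion ring (the NQ on P5 chips) and, on
 * P5 chips, the child RX and TX completion rings hanging off each one.
 */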
2675 static int bnxt_alloc_cp_rings(struct bnxt *bp)
2676 {
2677         bool sh = !!(bp->flags & BNXT_FLAG_SHARED_RINGS);
2678         int i, rc, ulp_base_vec, ulp_msix;
2679
2680         ulp_msix = bnxt_get_ulp_msix_num(bp);
2681         ulp_base_vec = bnxt_get_ulp_msix_base(bp);
2682         for (i = 0; i < bp->cp_nr_rings; i++) {
2683                 struct bnxt_napi *bnapi = bp->bnapi[i];
2684                 struct bnxt_cp_ring_info *cpr;
2685                 struct bnxt_ring_struct *ring;
2686
2687                 if (!bnapi)
2688                         continue;
2689
2690                 cpr = &bnapi->cp_ring;
2691                 cpr->bnapi = bnapi;
2692                 ring = &cpr->cp_ring_struct;
2693
2694                 rc = bnxt_alloc_ring(bp, &ring->ring_mem);
2695                 if (rc)
2696                         return rc;
2697
2698                 if (ulp_msix && i >= ulp_base_vec)
2699                         ring->map_idx = i + ulp_msix;
2700                 else
2701                         ring->map_idx = i;
2702
2703                 if (!(bp->flags & BNXT_FLAG_CHIP_P5))
2704                         continue;
2705
2706                 if (i < bp->rx_nr_rings) {
2707                         struct bnxt_cp_ring_info *cpr2 =
2708                                 bnxt_alloc_cp_sub_ring(bp);
2709
2710                         cpr->cp_ring_arr[BNXT_RX_HDL] = cpr2;
2711                         if (!cpr2)
2712                                 return -ENOMEM;
2713                         cpr2->bnapi = bnapi;
2714                 }
2715                 if ((sh && i < bp->tx_nr_rings) ||
2716                     (!sh && i >= bp->rx_nr_rings)) {
2717                         struct bnxt_cp_ring_info *cpr2 =
2718                                 bnxt_alloc_cp_sub_ring(bp);
2719
2720                         cpr->cp_ring_arr[BNXT_TX_HDL] = cpr2;
2721                         if (!cpr2)
2722                                 return -ENOMEM;
2723                         cpr2->bnapi = bnapi;
2724                 }
2725         }
2726         return 0;
2727 }
2728
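/* Populate the ring_mem descriptors (page counts, page sizes and vmem
 * shadow sizes) for every completion, RX, RX aggregation and TX ring
 * before the rings are allocated.
 */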
2729 static void bnxt_init_ring_struct(struct bnxt *bp)
2730 {
2731         int i;
2732
2733         for (i = 0; i < bp->cp_nr_rings; i++) {
2734                 struct bnxt_napi *bnapi = bp->bnapi[i];
2735                 struct bnxt_ring_mem_info *rmem;
2736                 struct bnxt_cp_ring_info *cpr;
2737                 struct bnxt_rx_ring_info *rxr;
2738                 struct bnxt_tx_ring_info *txr;
2739                 struct bnxt_ring_struct *ring;
2740
2741                 if (!bnapi)
2742                         continue;
2743
2744                 cpr = &bnapi->cp_ring;
2745                 ring = &cpr->cp_ring_struct;
2746                 rmem = &ring->ring_mem;
2747                 rmem->nr_pages = bp->cp_nr_pages;
2748                 rmem->page_size = HW_CMPD_RING_SIZE;
2749                 rmem->pg_arr = (void **)cpr->cp_desc_ring;
2750                 rmem->dma_arr = cpr->cp_desc_mapping;
2751                 rmem->vmem_size = 0;
2752
2753                 rxr = bnapi->rx_ring;
2754                 if (!rxr)
2755                         goto skip_rx;
2756
2757                 ring = &rxr->rx_ring_struct;
2758                 rmem = &ring->ring_mem;
2759                 rmem->nr_pages = bp->rx_nr_pages;
2760                 rmem->page_size = HW_RXBD_RING_SIZE;
2761                 rmem->pg_arr = (void **)rxr->rx_desc_ring;
2762                 rmem->dma_arr = rxr->rx_desc_mapping;
2763                 rmem->vmem_size = SW_RXBD_RING_SIZE * bp->rx_nr_pages;
2764                 rmem->vmem = (void **)&rxr->rx_buf_ring;
2765
2766                 ring = &rxr->rx_agg_ring_struct;
2767                 rmem = &ring->ring_mem;
2768                 rmem->nr_pages = bp->rx_agg_nr_pages;
2769                 rmem->page_size = HW_RXBD_RING_SIZE;
2770                 rmem->pg_arr = (void **)rxr->rx_agg_desc_ring;
2771                 rmem->dma_arr = rxr->rx_agg_desc_mapping;
2772                 rmem->vmem_size = SW_RXBD_AGG_RING_SIZE * bp->rx_agg_nr_pages;
2773                 rmem->vmem = (void **)&rxr->rx_agg_ring;
2774
2775 skip_rx:
2776                 txr = bnapi->tx_ring;
2777                 if (!txr)
2778                         continue;
2779
2780                 ring = &txr->tx_ring_struct;
2781                 rmem = &ring->ring_mem;
2782                 rmem->nr_pages = bp->tx_nr_pages;
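                /* Note: tx_bd and rx_bd descriptors are the same size, so
                 * reusing the RX per-page ring size for the TX ring here is
                 * assumed to be intentional.
                 */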
2783                 rmem->page_size = HW_RXBD_RING_SIZE;
2784                 rmem->pg_arr = (void **)txr->tx_desc_ring;
2785                 rmem->dma_arr = txr->tx_desc_mapping;
2786                 rmem->vmem_size = SW_TXBD_RING_SIZE * bp->tx_nr_pages;
2787                 rmem->vmem = (void **)&txr->tx_buf_ring;
2788         }
2789 }
2790
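/* Stamp every RX buffer descriptor on every page of the ring with the given
 * type/flags word and a monotonically increasing opaque producer index.
 */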
2791 static void bnxt_init_rxbd_pages(struct bnxt_ring_struct *ring, u32 type)
2792 {
2793         int i;
2794         u32 prod;
2795         struct rx_bd **rx_buf_ring;
2796
2797         rx_buf_ring = (struct rx_bd **)ring->ring_mem.pg_arr;
2798         for (i = 0, prod = 0; i < ring->ring_mem.nr_pages; i++) {
2799                 int j;
2800                 struct rx_bd *rxbd;
2801
2802                 rxbd = rx_buf_ring[i];
2803                 if (!rxbd)
2804                         continue;
2805
2806                 for (j = 0; j < RX_DESC_CNT; j++, rxbd++, prod++) {
2807                         rxbd->rx_bd_len_flags_type = cpu_to_le32(type);
2808                         rxbd->rx_bd_opaque = prod;
2809                 }
2810         }
2811 }
2812
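/* Initialize one RX ring: write the BD type into the descriptors, take a
 * reference on any attached XDP program, pre-fill the ring (and the
 * aggregation ring, if enabled) with buffers, and allocate the TPA
 * (hardware LRO/GRO) staging buffers when TPA is enabled.
 */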
2813 static int bnxt_init_one_rx_ring(struct bnxt *bp, int ring_nr)
2814 {
2815         struct net_device *dev = bp->dev;
2816         struct bnxt_rx_ring_info *rxr;
2817         struct bnxt_ring_struct *ring;
2818         u32 prod, type;
2819         int i;
2820
2821         type = (bp->rx_buf_use_size << RX_BD_LEN_SHIFT) |
2822                 RX_BD_TYPE_RX_PACKET_BD | RX_BD_FLAGS_EOP;
2823
2824         if (NET_IP_ALIGN == 2)
2825                 type |= RX_BD_FLAGS_SOP;
2826
2827         rxr = &bp->rx_ring[ring_nr];
2828         ring = &rxr->rx_ring_struct;
2829         bnxt_init_rxbd_pages(ring, type);
2830
2831         if (BNXT_RX_PAGE_MODE(bp) && bp->xdp_prog) {
2832                 rxr->xdp_prog = bpf_prog_add(bp->xdp_prog, 1);
2833                 if (IS_ERR(rxr->xdp_prog)) {
2834                         int rc = PTR_ERR(rxr->xdp_prog);
2835
2836                         rxr->xdp_prog = NULL;
2837                         return rc;
2838                 }
2839         }
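        /* Pre-fill the ring with receive buffers.  Running out of memory
         * part-way through is logged below but not treated as fatal.
         */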
2840         prod = rxr->rx_prod;
2841         for (i = 0; i < bp->rx_ring_size; i++) {
2842                 if (bnxt_alloc_rx_data(bp, rxr, prod, GFP_KERNEL) != 0) {
2843                         netdev_warn(dev, "init'ed rx ring %d with %d/%d skbs only\n",
2844                                     ring_nr, i, bp->rx_ring_size);
2845                         break;
2846                 }
2847                 prod = NEXT_RX(prod);
2848         }
2849         rxr->rx_prod = prod;
2850         ring->fw_ring_id = INVALID_HW_RING_ID;
2851
2852         ring = &rxr->rx_agg_ring_struct;
2853         ring->fw_ring_id = INVALID_HW_RING_ID;
2854
2855         if (!(bp->flags & BNXT_FLAG_AGG_RINGS))
2856                 return 0;
2857
2858         type = ((u32)BNXT_RX_PAGE_SIZE << RX_BD_LEN_SHIFT) |
2859                 RX_BD_TYPE_RX_AGG_BD | RX_BD_FLAGS_SOP;
2860
2861         bnxt_init_rxbd_pages(ring, type);
2862
2863         prod = rxr->rx_agg_prod;
2864         for (i = 0; i < bp->rx_agg_ring_size; i++) {
2865                 if (bnxt_alloc_rx_page(bp, rxr, prod, GFP_KERNEL) != 0) {
2866                         netdev_warn(dev, "init'ed rx ring %d with %d/%d pages only\n",
2867                                     ring_nr, i, bp->rx_agg_ring_size);
2868                         break;
2869                 }
2870                 prod = NEXT_RX_AGG(prod);
2871         }
2872         rxr->rx_agg_prod = prod;
2873
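        /* With TPA (hardware LRO/GRO) enabled, each ring also needs a set
         * of staging buffers for aggregated packets; failing to allocate
         * them is fatal, unlike a partial ring fill above.
         */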
2874         if (bp->flags & BNXT_FLAG_TPA) {
2875                 if (rxr->rx_tpa) {
2876                         u8 *data;
2877                         dma_addr_t mapping;
2878
2879                         for (i = 0; i < MAX_TPA; i++) {
2880                                 data = __bnxt_alloc_rx_data(bp, &mapping,
2881                                                             GFP_KERNEL);
2882                                 if (!data)
2883                                         return -ENOMEM;
2884
2885                                 rxr->rx_tpa[i].data = data;
2886                                 rxr->rx_tpa[i].data_ptr = data + bp->rx_offset;
2887                                 rxr->rx_tpa[i].mapping = mapping;
2888                         }
2889                 } else {
2890                         netdev_err(bp->dev, "No resource allocated for LRO/GRO\n");
2891                         return -ENOMEM;
2892                 }
2893         }
2894
2895         return 0;
2896 }
2897
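/* Mark all completion rings (including any P5 sub-rings) as not yet having
 * a firmware ring ID and seed their RX coalescing parameters from the
 * device-wide settings.
 */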
2898 static void bnxt_init_cp_rings(struct bnxt *bp)
2899 {
2900         int i, j;
2901
2902         for (i = 0; i < bp->cp_nr_rings; i++) {
2903                 struct bnxt_cp_ring_info *cpr = &bp->bnapi[i]->cp_ring;
2904                 struct bnxt_ring_struct *ring = &cpr->cp_ring_struct;
2905
2906                 ring->fw_ring_id = INVALID_HW_RING_ID;
2907                 cpr->rx_ring_coal.coal_ticks = bp->rx_coal.coal_ticks;
2908                 cpr->rx_ring_coal.coal_bufs = bp->rx_coal.coal_bufs;
2909                 for (j = 0; j < 2; j++) {
2910                         struct bnxt_cp_ring_info *cpr2 = cpr->cp_ring_arr[j];
2911
2912                         if (!cpr2)
2913                                 continue;
2914
2915                         ring = &cpr2->cp_ring_struct;
2916                         ring->fw_ring_id = INVALID_HW_RING_ID;
2917                         cpr2->rx_ring_coal.coal_ticks = bp->rx_coal.coal_ticks;
2918                         cpr2->rx_ring_coal.coal_bufs = bp->rx_coal.coal_bufs;
2919                 }
2920         }
2921 }
2922
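/* Choose the RX buffer offsets (XDP page mode reserves XDP_PACKET_HEADROOM)
 * and initialize every RX ring.
 */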
2923 static int bnxt_init_rx_rings(struct bnxt *bp)
2924 {
2925         int i, rc = 0;
2926
2927         if (BNXT_RX_PAGE_MODE(bp)) {
2928                 bp->rx_offset = NET_IP_ALIGN + XDP_PACKET_HEADROOM;
2929                 bp->rx_dma_offset = XDP_PACKET_HEADROOM;
2930         } else {
2931                 bp->rx_offset = BNXT_RX_OFFSET;
2932                 bp->rx_dma_offset = BNXT_RX_DMA_OFFSET;
2933         }
2934
2935         for (i = 0; i < bp->rx_nr_rings; i++) {
2936                 rc = bnxt_init_one_rx_ring(bp, i);
2937                 if (rc)
2938                         break;
2939         }
2940
2941         return rc;
2942 }
2943
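/* Set the TX queue wake threshold to half the TX ring size (but at least
 * MAX_SKB_FRAGS + 1 descriptors) and invalidate the firmware ring ID of
 * every TX ring.
 */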
2944 static int bnxt_init_tx_rings(struct bnxt *bp)
2945 {
2946         u16 i;
2947
2948         bp->tx_wake_thresh = max_t(int, bp->tx_ring_size / 2,
2949                                    MAX_SKB_FRAGS + 1);
2950
2951         for (i = 0; i < bp->tx_nr_rings; i++) {
2952                 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
2953                 struct bnxt_ring_struct *ring = &txr->tx_ring_struct;
2954
2955                 ring->fw_ring_id = INVALID_HW_RING_ID;