/* Broadcom NetXtreme-C/E network driver.
 *
 * Copyright (c) 2014-2016 Broadcom Corporation
 * Copyright (c) 2016-2017 Broadcom Limited
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 */

#include <linux/module.h>

#include <linux/stringify.h>
#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/dma-mapping.h>
#include <linux/bitops.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/delay.h>
#include <asm/byteorder.h>
#include <asm/page.h>
#include <linux/time.h>
#include <linux/mii.h>
#include <linux/if.h>
#include <linux/if_vlan.h>
#include <linux/rtc.h>
#include <linux/bpf.h>
#include <net/ip.h>
#include <net/tcp.h>
#include <net/udp.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <net/udp_tunnel.h>
#include <linux/workqueue.h>
#include <linux/prefetch.h>
#include <linux/cache.h>
#include <linux/log2.h>
#include <linux/aer.h>
#include <linux/bitmap.h>
#include <linux/cpu_rmap.h>

#include "bnxt_hsi.h"
#include "bnxt.h"
#include "bnxt_ulp.h"
#include "bnxt_sriov.h"
#include "bnxt_ethtool.h"
#include "bnxt_dcb.h"
#include "bnxt_xdp.h"

#define BNXT_TX_TIMEOUT         (5 * HZ)

static const char version[] =
        "Broadcom NetXtreme-C/E driver " DRV_MODULE_NAME " v" DRV_MODULE_VERSION "\n";

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Broadcom BCM573xx network driver");
MODULE_VERSION(DRV_MODULE_VERSION);

#define BNXT_RX_OFFSET (NET_SKB_PAD + NET_IP_ALIGN)
#define BNXT_RX_DMA_OFFSET NET_SKB_PAD
#define BNXT_RX_COPY_THRESH 256

#define BNXT_TX_PUSH_THRESH 164

enum board_idx {
        BCM57301,
        BCM57302,
        BCM57304,
        BCM57417_NPAR,
        BCM58700,
        BCM57311,
        BCM57312,
        BCM57402,
        BCM57404,
        BCM57406,
        BCM57402_NPAR,
        BCM57407,
        BCM57412,
        BCM57414,
        BCM57416,
        BCM57417,
        BCM57412_NPAR,
        BCM57314,
        BCM57417_SFP,
        BCM57416_SFP,
        BCM57404_NPAR,
        BCM57406_NPAR,
        BCM57407_SFP,
        BCM57407_NPAR,
        BCM57414_NPAR,
        BCM57416_NPAR,
        BCM57452,
        BCM57454,
        NETXTREME_E_VF,
        NETXTREME_C_VF,
};

/* indexed by enum above */
static const struct {
        char *name;
} board_info[] = {
        { "Broadcom BCM57301 NetXtreme-C 10Gb Ethernet" },
        { "Broadcom BCM57302 NetXtreme-C 10Gb/25Gb Ethernet" },
        { "Broadcom BCM57304 NetXtreme-C 10Gb/25Gb/40Gb/50Gb Ethernet" },
        { "Broadcom BCM57417 NetXtreme-E Ethernet Partition" },
        { "Broadcom BCM58700 Nitro 1Gb/2.5Gb/10Gb Ethernet" },
        { "Broadcom BCM57311 NetXtreme-C 10Gb Ethernet" },
        { "Broadcom BCM57312 NetXtreme-C 10Gb/25Gb Ethernet" },
        { "Broadcom BCM57402 NetXtreme-E 10Gb Ethernet" },
        { "Broadcom BCM57404 NetXtreme-E 10Gb/25Gb Ethernet" },
        { "Broadcom BCM57406 NetXtreme-E 10GBase-T Ethernet" },
        { "Broadcom BCM57402 NetXtreme-E Ethernet Partition" },
        { "Broadcom BCM57407 NetXtreme-E 10GBase-T Ethernet" },
        { "Broadcom BCM57412 NetXtreme-E 10Gb Ethernet" },
        { "Broadcom BCM57414 NetXtreme-E 10Gb/25Gb Ethernet" },
        { "Broadcom BCM57416 NetXtreme-E 10GBase-T Ethernet" },
        { "Broadcom BCM57417 NetXtreme-E 10GBase-T Ethernet" },
        { "Broadcom BCM57412 NetXtreme-E Ethernet Partition" },
        { "Broadcom BCM57314 NetXtreme-C 10Gb/25Gb/40Gb/50Gb Ethernet" },
        { "Broadcom BCM57417 NetXtreme-E 10Gb/25Gb Ethernet" },
        { "Broadcom BCM57416 NetXtreme-E 10Gb Ethernet" },
        { "Broadcom BCM57404 NetXtreme-E Ethernet Partition" },
        { "Broadcom BCM57406 NetXtreme-E Ethernet Partition" },
        { "Broadcom BCM57407 NetXtreme-E 25Gb Ethernet" },
        { "Broadcom BCM57407 NetXtreme-E Ethernet Partition" },
        { "Broadcom BCM57414 NetXtreme-E Ethernet Partition" },
        { "Broadcom BCM57416 NetXtreme-E Ethernet Partition" },
        { "Broadcom BCM57452 NetXtreme-E 10Gb/25Gb/40Gb/50Gb Ethernet" },
        { "Broadcom BCM57454 NetXtreme-E 10Gb/25Gb/40Gb/50Gb/100Gb Ethernet" },
        { "Broadcom NetXtreme-E Ethernet Virtual Function" },
        { "Broadcom NetXtreme-C Ethernet Virtual Function" },
};

static const struct pci_device_id bnxt_pci_tbl[] = {
        { PCI_VDEVICE(BROADCOM, 0x16c0), .driver_data = BCM57417_NPAR },
        { PCI_VDEVICE(BROADCOM, 0x16c8), .driver_data = BCM57301 },
        { PCI_VDEVICE(BROADCOM, 0x16c9), .driver_data = BCM57302 },
        { PCI_VDEVICE(BROADCOM, 0x16ca), .driver_data = BCM57304 },
        { PCI_VDEVICE(BROADCOM, 0x16cc), .driver_data = BCM57417_NPAR },
        { PCI_VDEVICE(BROADCOM, 0x16cd), .driver_data = BCM58700 },
        { PCI_VDEVICE(BROADCOM, 0x16ce), .driver_data = BCM57311 },
        { PCI_VDEVICE(BROADCOM, 0x16cf), .driver_data = BCM57312 },
        { PCI_VDEVICE(BROADCOM, 0x16d0), .driver_data = BCM57402 },
        { PCI_VDEVICE(BROADCOM, 0x16d1), .driver_data = BCM57404 },
        { PCI_VDEVICE(BROADCOM, 0x16d2), .driver_data = BCM57406 },
        { PCI_VDEVICE(BROADCOM, 0x16d4), .driver_data = BCM57402_NPAR },
        { PCI_VDEVICE(BROADCOM, 0x16d5), .driver_data = BCM57407 },
        { PCI_VDEVICE(BROADCOM, 0x16d6), .driver_data = BCM57412 },
        { PCI_VDEVICE(BROADCOM, 0x16d7), .driver_data = BCM57414 },
        { PCI_VDEVICE(BROADCOM, 0x16d8), .driver_data = BCM57416 },
        { PCI_VDEVICE(BROADCOM, 0x16d9), .driver_data = BCM57417 },
        { PCI_VDEVICE(BROADCOM, 0x16de), .driver_data = BCM57412_NPAR },
        { PCI_VDEVICE(BROADCOM, 0x16df), .driver_data = BCM57314 },
        { PCI_VDEVICE(BROADCOM, 0x16e2), .driver_data = BCM57417_SFP },
        { PCI_VDEVICE(BROADCOM, 0x16e3), .driver_data = BCM57416_SFP },
        { PCI_VDEVICE(BROADCOM, 0x16e7), .driver_data = BCM57404_NPAR },
        { PCI_VDEVICE(BROADCOM, 0x16e8), .driver_data = BCM57406_NPAR },
        { PCI_VDEVICE(BROADCOM, 0x16e9), .driver_data = BCM57407_SFP },
        { PCI_VDEVICE(BROADCOM, 0x16ea), .driver_data = BCM57407_NPAR },
        { PCI_VDEVICE(BROADCOM, 0x16eb), .driver_data = BCM57412_NPAR },
        { PCI_VDEVICE(BROADCOM, 0x16ec), .driver_data = BCM57414_NPAR },
        { PCI_VDEVICE(BROADCOM, 0x16ed), .driver_data = BCM57414_NPAR },
        { PCI_VDEVICE(BROADCOM, 0x16ee), .driver_data = BCM57416_NPAR },
        { PCI_VDEVICE(BROADCOM, 0x16ef), .driver_data = BCM57416_NPAR },
        { PCI_VDEVICE(BROADCOM, 0x16f1), .driver_data = BCM57452 },
        { PCI_VDEVICE(BROADCOM, 0x1614), .driver_data = BCM57454 },
#ifdef CONFIG_BNXT_SRIOV
        { PCI_VDEVICE(BROADCOM, 0x1606), .driver_data = NETXTREME_E_VF },
        { PCI_VDEVICE(BROADCOM, 0x1609), .driver_data = NETXTREME_E_VF },
        { PCI_VDEVICE(BROADCOM, 0x16c1), .driver_data = NETXTREME_E_VF },
        { PCI_VDEVICE(BROADCOM, 0x16cb), .driver_data = NETXTREME_C_VF },
        { PCI_VDEVICE(BROADCOM, 0x16d3), .driver_data = NETXTREME_E_VF },
        { PCI_VDEVICE(BROADCOM, 0x16dc), .driver_data = NETXTREME_E_VF },
        { PCI_VDEVICE(BROADCOM, 0x16e1), .driver_data = NETXTREME_C_VF },
        { PCI_VDEVICE(BROADCOM, 0x16e5), .driver_data = NETXTREME_C_VF },
#endif
        { 0 }
};

MODULE_DEVICE_TABLE(pci, bnxt_pci_tbl);

static const u16 bnxt_vf_req_snif[] = {
        HWRM_FUNC_CFG,
        HWRM_PORT_PHY_QCFG,
        HWRM_CFA_L2_FILTER_ALLOC,
};

static const u16 bnxt_async_events_arr[] = {
        ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE,
        ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD,
        ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED,
        ASYNC_EVENT_CMPL_EVENT_ID_VF_CFG_CHANGE,
        ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE,
};

static bool bnxt_vf_pciid(enum board_idx idx)
{
        return (idx == NETXTREME_C_VF || idx == NETXTREME_E_VF);
}

#define DB_CP_REARM_FLAGS       (DB_KEY_CP | DB_IDX_VALID)
#define DB_CP_FLAGS             (DB_KEY_CP | DB_IDX_VALID | DB_IRQ_DIS)
#define DB_CP_IRQ_DIS_FLAGS     (DB_KEY_CP | DB_IRQ_DIS)

#define BNXT_CP_DB_REARM(db, raw_cons)                                  \
                writel(DB_CP_REARM_FLAGS | RING_CMP(raw_cons), db)

#define BNXT_CP_DB(db, raw_cons)                                        \
                writel(DB_CP_FLAGS | RING_CMP(raw_cons), db)

#define BNXT_CP_DB_IRQ_DIS(db)                                          \
                writel(DB_CP_IRQ_DIS_FLAGS, db)

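/* Usage sketch (illustrative only, not part of this file): a NAPI poll
 * handler re-arms the completion ring doorbell once it has consumed all
 * valid entries, roughly:
 *
 *      BNXT_CP_DB_REARM(cpr->cp_doorbell, cpr->cp_raw_cons);
 *
 * RING_CMP() masks the raw consumer index down to the ring size, and the
 * DB_* key/flag bits tell the chip what kind of doorbell write this is
 * (re-arm vs. plain update vs. IRQ disable).
 */
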
const u16 bnxt_lhint_arr[] = {
        TX_BD_FLAGS_LHINT_512_AND_SMALLER,
        TX_BD_FLAGS_LHINT_512_TO_1023,
        TX_BD_FLAGS_LHINT_1024_TO_2047,
        TX_BD_FLAGS_LHINT_1024_TO_2047,
        TX_BD_FLAGS_LHINT_2048_AND_LARGER,
        TX_BD_FLAGS_LHINT_2048_AND_LARGER,
        TX_BD_FLAGS_LHINT_2048_AND_LARGER,
        TX_BD_FLAGS_LHINT_2048_AND_LARGER,
        TX_BD_FLAGS_LHINT_2048_AND_LARGER,
        TX_BD_FLAGS_LHINT_2048_AND_LARGER,
        TX_BD_FLAGS_LHINT_2048_AND_LARGER,
        TX_BD_FLAGS_LHINT_2048_AND_LARGER,
        TX_BD_FLAGS_LHINT_2048_AND_LARGER,
        TX_BD_FLAGS_LHINT_2048_AND_LARGER,
        TX_BD_FLAGS_LHINT_2048_AND_LARGER,
        TX_BD_FLAGS_LHINT_2048_AND_LARGER,
        TX_BD_FLAGS_LHINT_2048_AND_LARGER,
        TX_BD_FLAGS_LHINT_2048_AND_LARGER,
        TX_BD_FLAGS_LHINT_2048_AND_LARGER,
};
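
/* Indexing sketch (illustrative): bnxt_start_xmit() below selects a hint
 * with bnxt_lhint_arr[length >> 9], i.e. in 512-byte steps, so a 60-byte
 * frame (60 >> 9 == 0) gets LHINT_512_AND_SMALLER and a 1500-byte frame
 * (1500 >> 9 == 2) gets LHINT_1024_TO_2047.  The table is sized to cover
 * the largest supported frame, so the index stays in range.
 */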

static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
        struct bnxt *bp = netdev_priv(dev);
        struct tx_bd *txbd;
        struct tx_bd_ext *txbd1;
        struct netdev_queue *txq;
        int i;
        dma_addr_t mapping;
        unsigned int length, pad = 0;
        u32 len, free_size, vlan_tag_flags, cfa_action, flags;
        u16 prod, last_frag;
        struct pci_dev *pdev = bp->pdev;
        struct bnxt_tx_ring_info *txr;
        struct bnxt_sw_tx_bd *tx_buf;

        i = skb_get_queue_mapping(skb);
        if (unlikely(i >= bp->tx_nr_rings)) {
                dev_kfree_skb_any(skb);
                return NETDEV_TX_OK;
        }

        txq = netdev_get_tx_queue(dev, i);
        txr = &bp->tx_ring[bp->tx_ring_map[i]];
        prod = txr->tx_prod;

        free_size = bnxt_tx_avail(bp, txr);
        if (unlikely(free_size < skb_shinfo(skb)->nr_frags + 2)) {
                netif_tx_stop_queue(txq);
                return NETDEV_TX_BUSY;
        }

        length = skb->len;
        len = skb_headlen(skb);
        last_frag = skb_shinfo(skb)->nr_frags;

        txbd = &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];

        txbd->tx_bd_opaque = prod;

        tx_buf = &txr->tx_buf_ring[prod];
        tx_buf->skb = skb;
        tx_buf->nr_frags = last_frag;

        vlan_tag_flags = 0;
        cfa_action = 0;
        if (skb_vlan_tag_present(skb)) {
                vlan_tag_flags = TX_BD_CFA_META_KEY_VLAN |
                                 skb_vlan_tag_get(skb);
                /* Currently supports 802.1Q and 802.1ad VLAN offloads;
                 * the QINQ1, QINQ2 and QINQ3 VLAN header formats are
                 * deprecated.
                 */
                if (skb->vlan_proto == htons(ETH_P_8021Q))
                        vlan_tag_flags |= 1 << TX_BD_CFA_META_TPID_SHIFT;
        }

        if (free_size == bp->tx_ring_size && length <= bp->tx_push_thresh) {
                struct tx_push_buffer *tx_push_buf = txr->tx_push;
                struct tx_push_bd *tx_push = &tx_push_buf->push_bd;
                struct tx_bd_ext *tx_push1 = &tx_push->txbd2;
                void *pdata = tx_push_buf->data;
                u64 *end;
                int j, push_len;

                /* Set COAL_NOW to be ready quickly for the next push */
                tx_push->tx_bd_len_flags_type =
                        cpu_to_le32((length << TX_BD_LEN_SHIFT) |
                                        TX_BD_TYPE_LONG_TX_BD |
                                        TX_BD_FLAGS_LHINT_512_AND_SMALLER |
                                        TX_BD_FLAGS_COAL_NOW |
                                        TX_BD_FLAGS_PACKET_END |
                                        (2 << TX_BD_FLAGS_BD_CNT_SHIFT));

                if (skb->ip_summed == CHECKSUM_PARTIAL)
                        tx_push1->tx_bd_hsize_lflags =
                                        cpu_to_le32(TX_BD_FLAGS_TCP_UDP_CHKSUM);
                else
                        tx_push1->tx_bd_hsize_lflags = 0;

                tx_push1->tx_bd_cfa_meta = cpu_to_le32(vlan_tag_flags);
                tx_push1->tx_bd_cfa_action = cpu_to_le32(cfa_action);

                end = pdata + length;
                end = PTR_ALIGN(end, 8) - 1;
                *end = 0;

                skb_copy_from_linear_data(skb, pdata, len);
                pdata += len;
                for (j = 0; j < last_frag; j++) {
                        skb_frag_t *frag = &skb_shinfo(skb)->frags[j];
                        void *fptr;

                        fptr = skb_frag_address_safe(frag);
                        if (!fptr)
                                goto normal_tx;

                        memcpy(pdata, fptr, skb_frag_size(frag));
                        pdata += skb_frag_size(frag);
                }

                txbd->tx_bd_len_flags_type = tx_push->tx_bd_len_flags_type;
                txbd->tx_bd_haddr = txr->data_mapping;
                prod = NEXT_TX(prod);
                txbd = &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];
                memcpy(txbd, tx_push1, sizeof(*txbd));
                prod = NEXT_TX(prod);
                tx_push->doorbell =
                        cpu_to_le32(DB_KEY_TX_PUSH | DB_LONG_TX_PUSH | prod);
                txr->tx_prod = prod;

                tx_buf->is_push = 1;
                netdev_tx_sent_queue(txq, skb->len);
                wmb();  /* Sync is_push and byte queue before pushing data */

                push_len = (length + sizeof(*tx_push) + 7) / 8;
                if (push_len > 16) {
                        __iowrite64_copy(txr->tx_doorbell, tx_push_buf, 16);
                        __iowrite32_copy(txr->tx_doorbell + 4, tx_push_buf + 1,
                                         (push_len - 16) << 1);
                } else {
                        __iowrite64_copy(txr->tx_doorbell, tx_push_buf,
                                         push_len);
                }

                goto tx_done;
        }

normal_tx:
        if (length < BNXT_MIN_PKT_SIZE) {
                pad = BNXT_MIN_PKT_SIZE - length;
                if (skb_pad(skb, pad)) {
                        /* SKB already freed. */
                        tx_buf->skb = NULL;
                        return NETDEV_TX_OK;
                }
                length = BNXT_MIN_PKT_SIZE;
        }

        mapping = dma_map_single(&pdev->dev, skb->data, len, DMA_TO_DEVICE);

        if (unlikely(dma_mapping_error(&pdev->dev, mapping))) {
                dev_kfree_skb_any(skb);
                tx_buf->skb = NULL;
                return NETDEV_TX_OK;
        }

        dma_unmap_addr_set(tx_buf, mapping, mapping);
        flags = (len << TX_BD_LEN_SHIFT) | TX_BD_TYPE_LONG_TX_BD |
                ((last_frag + 2) << TX_BD_FLAGS_BD_CNT_SHIFT);

        txbd->tx_bd_haddr = cpu_to_le64(mapping);

        prod = NEXT_TX(prod);
        txbd1 = (struct tx_bd_ext *)
                &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];

        txbd1->tx_bd_hsize_lflags = 0;
        if (skb_is_gso(skb)) {
                u32 hdr_len;

                if (skb->encapsulation)
                        hdr_len = skb_inner_network_offset(skb) +
                                skb_inner_network_header_len(skb) +
                                inner_tcp_hdrlen(skb);
                else
                        hdr_len = skb_transport_offset(skb) +
                                tcp_hdrlen(skb);

                txbd1->tx_bd_hsize_lflags = cpu_to_le32(TX_BD_FLAGS_LSO |
                                        TX_BD_FLAGS_T_IPID |
                                        (hdr_len << (TX_BD_HSIZE_SHIFT - 1)));
                length = skb_shinfo(skb)->gso_size;
                txbd1->tx_bd_mss = cpu_to_le32(length);
                length += hdr_len;
        } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
                txbd1->tx_bd_hsize_lflags =
                        cpu_to_le32(TX_BD_FLAGS_TCP_UDP_CHKSUM);
                txbd1->tx_bd_mss = 0;
        }

        length >>= 9;
        flags |= bnxt_lhint_arr[length];
        txbd->tx_bd_len_flags_type = cpu_to_le32(flags);

        txbd1->tx_bd_cfa_meta = cpu_to_le32(vlan_tag_flags);
        txbd1->tx_bd_cfa_action = cpu_to_le32(cfa_action);
        for (i = 0; i < last_frag; i++) {
                skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

                prod = NEXT_TX(prod);
                txbd = &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];

                len = skb_frag_size(frag);
                mapping = skb_frag_dma_map(&pdev->dev, frag, 0, len,
                                           DMA_TO_DEVICE);

                if (unlikely(dma_mapping_error(&pdev->dev, mapping)))
                        goto tx_dma_error;

                tx_buf = &txr->tx_buf_ring[prod];
                dma_unmap_addr_set(tx_buf, mapping, mapping);

                txbd->tx_bd_haddr = cpu_to_le64(mapping);

                flags = len << TX_BD_LEN_SHIFT;
                txbd->tx_bd_len_flags_type = cpu_to_le32(flags);
        }

        flags &= ~TX_BD_LEN;
        txbd->tx_bd_len_flags_type =
                cpu_to_le32(((len + pad) << TX_BD_LEN_SHIFT) | flags |
                            TX_BD_FLAGS_PACKET_END);

        netdev_tx_sent_queue(txq, skb->len);

        /* Sync BD data before updating doorbell */
        wmb();

        prod = NEXT_TX(prod);
        txr->tx_prod = prod;

        if (!skb->xmit_more || netif_xmit_stopped(txq))
                bnxt_db_write(bp, txr->tx_doorbell, DB_KEY_TX | prod);

tx_done:

        mmiowb();

        if (unlikely(bnxt_tx_avail(bp, txr) <= MAX_SKB_FRAGS + 1)) {
                if (skb->xmit_more && !tx_buf->is_push)
                        bnxt_db_write(bp, txr->tx_doorbell, DB_KEY_TX | prod);

                netif_tx_stop_queue(txq);

                /* netif_tx_stop_queue() must be done before checking
                 * tx index in bnxt_tx_avail() below, because in
                 * bnxt_tx_int(), we update tx index before checking for
                 * netif_tx_queue_stopped().
                 */
                smp_mb();
                if (bnxt_tx_avail(bp, txr) > bp->tx_wake_thresh)
                        netif_tx_wake_queue(txq);
        }
        return NETDEV_TX_OK;

tx_dma_error:
        last_frag = i;

        /* start back at beginning and unmap skb */
        prod = txr->tx_prod;
        tx_buf = &txr->tx_buf_ring[prod];
        tx_buf->skb = NULL;
        dma_unmap_single(&pdev->dev, dma_unmap_addr(tx_buf, mapping),
                         skb_headlen(skb), PCI_DMA_TODEVICE);
        prod = NEXT_TX(prod);

        /* unmap remaining mapped pages */
        for (i = 0; i < last_frag; i++) {
                prod = NEXT_TX(prod);
                tx_buf = &txr->tx_buf_ring[prod];
                dma_unmap_page(&pdev->dev, dma_unmap_addr(tx_buf, mapping),
                               skb_frag_size(&skb_shinfo(skb)->frags[i]),
                               PCI_DMA_TODEVICE);
        }

        dev_kfree_skb_any(skb);
        return NETDEV_TX_OK;
}
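
/* Descriptor accounting for the normal TX path above (illustrative): a
 * 3-fragment skb consumes 3 + 2 = 5 BDs -- one long BD for the linear
 * data, one tx_bd_ext for checksum/LSO metadata, and one BD per page
 * fragment -- which is exactly the nr_frags + 2 slots that free_size is
 * checked against at the top of bnxt_start_xmit().
 */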

static void bnxt_tx_int(struct bnxt *bp, struct bnxt_napi *bnapi, int nr_pkts)
{
        struct bnxt_tx_ring_info *txr = bnapi->tx_ring;
        struct netdev_queue *txq = netdev_get_tx_queue(bp->dev, txr->txq_index);
        u16 cons = txr->tx_cons;
        struct pci_dev *pdev = bp->pdev;
        int i;
        unsigned int tx_bytes = 0;

        for (i = 0; i < nr_pkts; i++) {
                struct bnxt_sw_tx_bd *tx_buf;
                struct sk_buff *skb;
                int j, last;

                tx_buf = &txr->tx_buf_ring[cons];
                cons = NEXT_TX(cons);
                skb = tx_buf->skb;
                tx_buf->skb = NULL;

                if (tx_buf->is_push) {
                        tx_buf->is_push = 0;
                        goto next_tx_int;
                }

                dma_unmap_single(&pdev->dev, dma_unmap_addr(tx_buf, mapping),
                                 skb_headlen(skb), PCI_DMA_TODEVICE);
                last = tx_buf->nr_frags;

                for (j = 0; j < last; j++) {
                        cons = NEXT_TX(cons);
                        tx_buf = &txr->tx_buf_ring[cons];
                        dma_unmap_page(
                                &pdev->dev,
                                dma_unmap_addr(tx_buf, mapping),
                                skb_frag_size(&skb_shinfo(skb)->frags[j]),
                                PCI_DMA_TODEVICE);
                }

next_tx_int:
                cons = NEXT_TX(cons);

                tx_bytes += skb->len;
                dev_kfree_skb_any(skb);
        }

        netdev_tx_completed_queue(txq, nr_pkts, tx_bytes);
        txr->tx_cons = cons;

        /* Need to make the tx_cons update visible to bnxt_start_xmit()
         * before checking for netif_tx_queue_stopped().  Without the
         * memory barrier, there is a small possibility that bnxt_start_xmit()
         * will miss it and cause the queue to be stopped forever.
         */
        smp_mb();

        if (unlikely(netif_tx_queue_stopped(txq)) &&
            (bnxt_tx_avail(bp, txr) > bp->tx_wake_thresh)) {
                __netif_tx_lock(txq, smp_processor_id());
                if (netif_tx_queue_stopped(txq) &&
                    bnxt_tx_avail(bp, txr) > bp->tx_wake_thresh &&
                    txr->dev_state != BNXT_DEV_STATE_CLOSING)
                        netif_tx_wake_queue(txq);
                __netif_tx_unlock(txq);
        }
}
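
/* BQL note (illustrative): netdev_tx_completed_queue() above pairs with
 * the netdev_tx_sent_queue() calls in bnxt_start_xmit(); together they
 * let Byte Queue Limits bound how much data sits in the TX ring.  The
 * completion side must report both packet and byte counts, which is why
 * tx_bytes is accumulated per skb in the loop.
 */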

static struct page *__bnxt_alloc_rx_page(struct bnxt *bp, dma_addr_t *mapping,
                                         gfp_t gfp)
{
        struct device *dev = &bp->pdev->dev;
        struct page *page;

        page = alloc_page(gfp);
        if (!page)
                return NULL;

        *mapping = dma_map_page_attrs(dev, page, 0, PAGE_SIZE, bp->rx_dir,
                                      DMA_ATTR_WEAK_ORDERING);
        if (dma_mapping_error(dev, *mapping)) {
                __free_page(page);
                return NULL;
        }
        *mapping += bp->rx_dma_offset;
        return page;
}

static inline u8 *__bnxt_alloc_rx_data(struct bnxt *bp, dma_addr_t *mapping,
                                       gfp_t gfp)
{
        u8 *data;
        struct pci_dev *pdev = bp->pdev;

        data = kmalloc(bp->rx_buf_size, gfp);
        if (!data)
                return NULL;

        *mapping = dma_map_single_attrs(&pdev->dev, data + bp->rx_dma_offset,
                                        bp->rx_buf_use_size, bp->rx_dir,
                                        DMA_ATTR_WEAK_ORDERING);

        if (dma_mapping_error(&pdev->dev, *mapping)) {
                kfree(data);
                data = NULL;
        }
        return data;
}

int bnxt_alloc_rx_data(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
                       u16 prod, gfp_t gfp)
{
        struct rx_bd *rxbd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
        struct bnxt_sw_rx_bd *rx_buf = &rxr->rx_buf_ring[prod];
        dma_addr_t mapping;

        if (BNXT_RX_PAGE_MODE(bp)) {
                struct page *page = __bnxt_alloc_rx_page(bp, &mapping, gfp);

                if (!page)
                        return -ENOMEM;

                rx_buf->data = page;
                rx_buf->data_ptr = page_address(page) + bp->rx_offset;
        } else {
                u8 *data = __bnxt_alloc_rx_data(bp, &mapping, gfp);

                if (!data)
                        return -ENOMEM;

                rx_buf->data = data;
                rx_buf->data_ptr = data + bp->rx_offset;
        }
        rx_buf->mapping = mapping;

        rxbd->rx_bd_haddr = cpu_to_le64(mapping);
        return 0;
}

void bnxt_reuse_rx_data(struct bnxt_rx_ring_info *rxr, u16 cons, void *data)
{
        u16 prod = rxr->rx_prod;
        struct bnxt_sw_rx_bd *cons_rx_buf, *prod_rx_buf;
        struct rx_bd *cons_bd, *prod_bd;

        prod_rx_buf = &rxr->rx_buf_ring[prod];
        cons_rx_buf = &rxr->rx_buf_ring[cons];

        prod_rx_buf->data = data;
        prod_rx_buf->data_ptr = cons_rx_buf->data_ptr;

        prod_rx_buf->mapping = cons_rx_buf->mapping;

        prod_bd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
        cons_bd = &rxr->rx_desc_ring[RX_RING(cons)][RX_IDX(cons)];

        prod_bd->rx_bd_haddr = cons_bd->rx_bd_haddr;
}

static inline u16 bnxt_find_next_agg_idx(struct bnxt_rx_ring_info *rxr, u16 idx)
{
        u16 next, max = rxr->rx_agg_bmap_size;

        next = find_next_zero_bit(rxr->rx_agg_bmap, max, idx);
        if (next >= max)
                next = find_first_zero_bit(rxr->rx_agg_bmap, max);
        return next;
}
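
/* Wrap-around sketch (illustrative): with rx_agg_bmap_size == 8 and bits
 * 5..7 already set, bnxt_find_next_agg_idx(rxr, 5) finds no zero bit from
 * index 5 upward, so it wraps via find_first_zero_bit() and returns 0.
 */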

static inline int bnxt_alloc_rx_page(struct bnxt *bp,
                                     struct bnxt_rx_ring_info *rxr,
                                     u16 prod, gfp_t gfp)
{
        struct rx_bd *rxbd =
                &rxr->rx_agg_desc_ring[RX_RING(prod)][RX_IDX(prod)];
        struct bnxt_sw_rx_agg_bd *rx_agg_buf;
        struct pci_dev *pdev = bp->pdev;
        struct page *page;
        dma_addr_t mapping;
        u16 sw_prod = rxr->rx_sw_agg_prod;
        unsigned int offset = 0;

        if (PAGE_SIZE > BNXT_RX_PAGE_SIZE) {
                page = rxr->rx_page;
                if (!page) {
                        page = alloc_page(gfp);
                        if (!page)
                                return -ENOMEM;
                        rxr->rx_page = page;
                        rxr->rx_page_offset = 0;
                }
                offset = rxr->rx_page_offset;
                rxr->rx_page_offset += BNXT_RX_PAGE_SIZE;
                if (rxr->rx_page_offset == PAGE_SIZE)
                        rxr->rx_page = NULL;
                else
                        get_page(page);
        } else {
                page = alloc_page(gfp);
                if (!page)
                        return -ENOMEM;
        }

        mapping = dma_map_page_attrs(&pdev->dev, page, offset,
                                     BNXT_RX_PAGE_SIZE, PCI_DMA_FROMDEVICE,
                                     DMA_ATTR_WEAK_ORDERING);
        if (dma_mapping_error(&pdev->dev, mapping)) {
                __free_page(page);
                return -EIO;
        }

        if (unlikely(test_bit(sw_prod, rxr->rx_agg_bmap)))
                sw_prod = bnxt_find_next_agg_idx(rxr, sw_prod);

        __set_bit(sw_prod, rxr->rx_agg_bmap);
        rx_agg_buf = &rxr->rx_agg_ring[sw_prod];
        rxr->rx_sw_agg_prod = NEXT_RX_AGG(sw_prod);

        rx_agg_buf->page = page;
        rx_agg_buf->offset = offset;
        rx_agg_buf->mapping = mapping;
        rxbd->rx_bd_haddr = cpu_to_le64(mapping);
        rxbd->rx_bd_opaque = sw_prod;
        return 0;
}

static void bnxt_reuse_rx_agg_bufs(struct bnxt_napi *bnapi, u16 cp_cons,
                                   u32 agg_bufs)
{
        struct bnxt *bp = bnapi->bp;
        struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
        struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
        u16 prod = rxr->rx_agg_prod;
        u16 sw_prod = rxr->rx_sw_agg_prod;
        u32 i;

        for (i = 0; i < agg_bufs; i++) {
                u16 cons;
                struct rx_agg_cmp *agg;
                struct bnxt_sw_rx_agg_bd *cons_rx_buf, *prod_rx_buf;
                struct rx_bd *prod_bd;
                struct page *page;

                agg = (struct rx_agg_cmp *)
                        &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
                cons = agg->rx_agg_cmp_opaque;
                __clear_bit(cons, rxr->rx_agg_bmap);

                if (unlikely(test_bit(sw_prod, rxr->rx_agg_bmap)))
                        sw_prod = bnxt_find_next_agg_idx(rxr, sw_prod);

                __set_bit(sw_prod, rxr->rx_agg_bmap);
                prod_rx_buf = &rxr->rx_agg_ring[sw_prod];
                cons_rx_buf = &rxr->rx_agg_ring[cons];

                /* It is possible for sw_prod to be equal to cons, so
                 * set cons_rx_buf->page to NULL first.
                 */
                page = cons_rx_buf->page;
                cons_rx_buf->page = NULL;
                prod_rx_buf->page = page;
                prod_rx_buf->offset = cons_rx_buf->offset;

                prod_rx_buf->mapping = cons_rx_buf->mapping;

                prod_bd = &rxr->rx_agg_desc_ring[RX_RING(prod)][RX_IDX(prod)];

                prod_bd->rx_bd_haddr = cpu_to_le64(cons_rx_buf->mapping);
                prod_bd->rx_bd_opaque = sw_prod;

                prod = NEXT_RX_AGG(prod);
                sw_prod = NEXT_RX_AGG(sw_prod);
                cp_cons = NEXT_CMP(cp_cons);
        }
        rxr->rx_agg_prod = prod;
        rxr->rx_sw_agg_prod = sw_prod;
}

static struct sk_buff *bnxt_rx_page_skb(struct bnxt *bp,
                                        struct bnxt_rx_ring_info *rxr,
                                        u16 cons, void *data, u8 *data_ptr,
                                        dma_addr_t dma_addr,
                                        unsigned int offset_and_len)
{
        unsigned int payload = offset_and_len >> 16;
        unsigned int len = offset_and_len & 0xffff;
        struct skb_frag_struct *frag;
        struct page *page = data;
        u16 prod = rxr->rx_prod;
        struct sk_buff *skb;
        int off, err;

        err = bnxt_alloc_rx_data(bp, rxr, prod, GFP_ATOMIC);
        if (unlikely(err)) {
                bnxt_reuse_rx_data(rxr, cons, data);
                return NULL;
        }
        dma_addr -= bp->rx_dma_offset;
        dma_unmap_page_attrs(&bp->pdev->dev, dma_addr, PAGE_SIZE, bp->rx_dir,
                             DMA_ATTR_WEAK_ORDERING);

        if (unlikely(!payload))
                payload = eth_get_headlen(data_ptr, len);

        skb = napi_alloc_skb(&rxr->bnapi->napi, payload);
        if (!skb) {
                __free_page(page);
                return NULL;
        }

        off = (void *)data_ptr - page_address(page);
        skb_add_rx_frag(skb, 0, page, off, len, PAGE_SIZE);
        memcpy(skb->data - NET_IP_ALIGN, data_ptr - NET_IP_ALIGN,
               payload + NET_IP_ALIGN);

        frag = &skb_shinfo(skb)->frags[0];
        skb_frag_size_sub(frag, payload);
        frag->page_offset += payload;
        skb->data_len -= payload;
        skb->tail += payload;

        return skb;
}
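
/* Resulting skb layout (illustrative): the first `payload` bytes -- the
 * packet headers, either reported by the chip or recovered with
 * eth_get_headlen() -- end up in the skb's linear area, while the
 * remaining len - payload bytes stay in the page fragment.  A 1500-byte
 * frame with 66 bytes of headers therefore has skb_headlen() == 66 and
 * one 1434-byte frag.
 */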

static struct sk_buff *bnxt_rx_skb(struct bnxt *bp,
                                   struct bnxt_rx_ring_info *rxr, u16 cons,
                                   void *data, u8 *data_ptr,
                                   dma_addr_t dma_addr,
                                   unsigned int offset_and_len)
{
        u16 prod = rxr->rx_prod;
        struct sk_buff *skb;
        int err;

        err = bnxt_alloc_rx_data(bp, rxr, prod, GFP_ATOMIC);
        if (unlikely(err)) {
                bnxt_reuse_rx_data(rxr, cons, data);
                return NULL;
        }

        skb = build_skb(data, 0);
        dma_unmap_single_attrs(&bp->pdev->dev, dma_addr, bp->rx_buf_use_size,
                               bp->rx_dir, DMA_ATTR_WEAK_ORDERING);
        if (!skb) {
                kfree(data);
                return NULL;
        }

        skb_reserve(skb, bp->rx_offset);
        skb_put(skb, offset_and_len & 0xffff);
        return skb;
}

static struct sk_buff *bnxt_rx_pages(struct bnxt *bp, struct bnxt_napi *bnapi,
                                     struct sk_buff *skb, u16 cp_cons,
                                     u32 agg_bufs)
{
        struct pci_dev *pdev = bp->pdev;
        struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
        struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
        u16 prod = rxr->rx_agg_prod;
        u32 i;

        for (i = 0; i < agg_bufs; i++) {
                u16 cons, frag_len;
                struct rx_agg_cmp *agg;
                struct bnxt_sw_rx_agg_bd *cons_rx_buf;
                struct page *page;
                dma_addr_t mapping;

                agg = (struct rx_agg_cmp *)
                        &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
                cons = agg->rx_agg_cmp_opaque;
                frag_len = (le32_to_cpu(agg->rx_agg_cmp_len_flags_type) &
                            RX_AGG_CMP_LEN) >> RX_AGG_CMP_LEN_SHIFT;

                cons_rx_buf = &rxr->rx_agg_ring[cons];
                skb_fill_page_desc(skb, i, cons_rx_buf->page,
                                   cons_rx_buf->offset, frag_len);
                __clear_bit(cons, rxr->rx_agg_bmap);

                /* It is possible for bnxt_alloc_rx_page() to allocate
                 * a sw_prod index that equals the cons index, so we
                 * need to clear the cons entry now.
                 */
                mapping = cons_rx_buf->mapping;
                page = cons_rx_buf->page;
                cons_rx_buf->page = NULL;

                if (bnxt_alloc_rx_page(bp, rxr, prod, GFP_ATOMIC) != 0) {
                        struct skb_shared_info *shinfo;
                        unsigned int nr_frags;

                        shinfo = skb_shinfo(skb);
                        nr_frags = --shinfo->nr_frags;
                        __skb_frag_set_page(&shinfo->frags[nr_frags], NULL);

                        dev_kfree_skb(skb);

                        cons_rx_buf->page = page;

                        /* Update prod since possibly some pages have been
                         * allocated already.
                         */
                        rxr->rx_agg_prod = prod;
                        bnxt_reuse_rx_agg_bufs(bnapi, cp_cons, agg_bufs - i);
                        return NULL;
                }

                dma_unmap_page_attrs(&pdev->dev, mapping, BNXT_RX_PAGE_SIZE,
                                     PCI_DMA_FROMDEVICE,
                                     DMA_ATTR_WEAK_ORDERING);

                skb->data_len += frag_len;
                skb->len += frag_len;
                skb->truesize += PAGE_SIZE;

                prod = NEXT_RX_AGG(prod);
                cp_cons = NEXT_CMP(cp_cons);
        }
        rxr->rx_agg_prod = prod;
        return skb;
}

static int bnxt_agg_bufs_valid(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
                               u8 agg_bufs, u32 *raw_cons)
{
        u16 last;
        struct rx_agg_cmp *agg;

        *raw_cons = ADV_RAW_CMP(*raw_cons, agg_bufs);
        last = RING_CMP(*raw_cons);
        agg = (struct rx_agg_cmp *)
                &cpr->cp_desc_ring[CP_RING(last)][CP_IDX(last)];
        return RX_AGG_CMP_VALID(agg, *raw_cons);
}
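
/* Validity-check sketch (illustrative): bnxt_agg_bufs_valid() advances a
 * copy of the raw consumer index past all agg_bufs entries and tests only
 * the valid bit of the last one.  Because the chip writes completion
 * entries in order, a valid last entry implies the whole burst has
 * landed; otherwise the callers back off with -EBUSY and retry later.
 */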

static inline struct sk_buff *bnxt_copy_skb(struct bnxt_napi *bnapi, u8 *data,
                                            unsigned int len,
                                            dma_addr_t mapping)
{
        struct bnxt *bp = bnapi->bp;
        struct pci_dev *pdev = bp->pdev;
        struct sk_buff *skb;

        skb = napi_alloc_skb(&bnapi->napi, len);
        if (!skb)
                return NULL;

        dma_sync_single_for_cpu(&pdev->dev, mapping, bp->rx_copy_thresh,
                                bp->rx_dir);

        memcpy(skb->data - NET_IP_ALIGN, data - NET_IP_ALIGN,
               len + NET_IP_ALIGN);

        dma_sync_single_for_device(&pdev->dev, mapping, bp->rx_copy_thresh,
                                   bp->rx_dir);

        skb_put(skb, len);
        return skb;
}
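
/* Copy-threshold note (illustrative): bnxt_copy_skb() is only used for
 * frames up to bp->rx_copy_thresh (BNXT_RX_COPY_THRESH, 256 bytes); for
 * small packets a memcpy into a fresh skb is cheaper than giving up and
 * re-allocating the mapped RX buffer.  The copy starts NET_IP_ALIGN
 * bytes early so the IP header alignment of the source is preserved.
 */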

static int bnxt_discard_rx(struct bnxt *bp, struct bnxt_napi *bnapi,
                           u32 *raw_cons, void *cmp)
{
        struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
        struct rx_cmp *rxcmp = cmp;
        u32 tmp_raw_cons = *raw_cons;
        u8 cmp_type, agg_bufs = 0;

        cmp_type = RX_CMP_TYPE(rxcmp);

        if (cmp_type == CMP_TYPE_RX_L2_CMP) {
                agg_bufs = (le32_to_cpu(rxcmp->rx_cmp_misc_v1) &
                            RX_CMP_AGG_BUFS) >>
                           RX_CMP_AGG_BUFS_SHIFT;
        } else if (cmp_type == CMP_TYPE_RX_L2_TPA_END_CMP) {
                struct rx_tpa_end_cmp *tpa_end = cmp;

                agg_bufs = (le32_to_cpu(tpa_end->rx_tpa_end_cmp_misc_v1) &
                            RX_TPA_END_CMP_AGG_BUFS) >>
                           RX_TPA_END_CMP_AGG_BUFS_SHIFT;
        }

        if (agg_bufs) {
                if (!bnxt_agg_bufs_valid(bp, cpr, agg_bufs, &tmp_raw_cons))
                        return -EBUSY;
        }
        *raw_cons = tmp_raw_cons;
        return 0;
}

static void bnxt_sched_reset(struct bnxt *bp, struct bnxt_rx_ring_info *rxr)
{
        if (!rxr->bnapi->in_reset) {
                rxr->bnapi->in_reset = true;
                set_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event);
                schedule_work(&bp->sp_task);
        }
        rxr->rx_next_cons = 0xffff;
}

static void bnxt_tpa_start(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
                           struct rx_tpa_start_cmp *tpa_start,
                           struct rx_tpa_start_cmp_ext *tpa_start1)
{
        u8 agg_id = TPA_START_AGG_ID(tpa_start);
        u16 cons, prod;
        struct bnxt_tpa_info *tpa_info;
        struct bnxt_sw_rx_bd *cons_rx_buf, *prod_rx_buf;
        struct rx_bd *prod_bd;
        dma_addr_t mapping;

        cons = tpa_start->rx_tpa_start_cmp_opaque;
        prod = rxr->rx_prod;
        cons_rx_buf = &rxr->rx_buf_ring[cons];
        prod_rx_buf = &rxr->rx_buf_ring[prod];
        tpa_info = &rxr->rx_tpa[agg_id];

        if (unlikely(cons != rxr->rx_next_cons)) {
                bnxt_sched_reset(bp, rxr);
                return;
        }

        prod_rx_buf->data = tpa_info->data;
        prod_rx_buf->data_ptr = tpa_info->data_ptr;

        mapping = tpa_info->mapping;
        prod_rx_buf->mapping = mapping;

        prod_bd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];

        prod_bd->rx_bd_haddr = cpu_to_le64(mapping);

        tpa_info->data = cons_rx_buf->data;
        tpa_info->data_ptr = cons_rx_buf->data_ptr;
        cons_rx_buf->data = NULL;
        tpa_info->mapping = cons_rx_buf->mapping;

        tpa_info->len =
                le32_to_cpu(tpa_start->rx_tpa_start_cmp_len_flags_type) >>
                                RX_TPA_START_CMP_LEN_SHIFT;
        if (likely(TPA_START_HASH_VALID(tpa_start))) {
                u32 hash_type = TPA_START_HASH_TYPE(tpa_start);

                tpa_info->hash_type = PKT_HASH_TYPE_L4;
                tpa_info->gso_type = SKB_GSO_TCPV4;
                /* RSS profiles 1 and 3 with extract code 0 for inner 4-tuple */
                if (hash_type == 3)
                        tpa_info->gso_type = SKB_GSO_TCPV6;
                tpa_info->rss_hash =
                        le32_to_cpu(tpa_start->rx_tpa_start_cmp_rss_hash);
        } else {
                tpa_info->hash_type = PKT_HASH_TYPE_NONE;
                tpa_info->gso_type = 0;
                if (netif_msg_rx_err(bp))
                        netdev_warn(bp->dev, "TPA packet without valid hash\n");
        }
        tpa_info->flags2 = le32_to_cpu(tpa_start1->rx_tpa_start_cmp_flags2);
        tpa_info->metadata = le32_to_cpu(tpa_start1->rx_tpa_start_cmp_metadata);
        tpa_info->hdr_info = le32_to_cpu(tpa_start1->rx_tpa_start_cmp_hdr_info);

        rxr->rx_prod = NEXT_RX(prod);
        cons = NEXT_RX(cons);
        rxr->rx_next_cons = NEXT_RX(cons);
        cons_rx_buf = &rxr->rx_buf_ring[cons];

        bnxt_reuse_rx_data(rxr, cons, cons_rx_buf->data);
        rxr->rx_prod = NEXT_RX(rxr->rx_prod);
        cons_rx_buf->data = NULL;
}

static void bnxt_abort_tpa(struct bnxt *bp, struct bnxt_napi *bnapi,
                           u16 cp_cons, u32 agg_bufs)
{
        if (agg_bufs)
                bnxt_reuse_rx_agg_bufs(bnapi, cp_cons, agg_bufs);
}

static struct sk_buff *bnxt_gro_func_5731x(struct bnxt_tpa_info *tpa_info,
                                           int payload_off, int tcp_ts,
                                           struct sk_buff *skb)
{
#ifdef CONFIG_INET
        struct tcphdr *th;
        int len, nw_off;
        u16 outer_ip_off, inner_ip_off, inner_mac_off;
        u32 hdr_info = tpa_info->hdr_info;
        bool loopback = false;

        inner_ip_off = BNXT_TPA_INNER_L3_OFF(hdr_info);
        inner_mac_off = BNXT_TPA_INNER_L2_OFF(hdr_info);
        outer_ip_off = BNXT_TPA_OUTER_L3_OFF(hdr_info);

        /* If the packet is an internal loopback packet, the offsets will
         * have an extra 4 bytes.
         */
        if (inner_mac_off == 4) {
                loopback = true;
        } else if (inner_mac_off > 4) {
                __be16 proto = *((__be16 *)(skb->data + inner_ip_off -
                                            ETH_HLEN - 2));

                /* We only support inner IPv4/IPv6.  If we don't see the
                 * correct protocol ID, it must be a loopback packet where
                 * the offsets are off by 4.
                 */
                if (proto != htons(ETH_P_IP) && proto != htons(ETH_P_IPV6))
                        loopback = true;
        }
        if (loopback) {
                /* internal loopback packet, subtract all offsets by 4 */
                inner_ip_off -= 4;
                inner_mac_off -= 4;
                outer_ip_off -= 4;
        }

        nw_off = inner_ip_off - ETH_HLEN;
        skb_set_network_header(skb, nw_off);
        if (tpa_info->flags2 & RX_TPA_START_CMP_FLAGS2_IP_TYPE) {
                struct ipv6hdr *iph = ipv6_hdr(skb);

                skb_set_transport_header(skb, nw_off + sizeof(struct ipv6hdr));
                len = skb->len - skb_transport_offset(skb);
                th = tcp_hdr(skb);
                th->check = ~tcp_v6_check(len, &iph->saddr, &iph->daddr, 0);
        } else {
                struct iphdr *iph = ip_hdr(skb);

                skb_set_transport_header(skb, nw_off + sizeof(struct iphdr));
                len = skb->len - skb_transport_offset(skb);
                th = tcp_hdr(skb);
                th->check = ~tcp_v4_check(len, iph->saddr, iph->daddr, 0);
        }

        if (inner_mac_off) { /* tunnel */
                struct udphdr *uh = NULL;
                __be16 proto = *((__be16 *)(skb->data + outer_ip_off -
                                            ETH_HLEN - 2));

                if (proto == htons(ETH_P_IP)) {
                        struct iphdr *iph = (struct iphdr *)skb->data;

                        if (iph->protocol == IPPROTO_UDP)
                                uh = (struct udphdr *)(iph + 1);
                } else {
                        struct ipv6hdr *iph = (struct ipv6hdr *)skb->data;

                        if (iph->nexthdr == IPPROTO_UDP)
                                uh = (struct udphdr *)(iph + 1);
                }
                if (uh) {
                        if (uh->check)
                                skb_shinfo(skb)->gso_type |=
                                        SKB_GSO_UDP_TUNNEL_CSUM;
                        else
                                skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_TUNNEL;
                }
        }
#endif
        return skb;
}

#define BNXT_IPV4_HDR_SIZE      (sizeof(struct iphdr) + sizeof(struct tcphdr))
#define BNXT_IPV6_HDR_SIZE      (sizeof(struct ipv6hdr) + sizeof(struct tcphdr))

static struct sk_buff *bnxt_gro_func_5730x(struct bnxt_tpa_info *tpa_info,
                                           int payload_off, int tcp_ts,
                                           struct sk_buff *skb)
{
#ifdef CONFIG_INET
        struct tcphdr *th;
        int len, nw_off, tcp_opt_len = 0;

        if (tcp_ts)
                tcp_opt_len = 12;

        if (tpa_info->gso_type == SKB_GSO_TCPV4) {
                struct iphdr *iph;

                nw_off = payload_off - BNXT_IPV4_HDR_SIZE - tcp_opt_len -
                         ETH_HLEN;
                skb_set_network_header(skb, nw_off);
                iph = ip_hdr(skb);
                skb_set_transport_header(skb, nw_off + sizeof(struct iphdr));
                len = skb->len - skb_transport_offset(skb);
                th = tcp_hdr(skb);
                th->check = ~tcp_v4_check(len, iph->saddr, iph->daddr, 0);
        } else if (tpa_info->gso_type == SKB_GSO_TCPV6) {
                struct ipv6hdr *iph;

                nw_off = payload_off - BNXT_IPV6_HDR_SIZE - tcp_opt_len -
                         ETH_HLEN;
                skb_set_network_header(skb, nw_off);
                iph = ipv6_hdr(skb);
                skb_set_transport_header(skb, nw_off + sizeof(struct ipv6hdr));
                len = skb->len - skb_transport_offset(skb);
                th = tcp_hdr(skb);
                th->check = ~tcp_v6_check(len, &iph->saddr, &iph->daddr, 0);
        } else {
                dev_kfree_skb_any(skb);
                return NULL;
        }

        if (nw_off) { /* tunnel */
                struct udphdr *uh = NULL;

                if (skb->protocol == htons(ETH_P_IP)) {
                        struct iphdr *iph = (struct iphdr *)skb->data;

                        if (iph->protocol == IPPROTO_UDP)
                                uh = (struct udphdr *)(iph + 1);
                } else {
                        struct ipv6hdr *iph = (struct ipv6hdr *)skb->data;

                        if (iph->nexthdr == IPPROTO_UDP)
                                uh = (struct udphdr *)(iph + 1);
                }
                if (uh) {
                        if (uh->check)
                                skb_shinfo(skb)->gso_type |=
                                        SKB_GSO_UDP_TUNNEL_CSUM;
                        else
                                skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_TUNNEL;
                }
        }
#endif
        return skb;
}
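
/* Worked example for the 5730x GRO fixup above (illustrative): for a
 * plain, non-tunneled IPv4 aggregation with TCP timestamps, payload_off
 * is ETH_HLEN + 20 (IP) + 20 (TCP) + 12 (options) = 66, so nw_off =
 * 66 - BNXT_IPV4_HDR_SIZE - 12 - ETH_HLEN = 0 and the tunnel branch is
 * skipped.  Any non-zero nw_off means encapsulation headers sit in front
 * of the inner Ethernet header.
 */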

static inline struct sk_buff *bnxt_gro_skb(struct bnxt *bp,
                                           struct bnxt_tpa_info *tpa_info,
                                           struct rx_tpa_end_cmp *tpa_end,
                                           struct rx_tpa_end_cmp_ext *tpa_end1,
                                           struct sk_buff *skb)
{
#ifdef CONFIG_INET
        int payload_off;
        u16 segs;

        segs = TPA_END_TPA_SEGS(tpa_end);
        if (segs == 1)
                return skb;

        NAPI_GRO_CB(skb)->count = segs;
        skb_shinfo(skb)->gso_size =
                le32_to_cpu(tpa_end1->rx_tpa_end_cmp_seg_len);
        skb_shinfo(skb)->gso_type = tpa_info->gso_type;
        payload_off = (le32_to_cpu(tpa_end->rx_tpa_end_cmp_misc_v1) &
                       RX_TPA_END_CMP_PAYLOAD_OFFSET) >>
                      RX_TPA_END_CMP_PAYLOAD_OFFSET_SHIFT;
        skb = bp->gro_func(tpa_info, payload_off, TPA_END_GRO_TS(tpa_end), skb);
        if (likely(skb))
                tcp_gro_complete(skb);
#endif
        return skb;
}

static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp,
                                           struct bnxt_napi *bnapi,
                                           u32 *raw_cons,
                                           struct rx_tpa_end_cmp *tpa_end,
                                           struct rx_tpa_end_cmp_ext *tpa_end1,
                                           u8 *event)
{
        struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
        struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
        u8 agg_id = TPA_END_AGG_ID(tpa_end);
        u8 *data_ptr, agg_bufs;
        u16 cp_cons = RING_CMP(*raw_cons);
        unsigned int len;
        struct bnxt_tpa_info *tpa_info;
        dma_addr_t mapping;
        struct sk_buff *skb;
        void *data;

        if (unlikely(bnapi->in_reset)) {
                int rc = bnxt_discard_rx(bp, bnapi, raw_cons, tpa_end);

                if (rc < 0)
                        return ERR_PTR(-EBUSY);
                return NULL;
        }

        tpa_info = &rxr->rx_tpa[agg_id];
        data = tpa_info->data;
        data_ptr = tpa_info->data_ptr;
        prefetch(data_ptr);
        len = tpa_info->len;
        mapping = tpa_info->mapping;

        agg_bufs = (le32_to_cpu(tpa_end->rx_tpa_end_cmp_misc_v1) &
                    RX_TPA_END_CMP_AGG_BUFS) >> RX_TPA_END_CMP_AGG_BUFS_SHIFT;

        if (agg_bufs) {
                if (!bnxt_agg_bufs_valid(bp, cpr, agg_bufs, raw_cons))
                        return ERR_PTR(-EBUSY);

                *event |= BNXT_AGG_EVENT;
                cp_cons = NEXT_CMP(cp_cons);
        }

        if (unlikely(agg_bufs > MAX_SKB_FRAGS || TPA_END_ERRORS(tpa_end1))) {
                bnxt_abort_tpa(bp, bnapi, cp_cons, agg_bufs);
                if (agg_bufs > MAX_SKB_FRAGS)
                        netdev_warn(bp->dev, "TPA frags %d exceeded MAX_SKB_FRAGS %d\n",
                                    agg_bufs, (int)MAX_SKB_FRAGS);
                return NULL;
        }

        if (len <= bp->rx_copy_thresh) {
                skb = bnxt_copy_skb(bnapi, data_ptr, len, mapping);
                if (!skb) {
                        bnxt_abort_tpa(bp, bnapi, cp_cons, agg_bufs);
                        return NULL;
                }
        } else {
                u8 *new_data;
                dma_addr_t new_mapping;

                new_data = __bnxt_alloc_rx_data(bp, &new_mapping, GFP_ATOMIC);
                if (!new_data) {
                        bnxt_abort_tpa(bp, bnapi, cp_cons, agg_bufs);
                        return NULL;
                }

                tpa_info->data = new_data;
                tpa_info->data_ptr = new_data + bp->rx_offset;
                tpa_info->mapping = new_mapping;

                skb = build_skb(data, 0);
                dma_unmap_single_attrs(&bp->pdev->dev, mapping,
                                       bp->rx_buf_use_size, bp->rx_dir,
                                       DMA_ATTR_WEAK_ORDERING);

                if (!skb) {
                        kfree(data);
                        bnxt_abort_tpa(bp, bnapi, cp_cons, agg_bufs);
                        return NULL;
                }
                skb_reserve(skb, bp->rx_offset);
                skb_put(skb, len);
        }

        if (agg_bufs) {
                skb = bnxt_rx_pages(bp, bnapi, skb, cp_cons, agg_bufs);
                if (!skb) {
                        /* Page reuse already handled by bnxt_rx_pages(). */
                        return NULL;
                }
        }
        skb->protocol = eth_type_trans(skb, bp->dev);

        if (tpa_info->hash_type != PKT_HASH_TYPE_NONE)
                skb_set_hash(skb, tpa_info->rss_hash, tpa_info->hash_type);

        if ((tpa_info->flags2 & RX_CMP_FLAGS2_META_FORMAT_VLAN) &&
            (skb->dev->features & NETIF_F_HW_VLAN_CTAG_RX)) {
                u16 vlan_proto = tpa_info->metadata >>
                        RX_CMP_FLAGS2_METADATA_TPID_SFT;
                u16 vtag = tpa_info->metadata & RX_CMP_FLAGS2_METADATA_VID_MASK;

                __vlan_hwaccel_put_tag(skb, htons(vlan_proto), vtag);
        }

        skb_checksum_none_assert(skb);
        if (likely(tpa_info->flags2 & RX_TPA_START_CMP_FLAGS2_L4_CS_CALC)) {
                skb->ip_summed = CHECKSUM_UNNECESSARY;
                skb->csum_level =
                        (tpa_info->flags2 & RX_CMP_FLAGS2_T_L4_CS_CALC) >> 3;
        }

        if (TPA_END_GRO(tpa_end))
                skb = bnxt_gro_skb(bp, tpa_info, tpa_end, tpa_end1, skb);

        return skb;
}
1389
1390 /* returns the following:
1391  * 1       - 1 packet successfully received
1392  * 0       - successful TPA_START, packet not completed yet
1393  * -EBUSY  - completion ring does not have all the agg buffers yet
1394  * -ENOMEM - packet aborted due to out of memory
1395  * -EIO    - packet aborted due to hw error indicated in BD
1396  */
1397 static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_napi *bnapi, u32 *raw_cons,
1398                        u8 *event)
1399 {
1400         struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
1401         struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
1402         struct net_device *dev = bp->dev;
1403         struct rx_cmp *rxcmp;
1404         struct rx_cmp_ext *rxcmp1;
1405         u32 tmp_raw_cons = *raw_cons;
1406         u16 cons, prod, cp_cons = RING_CMP(tmp_raw_cons);
1407         struct bnxt_sw_rx_bd *rx_buf;
1408         unsigned int len;
1409         u8 *data_ptr, agg_bufs, cmp_type;
1410         dma_addr_t dma_addr;
1411         struct sk_buff *skb;
1412         void *data;
1413         int rc = 0;
1414         u32 misc;
1415
1416         rxcmp = (struct rx_cmp *)
1417                         &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
1418
1419         tmp_raw_cons = NEXT_RAW_CMP(tmp_raw_cons);
1420         cp_cons = RING_CMP(tmp_raw_cons);
1421         rxcmp1 = (struct rx_cmp_ext *)
1422                         &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
1423
1424         if (!RX_CMP_VALID(rxcmp1, tmp_raw_cons))
1425                 return -EBUSY;
1426
1427         cmp_type = RX_CMP_TYPE(rxcmp);
1428
1429         prod = rxr->rx_prod;
1430
1431         if (cmp_type == CMP_TYPE_RX_L2_TPA_START_CMP) {
1432                 bnxt_tpa_start(bp, rxr, (struct rx_tpa_start_cmp *)rxcmp,
1433                                (struct rx_tpa_start_cmp_ext *)rxcmp1);
1434
1435                 *event |= BNXT_RX_EVENT;
1436                 goto next_rx_no_prod;
1437
1438         } else if (cmp_type == CMP_TYPE_RX_L2_TPA_END_CMP) {
1439                 skb = bnxt_tpa_end(bp, bnapi, &tmp_raw_cons,
1440                                    (struct rx_tpa_end_cmp *)rxcmp,
1441                                    (struct rx_tpa_end_cmp_ext *)rxcmp1, event);
1442
1443                 if (unlikely(IS_ERR(skb)))
1444                         return -EBUSY;
1445
1446                 rc = -ENOMEM;
1447                 if (likely(skb)) {
1448                         skb_record_rx_queue(skb, bnapi->index);
1449                         napi_gro_receive(&bnapi->napi, skb);
1450                         rc = 1;
1451                 }
1452                 *event |= BNXT_RX_EVENT;
1453                 goto next_rx_no_prod;
1454         }
1455
1456         cons = rxcmp->rx_cmp_opaque;
1457         rx_buf = &rxr->rx_buf_ring[cons];
1458         data = rx_buf->data;
1459         data_ptr = rx_buf->data_ptr;
1460         if (unlikely(cons != rxr->rx_next_cons)) {
1461                 int rc1 = bnxt_discard_rx(bp, bnapi, raw_cons, rxcmp);
1462
1463                 bnxt_sched_reset(bp, rxr);
1464                 return rc1;
1465         }
1466         prefetch(data_ptr);
1467
1468         misc = le32_to_cpu(rxcmp->rx_cmp_misc_v1);
1469         agg_bufs = (misc & RX_CMP_AGG_BUFS) >> RX_CMP_AGG_BUFS_SHIFT;
1470
1471         if (agg_bufs) {
1472                 if (!bnxt_agg_bufs_valid(bp, cpr, agg_bufs, &tmp_raw_cons))
1473                         return -EBUSY;
1474
1475                 cp_cons = NEXT_CMP(cp_cons);
1476                 *event |= BNXT_AGG_EVENT;
1477         }
1478         *event |= BNXT_RX_EVENT;
1479
1480         rx_buf->data = NULL;
1481         if (rxcmp1->rx_cmp_cfa_code_errors_v2 & RX_CMP_L2_ERRORS) {
1482                 bnxt_reuse_rx_data(rxr, cons, data);
1483                 if (agg_bufs)
1484                         bnxt_reuse_rx_agg_bufs(bnapi, cp_cons, agg_bufs);
1485
1486                 rc = -EIO;
1487                 goto next_rx;
1488         }
1489
1490         len = le32_to_cpu(rxcmp->rx_cmp_len_flags_type) >> RX_CMP_LEN_SHIFT;
1491         dma_addr = rx_buf->mapping;
1492
1493         if (bnxt_rx_xdp(bp, rxr, cons, data, &data_ptr, &len, event)) {
1494                 rc = 1;
1495                 goto next_rx;
1496         }
1497
1498         if (len <= bp->rx_copy_thresh) {
1499                 skb = bnxt_copy_skb(bnapi, data_ptr, len, dma_addr);
1500                 bnxt_reuse_rx_data(rxr, cons, data);
1501                 if (!skb) {
1502                         rc = -ENOMEM;
1503                         goto next_rx;
1504                 }
1505         } else {
1506                 u32 payload;
1507
1508                 if (rx_buf->data_ptr == data_ptr)
1509                         payload = misc & RX_CMP_PAYLOAD_OFFSET;
1510                 else
1511                         payload = 0;
1512                 skb = bp->rx_skb_func(bp, rxr, cons, data, data_ptr, dma_addr,
1513                                       payload | len);
1514                 if (!skb) {
1515                         rc = -ENOMEM;
1516                         goto next_rx;
1517                 }
1518         }
1519
1520         if (agg_bufs) {
1521                 skb = bnxt_rx_pages(bp, bnapi, skb, cp_cons, agg_bufs);
1522                 if (!skb) {
1523                         rc = -ENOMEM;
1524                         goto next_rx;
1525                 }
1526         }
1527
1528         if (RX_CMP_HASH_VALID(rxcmp)) {
1529                 u32 hash_type = RX_CMP_HASH_TYPE(rxcmp);
1530                 enum pkt_hash_types type = PKT_HASH_TYPE_L4;
1531
1532                 /* RSS profiles 1 and 3 with extract code 0 for inner 4-tuple */
1533                 if (hash_type != 1 && hash_type != 3)
1534                         type = PKT_HASH_TYPE_L3;
1535                 skb_set_hash(skb, le32_to_cpu(rxcmp->rx_cmp_rss_hash), type);
1536         }
1537
1538         skb->protocol = eth_type_trans(skb, dev);
1539
1540         if ((rxcmp1->rx_cmp_flags2 &
1541              cpu_to_le32(RX_CMP_FLAGS2_META_FORMAT_VLAN)) &&
1542             (skb->dev->features & NETIF_F_HW_VLAN_CTAG_RX)) {
1543                 u32 meta_data = le32_to_cpu(rxcmp1->rx_cmp_meta_data);
1544                 u16 vtag = meta_data & RX_CMP_FLAGS2_METADATA_VID_MASK;
1545                 u16 vlan_proto = meta_data >> RX_CMP_FLAGS2_METADATA_TPID_SFT;
1546
1547                 __vlan_hwaccel_put_tag(skb, htons(vlan_proto), vtag);
1548         }
1549
1550         skb_checksum_none_assert(skb);
1551         if (RX_CMP_L4_CS_OK(rxcmp1)) {
1552                 if (dev->features & NETIF_F_RXCSUM) {
1553                         skb->ip_summed = CHECKSUM_UNNECESSARY;
1554                         skb->csum_level = RX_CMP_ENCAP(rxcmp1);
1555                 }
1556         } else {
1557                 if (rxcmp1->rx_cmp_cfa_code_errors_v2 & RX_CMP_L4_CS_ERR_BITS) {
1558                         if (dev->features & NETIF_F_RXCSUM)
1559                                 cpr->rx_l4_csum_errors++;
1560                 }
1561         }
1562
1563         skb_record_rx_queue(skb, bnapi->index);
1564         napi_gro_receive(&bnapi->napi, skb);
1565         rc = 1;
1566
1567 next_rx:
1568         rxr->rx_prod = NEXT_RX(prod);
1569         rxr->rx_next_cons = NEXT_RX(cons);
1570
1571 next_rx_no_prod:
1572         *raw_cons = tmp_raw_cons;
1573
1574         return rc;
1575 }
1576
1577 /* In netpoll mode, if we are using a combined completion ring, we need to
1578  * discard the rx packets and recycle the buffers.
1579  */
1580 static int bnxt_force_rx_discard(struct bnxt *bp, struct bnxt_napi *bnapi,
1581                                  u32 *raw_cons, u8 *event)
1582 {
1583         struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
1584         u32 tmp_raw_cons = *raw_cons;
1585         struct rx_cmp_ext *rxcmp1;
1586         struct rx_cmp *rxcmp;
1587         u16 cp_cons;
1588         u8 cmp_type;
1589
1590         cp_cons = RING_CMP(tmp_raw_cons);
1591         rxcmp = (struct rx_cmp *)
1592                         &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
1593
1594         tmp_raw_cons = NEXT_RAW_CMP(tmp_raw_cons);
1595         cp_cons = RING_CMP(tmp_raw_cons);
1596         rxcmp1 = (struct rx_cmp_ext *)
1597                         &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
1598
1599         if (!RX_CMP_VALID(rxcmp1, tmp_raw_cons))
1600                 return -EBUSY;
1601
1602         cmp_type = RX_CMP_TYPE(rxcmp);
1603         if (cmp_type == CMP_TYPE_RX_L2_CMP) {
1604                 rxcmp1->rx_cmp_cfa_code_errors_v2 |=
1605                         cpu_to_le32(RX_CMPL_ERRORS_CRC_ERROR);
1606         } else if (cmp_type == CMP_TYPE_RX_L2_TPA_END_CMP) {
1607                 struct rx_tpa_end_cmp_ext *tpa_end1;
1608
1609                 tpa_end1 = (struct rx_tpa_end_cmp_ext *)rxcmp1;
1610                 tpa_end1->rx_tpa_end_cmp_errors_v2 |=
1611                         cpu_to_le32(RX_TPA_END_CMP_ERRORS);
1612         }
1613         return bnxt_rx_pkt(bp, bnapi, raw_cons, event);
1614 }
1615
1616 #define BNXT_GET_EVENT_PORT(data)       \
1617         ((data) &                       \
1618          ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_PORT_ID_MASK)
1619
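/* Handle one async event completion from firmware: map the event ID to the
 * matching sp_event bit and schedule sp_task, which does the real work in
 * process context.  Events not applicable to this function type (PF vs. VF)
 * are filtered out before scheduling.
 */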
1620 static int bnxt_async_event_process(struct bnxt *bp,
1621                                     struct hwrm_async_event_cmpl *cmpl)
1622 {
1623         u16 event_id = le16_to_cpu(cmpl->event_id);
1624
1625         /* TODO CHIMP_FW: Define event IDs for link change, error, etc. */
1626         switch (event_id) {
1627         case ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE: {
1628                 u32 data1 = le32_to_cpu(cmpl->event_data1);
1629                 struct bnxt_link_info *link_info = &bp->link_info;
1630
1631                 if (BNXT_VF(bp))
1632                         goto async_event_process_exit;
1633                 if (data1 & 0x20000) {
1634                         u16 fw_speed = link_info->force_link_speed;
1635                         u32 speed = bnxt_fw_to_ethtool_speed(fw_speed);
1636
1637                         netdev_warn(bp->dev, "Link speed %d no longer supported\n",
1638                                     speed);
1639                 }
1640                 set_bit(BNXT_LINK_SPEED_CHNG_SP_EVENT, &bp->sp_event);
1641                 /* fall through */
1642         }
1643         case ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE:
1644                 set_bit(BNXT_LINK_CHNG_SP_EVENT, &bp->sp_event);
1645                 break;
1646         case ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD:
1647                 set_bit(BNXT_HWRM_PF_UNLOAD_SP_EVENT, &bp->sp_event);
1648                 break;
1649         case ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED: {
1650                 u32 data1 = le32_to_cpu(cmpl->event_data1);
1651                 u16 port_id = BNXT_GET_EVENT_PORT(data1);
1652
1653                 if (BNXT_VF(bp))
1654                         break;
1655
1656                 if (bp->pf.port_id != port_id)
1657                         break;
1658
1659                 set_bit(BNXT_HWRM_PORT_MODULE_SP_EVENT, &bp->sp_event);
1660                 break;
1661         }
1662         case ASYNC_EVENT_CMPL_EVENT_ID_VF_CFG_CHANGE:
1663                 if (BNXT_PF(bp))
1664                         goto async_event_process_exit;
1665                 set_bit(BNXT_RESET_TASK_SILENT_SP_EVENT, &bp->sp_event);
1666                 break;
1667         default:
1668                 goto async_event_process_exit;
1669         }
1670         schedule_work(&bp->sp_task);
1671 async_event_process_exit:
1672         bnxt_ulp_async_events(bp, cmpl);
1673         return 0;
1674 }
1675
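/* Dispatch HWRM-related completions: HWRM_DONE acknowledges a firmware
 * command issued in interrupt mode, FWD_REQ queues a VF message for the PF
 * to forward, and ASYNC_EVENT is decoded by bnxt_async_event_process().
 */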
1676 static int bnxt_hwrm_handler(struct bnxt *bp, struct tx_cmp *txcmp)
1677 {
1678         u16 cmpl_type = TX_CMP_TYPE(txcmp), vf_id, seq_id;
1679         struct hwrm_cmpl *h_cmpl = (struct hwrm_cmpl *)txcmp;
1680         struct hwrm_fwd_req_cmpl *fwd_req_cmpl =
1681                                 (struct hwrm_fwd_req_cmpl *)txcmp;
1682
1683         switch (cmpl_type) {
1684         case CMPL_BASE_TYPE_HWRM_DONE:
1685                 seq_id = le16_to_cpu(h_cmpl->sequence_id);
1686                 if (seq_id == bp->hwrm_intr_seq_id)
1687                         bp->hwrm_intr_seq_id = HWRM_SEQ_ID_INVALID;
1688                 else
1689                         netdev_err(bp->dev, "Invalid hwrm seq id %d\n", seq_id);
1690                 break;
1691
1692         case CMPL_BASE_TYPE_HWRM_FWD_REQ:
1693                 vf_id = le16_to_cpu(fwd_req_cmpl->source_id);
1694
1695                 if ((vf_id < bp->pf.first_vf_id) ||
1696                     (vf_id >= bp->pf.first_vf_id + bp->pf.active_vfs)) {
1697                         netdev_err(bp->dev, "Msg contains invalid VF id %x\n",
1698                                    vf_id);
1699                         return -EINVAL;
1700                 }
1701
1702                 set_bit(vf_id - bp->pf.first_vf_id, bp->pf.vf_event_bmap);
1703                 set_bit(BNXT_HWRM_EXEC_FWD_REQ_SP_EVENT, &bp->sp_event);
1704                 schedule_work(&bp->sp_task);
1705                 break;
1706
1707         case CMPL_BASE_TYPE_HWRM_ASYNC_EVENT:
1708                 bnxt_async_event_process(bp,
1709                                          (struct hwrm_async_event_cmpl *)txcmp);
1710
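                /* fall through */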
1711         default:
1712                 break;
1713         }
1714
1715         return 0;
1716 }
1717
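/* MSI-X handler: each vector is bound to one NAPI instance, so just
 * prefetch the next completion entry and schedule NAPI; all real work is
 * done in the poll routine.
 */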
1718 static irqreturn_t bnxt_msix(int irq, void *dev_instance)
1719 {
1720         struct bnxt_napi *bnapi = dev_instance;
1721         struct bnxt *bp = bnapi->bp;
1722         struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
1723         u32 cons = RING_CMP(cpr->cp_raw_cons);
1724
1725         prefetch(&cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)]);
1726         napi_schedule(&bnapi->napi);
1727         return IRQ_HANDLED;
1728 }
1729
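/* Return true if the next completion ring entry is valid, i.e. the ring
 * has work pending.
 */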
1730 static inline int bnxt_has_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr)
1731 {
1732         u32 raw_cons = cpr->cp_raw_cons;
1733         u16 cons = RING_CMP(raw_cons);
1734         struct tx_cmp *txcmp;
1735
1736         txcmp = &cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)];
1737
1738         return TX_CMP_VALID(txcmp, raw_cons);
1739 }
1740
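/* Legacy INTA handler.  The interrupt line may be shared, so check for
 * pending work or our status bit before claiming the interrupt, then mask
 * the ring IRQ and schedule NAPI.
 */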
1741 static irqreturn_t bnxt_inta(int irq, void *dev_instance)
1742 {
1743         struct bnxt_napi *bnapi = dev_instance;
1744         struct bnxt *bp = bnapi->bp;
1745         struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
1746         u32 cons = RING_CMP(cpr->cp_raw_cons);
1747         u32 int_status;
1748
1749         prefetch(&cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)]);
1750
1751         if (!bnxt_has_work(bp, cpr)) {
1752                 int_status = readl(bp->bar0 + BNXT_CAG_REG_LEGACY_INT_STATUS);
1753                 /* Not our interrupt: the status bit for this ring is not set */
1754                 if (!(int_status & (0x10000 << cpr->cp_ring_struct.fw_ring_id)))
1755                         return IRQ_NONE;
1756         }
1757
1758         /* disable ring IRQ */
1759         BNXT_CP_DB_IRQ_DIS(cpr->cp_doorbell);
1760
1761         /* Return here if interrupt is shared and is disabled. */
1762         if (unlikely(atomic_read(&bp->intr_sem) != 0))
1763                 return IRQ_HANDLED;
1764
1765         napi_schedule(&bnapi->napi);
1766         return IRQ_HANDLED;
1767 }
1768
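/* Core poll loop: consume completion ring entries, handling TX
 * completions, RX packets (up to @budget), and HWRM completions, then
 * update the doorbells for whichever rings saw events.
 */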
1769 static int bnxt_poll_work(struct bnxt *bp, struct bnxt_napi *bnapi, int budget)
1770 {
1771         struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
1772         u32 raw_cons = cpr->cp_raw_cons;
1773         u32 cons;
1774         int tx_pkts = 0;
1775         int rx_pkts = 0;
1776         u8 event = 0;
1777         struct tx_cmp *txcmp;
1778
1779         while (1) {
1780                 int rc;
1781
1782                 cons = RING_CMP(raw_cons);
1783                 txcmp = &cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)];
1784
1785                 if (!TX_CMP_VALID(txcmp, raw_cons))
1786                         break;
1787
1788                 /* The entry's validity must be checked first, before
1789                  * reading any other fields from it.
1790                  */
1791                 dma_rmb();
1792                 if (TX_CMP_TYPE(txcmp) == CMP_TYPE_TX_L2_CMP) {
1793                         tx_pkts++;
1794                         /* return full budget so NAPI will complete. */
1795                         if (unlikely(tx_pkts > bp->tx_wake_thresh))
1796                                 rx_pkts = budget;
1797                 } else if ((TX_CMP_TYPE(txcmp) & 0x30) == 0x10) {
1798                         if (likely(budget))
1799                                 rc = bnxt_rx_pkt(bp, bnapi, &raw_cons, &event);
1800                         else
1801                                 rc = bnxt_force_rx_discard(bp, bnapi, &raw_cons,
1802                                                            &event);
1803                         if (likely(rc >= 0))
1804                                 rx_pkts += rc;
1805                         else if (rc == -EBUSY)  /* partial completion */
1806                                 break;
1807                 } else if (unlikely((TX_CMP_TYPE(txcmp) ==
1808                                      CMPL_BASE_TYPE_HWRM_DONE) ||
1809                                     (TX_CMP_TYPE(txcmp) ==
1810                                      CMPL_BASE_TYPE_HWRM_FWD_REQ) ||
1811                                     (TX_CMP_TYPE(txcmp) ==
1812                                      CMPL_BASE_TYPE_HWRM_ASYNC_EVENT))) {
1813                         bnxt_hwrm_handler(bp, txcmp);
1814                 }
1815                 raw_cons = NEXT_RAW_CMP(raw_cons);
1816
1817                 if (rx_pkts == budget)
1818                         break;
1819         }
1820
1821         if (event & BNXT_TX_EVENT) {
1822                 struct bnxt_tx_ring_info *txr = bnapi->tx_ring;
1823                 void __iomem *db = txr->tx_doorbell;
1824                 u16 prod = txr->tx_prod;
1825
1826                 /* Sync BD data before updating doorbell */
1827                 wmb();
1828
1829                 bnxt_db_write(bp, db, DB_KEY_TX | prod);
1830         }
1831
1832         cpr->cp_raw_cons = raw_cons;
1833         /* ACK completion ring before freeing tx ring and producing new
1834          * buffers in rx/agg rings to prevent overflowing the completion
1835          * ring.
1836          */
1837         BNXT_CP_DB(cpr->cp_doorbell, cpr->cp_raw_cons);
1838
1839         if (tx_pkts)
1840                 bnapi->tx_int(bp, bnapi, tx_pkts);
1841
1842         if (event & BNXT_RX_EVENT) {
1843                 struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
1844
1845                 bnxt_db_write(bp, rxr->rx_doorbell, DB_KEY_RX | rxr->rx_prod);
1846                 if (event & BNXT_AGG_EVENT)
1847                         bnxt_db_write(bp, rxr->rx_agg_doorbell,
1848                                       DB_KEY_RX | rxr->rx_agg_prod);
1849         }
1850         return rx_pkts;
1851 }
1852
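/* NAPI poll handler for the special Nitro A0 completion ring.  RX packets
 * seen here are never passed up the stack; an error is forced on each
 * completion so that bnxt_rx_pkt() recycles the buffer (-EIO counts as one
 * completed packet).
 */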
1853 static int bnxt_poll_nitroa0(struct napi_struct *napi, int budget)
1854 {
1855         struct bnxt_napi *bnapi = container_of(napi, struct bnxt_napi, napi);
1856         struct bnxt *bp = bnapi->bp;
1857         struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
1858         struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
1859         struct tx_cmp *txcmp;
1860         struct rx_cmp_ext *rxcmp1;
1861         u32 cp_cons, tmp_raw_cons;
1862         u32 raw_cons = cpr->cp_raw_cons;
1863         u32 rx_pkts = 0;
1864         u8 event = 0;
1865
1866         while (1) {
1867                 int rc;
1868
1869                 cp_cons = RING_CMP(raw_cons);
1870                 txcmp = &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
1871
1872                 if (!TX_CMP_VALID(txcmp, raw_cons))
1873                         break;
1874
1875                 if ((TX_CMP_TYPE(txcmp) & 0x30) == 0x10) {
1876                         tmp_raw_cons = NEXT_RAW_CMP(raw_cons);
1877                         cp_cons = RING_CMP(tmp_raw_cons);
1878                         rxcmp1 = (struct rx_cmp_ext *)
1879                           &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
1880
1881                         if (!RX_CMP_VALID(rxcmp1, tmp_raw_cons))
1882                                 break;
1883
1884                         /* force an error to recycle the buffer */
1885                         rxcmp1->rx_cmp_cfa_code_errors_v2 |=
1886                                 cpu_to_le32(RX_CMPL_ERRORS_CRC_ERROR);
1887
1888                         rc = bnxt_rx_pkt(bp, bnapi, &raw_cons, &event);
1889                         if (likely(rc == -EIO))
1890                                 rx_pkts++;
1891                         else if (rc == -EBUSY)  /* partial completion */
1892                                 break;
1893                 } else if (unlikely(TX_CMP_TYPE(txcmp) ==
1894                                     CMPL_BASE_TYPE_HWRM_DONE)) {
1895                         bnxt_hwrm_handler(bp, txcmp);
1896                 } else {
1897                         netdev_err(bp->dev,
1898                                    "Invalid completion received on special ring\n");
1899                 }
1900                 raw_cons = NEXT_RAW_CMP(raw_cons);
1901
1902                 if (rx_pkts == budget)
1903                         break;
1904         }
1905
1906         cpr->cp_raw_cons = raw_cons;
1907         BNXT_CP_DB(cpr->cp_doorbell, cpr->cp_raw_cons);
1908         bnxt_db_write(bp, rxr->rx_doorbell, DB_KEY_RX | rxr->rx_prod);
1909
1910         if (event & BNXT_AGG_EVENT)
1911                 bnxt_db_write(bp, rxr->rx_agg_doorbell,
1912                               DB_KEY_RX | rxr->rx_agg_prod);
1913
1914         if (!bnxt_has_work(bp, cpr) && rx_pkts < budget) {
1915                 napi_complete_done(napi, rx_pkts);
1916                 BNXT_CP_DB_REARM(cpr->cp_doorbell, cpr->cp_raw_cons);
1917         }
1918         return rx_pkts;
1919 }
1920
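/* Standard NAPI poll handler: run bnxt_poll_work() until the budget is
 * exhausted or no work remains, then complete NAPI and re-arm the
 * completion ring interrupt.
 */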
1921 static int bnxt_poll(struct napi_struct *napi, int budget)
1922 {
1923         struct bnxt_napi *bnapi = container_of(napi, struct bnxt_napi, napi);
1924         struct bnxt *bp = bnapi->bp;
1925         struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
1926         int work_done = 0;
1927
1928         while (1) {
1929                 work_done += bnxt_poll_work(bp, bnapi, budget - work_done);
1930
1931                 if (work_done >= budget)
1932                         break;
1933
1934                 if (!bnxt_has_work(bp, cpr)) {
1935                         if (napi_complete_done(napi, work_done))
1936                                 BNXT_CP_DB_REARM(cpr->cp_doorbell,
1937                                                  cpr->cp_raw_cons);
1938                         break;
1939                 }
1940         }
1941         mmiowb();
1942         return work_done;
1943 }
1944
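/* Free all skbs still attached to the TX rings and unmap their buffers.
 * Push packets occupy two BDs with no separate DMA mapping; for others the
 * head is unmapped with dma_unmap_single() and each frag with
 * dma_unmap_page().
 */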
1945 static void bnxt_free_tx_skbs(struct bnxt *bp)
1946 {
1947         int i, max_idx;
1948         struct pci_dev *pdev = bp->pdev;
1949
1950         if (!bp->tx_ring)
1951                 return;
1952
1953         max_idx = bp->tx_nr_pages * TX_DESC_CNT;
1954         for (i = 0; i < bp->tx_nr_rings; i++) {
1955                 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
1956                 int j;
1957
1958                 for (j = 0; j < max_idx;) {
1959                         struct bnxt_sw_tx_bd *tx_buf = &txr->tx_buf_ring[j];
1960                         struct sk_buff *skb = tx_buf->skb;
1961                         int k, last;
1962
1963                         if (!skb) {
1964                                 j++;
1965                                 continue;
1966                         }
1967
1968                         tx_buf->skb = NULL;
1969
1970                         if (tx_buf->is_push) {
1971                                 dev_kfree_skb(skb);
1972                                 j += 2;
1973                                 continue;
1974                         }
1975
1976                         dma_unmap_single(&pdev->dev,
1977                                          dma_unmap_addr(tx_buf, mapping),
1978                                          skb_headlen(skb),
1979                                          PCI_DMA_TODEVICE);
1980
1981                         last = tx_buf->nr_frags;
1982                         j += 2;
1983                         for (k = 0; k < last; k++, j++) {
1984                                 int ring_idx = j & bp->tx_ring_mask;
1985                                 skb_frag_t *frag = &skb_shinfo(skb)->frags[k];
1986
1987                                 tx_buf = &txr->tx_buf_ring[ring_idx];
1988                                 dma_unmap_page(
1989                                         &pdev->dev,
1990                                         dma_unmap_addr(tx_buf, mapping),
1991                                         skb_frag_size(frag), PCI_DMA_TODEVICE);
1992                         }
1993                         dev_kfree_skb(skb);
1994                 }
1995                 netdev_tx_reset_queue(netdev_get_tx_queue(bp->dev, i));
1996         }
1997 }
1998
1999 static void bnxt_free_rx_skbs(struct bnxt *bp)
2000 {
2001         int i, max_idx, max_agg_idx;
2002         struct pci_dev *pdev = bp->pdev;
2003
2004         if (!bp->rx_ring)
2005                 return;
2006
2007         max_idx = bp->rx_nr_pages * RX_DESC_CNT;
2008         max_agg_idx = bp->rx_agg_nr_pages * RX_DESC_CNT;
2009         for (i = 0; i < bp->rx_nr_rings; i++) {
2010                 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
2011                 int j;
2012
2013                 if (rxr->rx_tpa) {
2014                         for (j = 0; j < MAX_TPA; j++) {
2015                                 struct bnxt_tpa_info *tpa_info =
2016                                                         &rxr->rx_tpa[j];
2017                                 u8 *data = tpa_info->data;
2018
2019                                 if (!data)
2020                                         continue;
2021
2022                                 dma_unmap_single_attrs(&pdev->dev,
2023                                                        tpa_info->mapping,
2024                                                        bp->rx_buf_use_size,
2025                                                        bp->rx_dir,
2026                                                        DMA_ATTR_WEAK_ORDERING);
2027
2028                                 tpa_info->data = NULL;
2029
2030                                 kfree(data);
2031                         }
2032                 }
2033
2034                 for (j = 0; j < max_idx; j++) {
2035                         struct bnxt_sw_rx_bd *rx_buf = &rxr->rx_buf_ring[j];
2036                         dma_addr_t mapping = rx_buf->mapping;
2037                         void *data = rx_buf->data;
2038
2039                         if (!data)
2040                                 continue;
2041
2042                         rx_buf->data = NULL;
2043
2044                         if (BNXT_RX_PAGE_MODE(bp)) {
2045                                 mapping -= bp->rx_dma_offset;
2046                                 dma_unmap_page_attrs(&pdev->dev, mapping,
2047                                                      PAGE_SIZE, bp->rx_dir,
2048                                                      DMA_ATTR_WEAK_ORDERING);
2049                                 __free_page(data);
2050                         } else {
2051                                 dma_unmap_single_attrs(&pdev->dev, mapping,
2052                                                        bp->rx_buf_use_size,
2053                                                        bp->rx_dir,
2054                                                        DMA_ATTR_WEAK_ORDERING);
2055                                 kfree(data);
2056                         }
2057                 }
2058
2059                 for (j = 0; j < max_agg_idx; j++) {
2060                         struct bnxt_sw_rx_agg_bd *rx_agg_buf =
2061                                 &rxr->rx_agg_ring[j];
2062                         struct page *page = rx_agg_buf->page;
2063
2064                         if (!page)
2065                                 continue;
2066
2067                         dma_unmap_page_attrs(&pdev->dev, rx_agg_buf->mapping,
2068                                              BNXT_RX_PAGE_SIZE,
2069                                              PCI_DMA_FROMDEVICE,
2070                                              DMA_ATTR_WEAK_ORDERING);
2071
2072                         rx_agg_buf->page = NULL;
2073                         __clear_bit(j, rxr->rx_agg_bmap);
2074
2075                         __free_page(page);
2076                 }
2077                 if (rxr->rx_page) {
2078                         __free_page(rxr->rx_page);
2079                         rxr->rx_page = NULL;
2080                 }
2081         }
2082 }
2083
2084 static void bnxt_free_skbs(struct bnxt *bp)
2085 {
2086         bnxt_free_tx_skbs(bp);
2087         bnxt_free_rx_skbs(bp);
2088 }
2089
2090 static void bnxt_free_ring(struct bnxt *bp, struct bnxt_ring_struct *ring)
2091 {
2092         struct pci_dev *pdev = bp->pdev;
2093         int i;
2094
2095         for (i = 0; i < ring->nr_pages; i++) {
2096                 if (!ring->pg_arr[i])
2097                         continue;
2098
2099                 dma_free_coherent(&pdev->dev, ring->page_size,
2100                                   ring->pg_arr[i], ring->dma_arr[i]);
2101
2102                 ring->pg_arr[i] = NULL;
2103         }
2104         if (ring->pg_tbl) {
2105                 dma_free_coherent(&pdev->dev, ring->nr_pages * 8,
2106                                   ring->pg_tbl, ring->pg_tbl_map);
2107                 ring->pg_tbl = NULL;
2108         }
2109         if (ring->vmem_size && *ring->vmem) {
2110                 vfree(*ring->vmem);
2111                 *ring->vmem = NULL;
2112         }
2113 }
2114
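/* Allocate the DMA descriptor pages for one ring, an indirection page
 * table when the ring spans multiple pages, and the software context
 * array (vmem) if the ring uses one.
 */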
2115 static int bnxt_alloc_ring(struct bnxt *bp, struct bnxt_ring_struct *ring)
2116 {
2117         int i;
2118         struct pci_dev *pdev = bp->pdev;
2119
2120         if (ring->nr_pages > 1) {
2121                 ring->pg_tbl = dma_alloc_coherent(&pdev->dev,
2122                                                   ring->nr_pages * 8,
2123                                                   &ring->pg_tbl_map,
2124                                                   GFP_KERNEL);
2125                 if (!ring->pg_tbl)
2126                         return -ENOMEM;
2127         }
2128
2129         for (i = 0; i < ring->nr_pages; i++) {
2130                 ring->pg_arr[i] = dma_alloc_coherent(&pdev->dev,
2131                                                      ring->page_size,
2132                                                      &ring->dma_arr[i],
2133                                                      GFP_KERNEL);
2134                 if (!ring->pg_arr[i])
2135                         return -ENOMEM;
2136
2137                 if (ring->nr_pages > 1)
2138                         ring->pg_tbl[i] = cpu_to_le64(ring->dma_arr[i]);
2139         }
2140
2141         if (ring->vmem_size) {
2142                 *ring->vmem = vzalloc(ring->vmem_size);
2143                 if (!(*ring->vmem))
2144                         return -ENOMEM;
2145         }
2146         return 0;
2147 }
2148
2149 static void bnxt_free_rx_rings(struct bnxt *bp)
2150 {
2151         int i;
2152
2153         if (!bp->rx_ring)
2154                 return;
2155
2156         for (i = 0; i < bp->rx_nr_rings; i++) {
2157                 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
2158                 struct bnxt_ring_struct *ring;
2159
2160                 if (rxr->xdp_prog)
2161                         bpf_prog_put(rxr->xdp_prog);
2162
2163                 kfree(rxr->rx_tpa);
2164                 rxr->rx_tpa = NULL;
2165
2166                 kfree(rxr->rx_agg_bmap);
2167                 rxr->rx_agg_bmap = NULL;
2168
2169                 ring = &rxr->rx_ring_struct;
2170                 bnxt_free_ring(bp, ring);
2171
2172                 ring = &rxr->rx_agg_ring_struct;
2173                 bnxt_free_ring(bp, ring);
2174         }
2175 }
2176
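/* Allocate per-RX-ring resources: the descriptor ring and, if aggregation
 * is enabled, the agg descriptor ring, its free-buffer bitmap, and the TPA
 * info array used for LRO/GRO.
 */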
2177 static int bnxt_alloc_rx_rings(struct bnxt *bp)
2178 {
2179         int i, rc, agg_rings = 0, tpa_rings = 0;
2180
2181         if (!bp->rx_ring)
2182                 return -ENOMEM;
2183
2184         if (bp->flags & BNXT_FLAG_AGG_RINGS)
2185                 agg_rings = 1;
2186
2187         if (bp->flags & BNXT_FLAG_TPA)
2188                 tpa_rings = 1;
2189
2190         for (i = 0; i < bp->rx_nr_rings; i++) {
2191                 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
2192                 struct bnxt_ring_struct *ring;
2193
2194                 ring = &rxr->rx_ring_struct;
2195
2196                 rc = bnxt_alloc_ring(bp, ring);
2197                 if (rc)
2198                         return rc;
2199
2200                 if (agg_rings) {
2201                         u16 mem_size;
2202
2203                         ring = &rxr->rx_agg_ring_struct;
2204                         rc = bnxt_alloc_ring(bp, ring);
2205                         if (rc)
2206                                 return rc;
2207
2208                         rxr->rx_agg_bmap_size = bp->rx_agg_ring_mask + 1;
2209                         mem_size = rxr->rx_agg_bmap_size / 8;
2210                         rxr->rx_agg_bmap = kzalloc(mem_size, GFP_KERNEL);
2211                         if (!rxr->rx_agg_bmap)
2212                                 return -ENOMEM;
2213
2214                         if (tpa_rings) {
2215                                 rxr->rx_tpa = kcalloc(MAX_TPA,
2216                                                 sizeof(struct bnxt_tpa_info),
2217                                                 GFP_KERNEL);
2218                                 if (!rxr->rx_tpa)
2219                                         return -ENOMEM;
2220                         }
2221                 }
2222         }
2223         return 0;
2224 }
2225
2226 static void bnxt_free_tx_rings(struct bnxt *bp)
2227 {
2228         int i;
2229         struct pci_dev *pdev = bp->pdev;
2230
2231         if (!bp->tx_ring)
2232                 return;
2233
2234         for (i = 0; i < bp->tx_nr_rings; i++) {
2235                 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
2236                 struct bnxt_ring_struct *ring;
2237
2238                 if (txr->tx_push) {
2239                         dma_free_coherent(&pdev->dev, bp->tx_push_size,
2240                                           txr->tx_push, txr->tx_push_mapping);
2241                         txr->tx_push = NULL;
2242                 }
2243
2244                 ring = &txr->tx_ring_struct;
2245
2246                 bnxt_free_ring(bp, ring);
2247         }
2248 }
2249
2250 static int bnxt_alloc_tx_rings(struct bnxt *bp)
2251 {
2252         int i, j, rc;
2253         struct pci_dev *pdev = bp->pdev;
2254
2255         bp->tx_push_size = 0;
2256         if (bp->tx_push_thresh) {
2257                 int push_size;
2258
2259                 push_size  = L1_CACHE_ALIGN(sizeof(struct tx_push_bd) +
2260                                         bp->tx_push_thresh);
2261
2262                 if (push_size > 256) {
2263                         push_size = 0;
2264                         bp->tx_push_thresh = 0;
2265                 }
2266
2267                 bp->tx_push_size = push_size;
2268         }
2269
2270         for (i = 0, j = 0; i < bp->tx_nr_rings; i++) {
2271                 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
2272                 struct bnxt_ring_struct *ring;
2273
2274                 ring = &txr->tx_ring_struct;
2275
2276                 rc = bnxt_alloc_ring(bp, ring);
2277                 if (rc)
2278                         return rc;
2279
2280                 if (bp->tx_push_size) {
2281                         dma_addr_t mapping;
2282
2283                         /* One DMA buffer pre-allocated per ring to
2284                          * stage TX push operations
2285                          */
2286                         txr->tx_push = dma_alloc_coherent(&pdev->dev,
2287                                                 bp->tx_push_size,
2288                                                 &txr->tx_push_mapping,
2289                                                 GFP_KERNEL);
2290
2291                         if (!txr->tx_push)
2292                                 return -ENOMEM;
2293
2294                         mapping = txr->tx_push_mapping +
2295                                 sizeof(struct tx_push_bd);
2296                         txr->data_mapping = cpu_to_le64(mapping);
2297
2298                         memset(txr->tx_push, 0, sizeof(struct tx_push_bd));
2299                 }
2300                 ring->queue_id = bp->q_info[j].queue_id;
2301                 if (i < bp->tx_nr_rings_xdp)
2302                         continue;
2303                 if (i % bp->tx_nr_rings_per_tc == (bp->tx_nr_rings_per_tc - 1))
2304                         j++;
2305         }
2306         return 0;
2307 }
2308
2309 static void bnxt_free_cp_rings(struct bnxt *bp)
2310 {
2311         int i;
2312
2313         if (!bp->bnapi)
2314                 return;
2315
2316         for (i = 0; i < bp->cp_nr_rings; i++) {
2317                 struct bnxt_napi *bnapi = bp->bnapi[i];
2318                 struct bnxt_cp_ring_info *cpr;
2319                 struct bnxt_ring_struct *ring;
2320
2321                 if (!bnapi)
2322                         continue;
2323
2324                 cpr = &bnapi->cp_ring;
2325                 ring = &cpr->cp_ring_struct;
2326
2327                 bnxt_free_ring(bp, ring);
2328         }
2329 }
2330
2331 static int bnxt_alloc_cp_rings(struct bnxt *bp)
2332 {
2333         int i, rc;
2334
2335         for (i = 0; i < bp->cp_nr_rings; i++) {
2336                 struct bnxt_napi *bnapi = bp->bnapi[i];
2337                 struct bnxt_cp_ring_info *cpr;
2338                 struct bnxt_ring_struct *ring;
2339
2340                 if (!bnapi)
2341                         continue;
2342
2343                 cpr = &bnapi->cp_ring;
2344                 ring = &cpr->cp_ring_struct;
2345
2346                 rc = bnxt_alloc_ring(bp, ring);
2347                 if (rc)
2348                         return rc;
2349         }
2350         return 0;
2351 }
2352
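/* Point every ring_struct at its descriptor pages, DMA address arrays and
 * software ring storage so the generic bnxt_alloc_ring()/bnxt_free_ring()
 * helpers can operate on completion, RX, RX agg and TX rings alike.
 */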
2353 static void bnxt_init_ring_struct(struct bnxt *bp)
2354 {
2355         int i;
2356
2357         for (i = 0; i < bp->cp_nr_rings; i++) {
2358                 struct bnxt_napi *bnapi = bp->bnapi[i];
2359                 struct bnxt_cp_ring_info *cpr;
2360                 struct bnxt_rx_ring_info *rxr;
2361                 struct bnxt_tx_ring_info *txr;
2362                 struct bnxt_ring_struct *ring;
2363
2364                 if (!bnapi)
2365                         continue;
2366
2367                 cpr = &bnapi->cp_ring;
2368                 ring = &cpr->cp_ring_struct;
2369                 ring->nr_pages = bp->cp_nr_pages;
2370                 ring->page_size = HW_CMPD_RING_SIZE;
2371                 ring->pg_arr = (void **)cpr->cp_desc_ring;
2372                 ring->dma_arr = cpr->cp_desc_mapping;
2373                 ring->vmem_size = 0;
2374
2375                 rxr = bnapi->rx_ring;
2376                 if (!rxr)
2377                         goto skip_rx;
2378
2379                 ring = &rxr->rx_ring_struct;
2380                 ring->nr_pages = bp->rx_nr_pages;
2381                 ring->page_size = HW_RXBD_RING_SIZE;
2382                 ring->pg_arr = (void **)rxr->rx_desc_ring;
2383                 ring->dma_arr = rxr->rx_desc_mapping;
2384                 ring->vmem_size = SW_RXBD_RING_SIZE * bp->rx_nr_pages;
2385                 ring->vmem = (void **)&rxr->rx_buf_ring;
2386
2387                 ring = &rxr->rx_agg_ring_struct;
2388                 ring->nr_pages = bp->rx_agg_nr_pages;
2389                 ring->page_size = HW_RXBD_RING_SIZE;
2390                 ring->pg_arr = (void **)rxr->rx_agg_desc_ring;
2391                 ring->dma_arr = rxr->rx_agg_desc_mapping;
2392                 ring->vmem_size = SW_RXBD_AGG_RING_SIZE * bp->rx_agg_nr_pages;
2393                 ring->vmem = (void **)&rxr->rx_agg_ring;
2394
2395 skip_rx:
2396                 txr = bnapi->tx_ring;
2397                 if (!txr)
2398                         continue;
2399
2400                 ring = &txr->tx_ring_struct;
2401                 ring->nr_pages = bp->tx_nr_pages;
2402                 ring->page_size = HW_RXBD_RING_SIZE;
2403                 ring->pg_arr = (void **)txr->tx_desc_ring;
2404                 ring->dma_arr = txr->tx_desc_mapping;
2405                 ring->vmem_size = SW_TXBD_RING_SIZE * bp->tx_nr_pages;
2406                 ring->vmem = (void **)&txr->tx_buf_ring;
2407         }
2408 }
2409
2410 static void bnxt_init_rxbd_pages(struct bnxt_ring_struct *ring, u32 type)
2411 {
2412         int i;
2413         u32 prod;
2414         struct rx_bd **rx_buf_ring;
2415
2416         rx_buf_ring = (struct rx_bd **)ring->pg_arr;
2417         for (i = 0, prod = 0; i < ring->nr_pages; i++) {
2418                 int j;
2419                 struct rx_bd *rxbd;
2420
2421                 rxbd = rx_buf_ring[i];
2422                 if (!rxbd)
2423                         continue;
2424
2425                 for (j = 0; j < RX_DESC_CNT; j++, rxbd++, prod++) {
2426                         rxbd->rx_bd_len_flags_type = cpu_to_le32(type);
2427                         rxbd->rx_bd_opaque = prod;
2428                 }
2429         }
2430 }
2431
2432 static int bnxt_init_one_rx_ring(struct bnxt *bp, int ring_nr)
2433 {
2434         struct net_device *dev = bp->dev;
2435         struct bnxt_rx_ring_info *rxr;
2436         struct bnxt_ring_struct *ring;
2437         u32 prod, type;
2438         int i;
2439
2440         type = (bp->rx_buf_use_size << RX_BD_LEN_SHIFT) |
2441                 RX_BD_TYPE_RX_PACKET_BD | RX_BD_FLAGS_EOP;
2442
2443         if (NET_IP_ALIGN == 2)
2444                 type |= RX_BD_FLAGS_SOP;
2445
2446         rxr = &bp->rx_ring[ring_nr];
2447         ring = &rxr->rx_ring_struct;
2448         bnxt_init_rxbd_pages(ring, type);
2449
2450         if (BNXT_RX_PAGE_MODE(bp) && bp->xdp_prog) {
2451                 rxr->xdp_prog = bpf_prog_add(bp->xdp_prog, 1);
2452                 if (IS_ERR(rxr->xdp_prog)) {
2453                         int rc = PTR_ERR(rxr->xdp_prog);
2454
2455                         rxr->xdp_prog = NULL;
2456                         return rc;
2457                 }
2458         }
2459         prod = rxr->rx_prod;
2460         for (i = 0; i < bp->rx_ring_size; i++) {
2461                 if (bnxt_alloc_rx_data(bp, rxr, prod, GFP_KERNEL) != 0) {
2462                         netdev_warn(dev, "init'ed rx ring %d with %d/%d skbs only\n",
2463                                     ring_nr, i, bp->rx_ring_size);
2464                         break;
2465                 }
2466                 prod = NEXT_RX(prod);
2467         }
2468         rxr->rx_prod = prod;
2469         ring->fw_ring_id = INVALID_HW_RING_ID;
2470
2471         ring = &rxr->rx_agg_ring_struct;
2472         ring->fw_ring_id = INVALID_HW_RING_ID;
2473
2474         if (!(bp->flags & BNXT_FLAG_AGG_RINGS))
2475                 return 0;
2476
2477         type = ((u32)BNXT_RX_PAGE_SIZE << RX_BD_LEN_SHIFT) |
2478                 RX_BD_TYPE_RX_AGG_BD | RX_BD_FLAGS_SOP;
2479
2480         bnxt_init_rxbd_pages(ring, type);
2481
2482         prod = rxr->rx_agg_prod;
2483         for (i = 0; i < bp->rx_agg_ring_size; i++) {
2484                 if (bnxt_alloc_rx_page(bp, rxr, prod, GFP_KERNEL) != 0) {
2485                         netdev_warn(dev, "init'ed rx ring %d with %d/%d pages only\n",
2486                                     ring_nr, i, bp->rx_agg_ring_size);
2487                         break;
2488                 }
2489                 prod = NEXT_RX_AGG(prod);
2490         }
2491         rxr->rx_agg_prod = prod;
2492
2493         if (bp->flags & BNXT_FLAG_TPA) {
2494                 if (rxr->rx_tpa) {
2495                         u8 *data;
2496                         dma_addr_t mapping;
2497
2498                         for (i = 0; i < MAX_TPA; i++) {
2499                                 data = __bnxt_alloc_rx_data(bp, &mapping,
2500                                                             GFP_KERNEL);
2501                                 if (!data)
2502                                         return -ENOMEM;
2503
2504                                 rxr->rx_tpa[i].data = data;
2505                                 rxr->rx_tpa[i].data_ptr = data + bp->rx_offset;
2506                                 rxr->rx_tpa[i].mapping = mapping;
2507                         }
2508                 } else {
2509                         netdev_err(bp->dev, "No resource allocated for LRO/GRO\n");
2510                         return -ENOMEM;
2511                 }
2512         }
2513
2514         return 0;
2515 }
2516
2517 static void bnxt_init_cp_rings(struct bnxt *bp)
2518 {
2519         int i;
2520
2521         for (i = 0; i < bp->cp_nr_rings; i++) {
2522                 struct bnxt_cp_ring_info *cpr = &bp->bnapi[i]->cp_ring;
2523                 struct bnxt_ring_struct *ring = &cpr->cp_ring_struct;
2524
2525                 ring->fw_ring_id = INVALID_HW_RING_ID;
2526         }
2527 }
2528
2529 static int bnxt_init_rx_rings(struct bnxt *bp)
2530 {
2531         int i, rc = 0;
2532
2533         if (BNXT_RX_PAGE_MODE(bp)) {
2534                 bp->rx_offset = NET_IP_ALIGN + XDP_PACKET_HEADROOM;
2535                 bp->rx_dma_offset = XDP_PACKET_HEADROOM;
2536         } else {
2537                 bp->rx_offset = BNXT_RX_OFFSET;
2538                 bp->rx_dma_offset = BNXT_RX_DMA_OFFSET;
2539         }
2540
2541         for (i = 0; i < bp->rx_nr_rings; i++) {
2542                 rc = bnxt_init_one_rx_ring(bp, i);
2543                 if (rc)
2544                         break;
2545         }
2546
2547         return rc;
2548 }
2549
2550 static int bnxt_init_tx_rings(struct bnxt *bp)
2551 {
2552         u16 i;
2553
2554         bp->tx_wake_thresh = max_t(int, bp->tx_ring_size / 2,
2555                                    MAX_SKB_FRAGS + 1);
2556
2557         for (i = 0; i < bp->tx_nr_rings; i++) {
2558                 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
2559                 struct bnxt_ring_struct *ring = &txr->tx_ring_struct;
2560
2561                 ring->fw_ring_id = INVALID_HW_RING_ID;
2562         }
2563
2564         return 0;
2565 }
2566
2567 static void bnxt_free_ring_grps(struct bnxt *bp)
2568 {
2569         kfree(bp->grp_info);
2570         bp->grp_info = NULL;
2571 }
2572
2573 static int bnxt_init_ring_grps(struct bnxt *bp, bool irq_re_init)
2574 {
2575         int i;
2576
2577         if (irq_re_init) {
2578                 bp->grp_info = kcalloc(bp->cp_nr_rings,
2579                                        sizeof(struct bnxt_ring_grp_info),
2580                                        GFP_KERNEL);
2581                 if (!bp->grp_info)
2582                         return -ENOMEM;
2583         }
2584         for (i = 0; i < bp->cp_nr_rings; i++) {
2585                 if (irq_re_init)
2586                         bp->grp_info[i].fw_stats_ctx = INVALID_HW_RING_ID;
2587                 bp->grp_info[i].fw_grp_id = INVALID_HW_RING_ID;
2588                 bp->grp_info[i].rx_fw_ring_id = INVALID_HW_RING_ID;
2589                 bp->grp_info[i].agg_fw_ring_id = INVALID_HW_RING_ID;
2590                 bp->grp_info[i].cp_fw_ring_id = INVALID_HW_RING_ID;
2591         }
2592         return 0;
2593 }
2594
2595 static void bnxt_free_vnics(struct bnxt *bp)
2596 {
2597         kfree(bp->vnic_info);
2598         bp->vnic_info = NULL;
2599         bp->nr_vnics = 0;
2600 }
2601
2602 static int bnxt_alloc_vnics(struct bnxt *bp)
2603 {
2604         int num_vnics = 1;
2605
2606 #ifdef CONFIG_RFS_ACCEL
2607         if (bp->flags & BNXT_FLAG_RFS)
2608                 num_vnics += bp->rx_nr_rings;
2609 #endif
2610
2611         if (BNXT_CHIP_TYPE_NITRO_A0(bp))
2612                 num_vnics++;
2613
2614         bp->vnic_info = kcalloc(num_vnics, sizeof(struct bnxt_vnic_info),
2615                                 GFP_KERNEL);
2616         if (!bp->vnic_info)
2617                 return -ENOMEM;
2618
2619         bp->nr_vnics = num_vnics;
2620         return 0;
2621 }
2622
2623 static void bnxt_init_vnics(struct bnxt *bp)
2624 {
2625         int i;
2626
2627         for (i = 0; i < bp->nr_vnics; i++) {
2628                 struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
2629
2630                 vnic->fw_vnic_id = INVALID_HW_RING_ID;
2631                 vnic->fw_rss_cos_lb_ctx[0] = INVALID_HW_RING_ID;
2632                 vnic->fw_rss_cos_lb_ctx[1] = INVALID_HW_RING_ID;
2633                 vnic->fw_l2_ctx_id = INVALID_HW_RING_ID;
2634
2635                 if (bp->vnic_info[i].rss_hash_key) {
2636                         if (i == 0)
2637                                 prandom_bytes(vnic->rss_hash_key,
2638                                               HW_HASH_KEY_SIZE);
2639                         else
2640                                 memcpy(vnic->rss_hash_key,
2641                                        bp->vnic_info[0].rss_hash_key,
2642                                        HW_HASH_KEY_SIZE);
2643                 }
2644         }
2645 }
2646
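/* Number of descriptor pages needed for ring_size entries, rounded up to
 * the next power of two.  For example, assuming 256 descriptors per page,
 * a ring size of 600 yields 600 / 256 = 2, incremented to 3, then rounded
 * up to 4 pages.
 */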
2647 static int bnxt_calc_nr_ring_pages(u32 ring_size, int desc_per_pg)
2648 {
2649         int pages;
2650
2651         pages = ring_size / desc_per_pg;
2652
2653         if (!pages)
2654                 return 1;
2655
2656         pages++;
2657
2658         while (pages & (pages - 1))
2659                 pages++;
2660
2661         return pages;
2662 }
2663
2664 void bnxt_set_tpa_flags(struct bnxt *bp)
2665 {
2666         bp->flags &= ~BNXT_FLAG_TPA;
2667         if (bp->flags & BNXT_FLAG_NO_AGG_RINGS)
2668                 return;
2669         if (bp->dev->features & NETIF_F_LRO)
2670                 bp->flags |= BNXT_FLAG_LRO;
2671         if (bp->dev->features & NETIF_F_GRO)
2672                 bp->flags |= BNXT_FLAG_GRO;
2673 }
2674
2675 /* bp->rx_ring_size, bp->tx_ring_size, dev->mtu, BNXT_FLAG_{G|L}RO flags must
2676  * be set on entry.
2677  */
2678 void bnxt_set_ring_params(struct bnxt *bp)
2679 {
2680         u32 ring_size, rx_size, rx_space;
2681         u32 agg_factor = 0, agg_ring_size = 0;
2682
2683         /* 8 bytes for FCS (CRC) and VLAN tag */
2684         rx_size = SKB_DATA_ALIGN(bp->dev->mtu + ETH_HLEN + NET_IP_ALIGN + 8);
2685
2686         rx_space = rx_size + NET_SKB_PAD +
2687                 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
2688
2689         bp->rx_copy_thresh = BNXT_RX_COPY_THRESH;
2690         ring_size = bp->rx_ring_size;
2691         bp->rx_agg_ring_size = 0;
2692         bp->rx_agg_nr_pages = 0;
2693
2694         if (bp->flags & BNXT_FLAG_TPA)
2695                 agg_factor = min_t(u32, 4, 65536 / BNXT_RX_PAGE_SIZE);
2696
2697         bp->flags &= ~BNXT_FLAG_JUMBO;
2698         if (rx_space > PAGE_SIZE && !(bp->flags & BNXT_FLAG_NO_AGG_RINGS)) {
2699                 u32 jumbo_factor;
2700
2701                 bp->flags |= BNXT_FLAG_JUMBO;
2702                 jumbo_factor = PAGE_ALIGN(bp->dev->mtu - 40) >> PAGE_SHIFT;
2703                 if (jumbo_factor > agg_factor)
2704                         agg_factor = jumbo_factor;
2705         }
2706         agg_ring_size = ring_size * agg_factor;
2707
2708         if (agg_ring_size) {
2709                 bp->rx_agg_nr_pages = bnxt_calc_nr_ring_pages(agg_ring_size,
2710                                                         RX_DESC_CNT);
2711                 if (bp->rx_agg_nr_pages > MAX_RX_AGG_PAGES) {
2712                         u32 tmp = agg_ring_size;
2713
2714                         bp->rx_agg_nr_pages = MAX_RX_AGG_PAGES;
2715                         agg_ring_size = MAX_RX_AGG_PAGES * RX_DESC_CNT - 1;
2716                         netdev_warn(bp->dev, "rx agg ring size %d reduced to %d.\n",
2717                                     tmp, agg_ring_size);
2718                 }
2719                 bp->rx_agg_ring_size = agg_ring_size;
2720                 bp->rx_agg_ring_mask = (bp->rx_agg_nr_pages * RX_DESC_CNT) - 1;
2721                 rx_size = SKB_DATA_ALIGN(BNXT_RX_COPY_THRESH + NET_IP_ALIGN);
2722                 rx_space = rx_size + NET_SKB_PAD +
2723                         SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
2724         }
2725
2726         bp->rx_buf_use_size = rx_size;
2727         bp->rx_buf_size = rx_space;
2728
2729         bp->rx_nr_pages = bnxt_calc_nr_ring_pages(ring_size, RX_DESC_CNT);
2730         bp->rx_ring_mask = (bp->rx_nr_pages * RX_DESC_CNT) - 1;
2731
2732         ring_size = bp->tx_ring_size;
2733         bp->tx_nr_pages = bnxt_calc_nr_ring_pages(ring_size, TX_DESC_CNT);
2734         bp->tx_ring_mask = (bp->tx_nr_pages * TX_DESC_CNT) - 1;
2735
2736         ring_size = bp->rx_ring_size * (2 + agg_factor) + bp->tx_ring_size;
2737         bp->cp_ring_size = ring_size;
2738
2739         bp->cp_nr_pages = bnxt_calc_nr_ring_pages(ring_size, CP_DESC_CNT);
2740         if (bp->cp_nr_pages > MAX_CP_PAGES) {
2741                 bp->cp_nr_pages = MAX_CP_PAGES;
2742                 bp->cp_ring_size = MAX_CP_PAGES * CP_DESC_CNT - 1;
2743                 netdev_warn(bp->dev, "completion ring size %d reduced to %d.\n",
2744                             ring_size, bp->cp_ring_size);
2745         }
2746         bp->cp_bit = bp->cp_nr_pages * CP_DESC_CNT;
2747         bp->cp_ring_mask = bp->cp_bit - 1;
2748 }
2749
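/* Switch between page-based RX buffers (used in XDP page mode: one packet
 * per page, bidirectional DMA, no aggregation rings or LRO) and the normal
 * kmalloc'ed data buffers, selecting the matching rx_skb_func.
 */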
2750 int bnxt_set_rx_skb_mode(struct bnxt *bp, bool page_mode)
2751 {
2752         if (page_mode) {
2753                 if (bp->dev->mtu > BNXT_MAX_PAGE_MODE_MTU)
2754                         return -EOPNOTSUPP;
2755                 bp->dev->max_mtu = BNXT_MAX_PAGE_MODE_MTU;
2756                 bp->flags &= ~BNXT_FLAG_AGG_RINGS;
2757                 bp->flags |= BNXT_FLAG_NO_AGG_RINGS | BNXT_FLAG_RX_PAGE_MODE;
2758                 bp->dev->hw_features &= ~NETIF_F_LRO;
2759                 bp->dev->features &= ~NETIF_F_LRO;
2760                 bp->rx_dir = DMA_BIDIRECTIONAL;
2761                 bp->rx_skb_func = bnxt_rx_page_skb;
2762         } else {
2763                 bp->dev->max_mtu = BNXT_MAX_MTU;
2764                 bp->flags &= ~BNXT_FLAG_RX_PAGE_MODE;
2765                 bp->rx_dir = DMA_FROM_DEVICE;
2766                 bp->rx_skb_func = bnxt_rx_skb;
2767         }
2768         return 0;
2769 }
2770
2771 static void bnxt_free_vnic_attributes(struct bnxt *bp)
2772 {
2773         int i;
2774         struct bnxt_vnic_info *vnic;
2775         struct pci_dev *pdev = bp->pdev;
2776
2777         if (!bp->vnic_info)
2778                 return;
2779
2780         for (i = 0; i < bp->nr_vnics; i++) {
2781                 vnic = &bp->vnic_info[i];
2782
2783                 kfree(vnic->fw_grp_ids);
2784                 vnic->fw_grp_ids = NULL;
2785
2786                 kfree(vnic->uc_list);
2787                 vnic->uc_list = NULL;
2788
2789                 if (vnic->mc_list) {
2790                         dma_free_coherent(&pdev->dev, vnic->mc_list_size,
2791                                           vnic->mc_list, vnic->mc_list_mapping);
2792                         vnic->mc_list = NULL;
2793                 }
2794
2795                 if (vnic->rss_table) {
2796                         dma_free_coherent(&pdev->dev, PAGE_SIZE,
2797                                           vnic->rss_table,
2798                                           vnic->rss_table_dma_addr);
2799                         vnic->rss_table = NULL;
2800                 }
2801
2802                 vnic->rss_hash_key = NULL;
2803                 vnic->flags = 0;
2804         }
2805 }
2806
2807 static int bnxt_alloc_vnic_attributes(struct bnxt *bp)
2808 {
2809         int i, rc = 0, size;
2810         struct bnxt_vnic_info *vnic;
2811         struct pci_dev *pdev = bp->pdev;
2812         int max_rings;
2813
2814         for (i = 0; i < bp->nr_vnics; i++) {
2815                 vnic = &bp->vnic_info[i];
2816
2817                 if (vnic->flags & BNXT_VNIC_UCAST_FLAG) {
2818                         int mem_size = (BNXT_MAX_UC_ADDRS - 1) * ETH_ALEN;
2819
2820                         if (mem_size > 0) {
2821                                 vnic->uc_list = kmalloc(mem_size, GFP_KERNEL);
2822                                 if (!vnic->uc_list) {
2823                                         rc = -ENOMEM;
2824                                         goto out;
2825                                 }
2826                         }
2827                 }
2828
2829                 if (vnic->flags & BNXT_VNIC_MCAST_FLAG) {
2830                         vnic->mc_list_size = BNXT_MAX_MC_ADDRS * ETH_ALEN;
2831                         vnic->mc_list =
2832                                 dma_alloc_coherent(&pdev->dev,
2833                                                    vnic->mc_list_size,
2834                                                    &vnic->mc_list_mapping,
2835                                                    GFP_KERNEL);
2836                         if (!vnic->mc_list) {
2837                                 rc = -ENOMEM;
2838                                 goto out;
2839                         }
2840                 }
2841
2842                 if (vnic->flags & BNXT_VNIC_RSS_FLAG)
2843                         max_rings = bp->rx_nr_rings;
2844                 else
2845                         max_rings = 1;
2846
2847                 vnic->fw_grp_ids = kcalloc(max_rings, sizeof(u16), GFP_KERNEL);
2848                 if (!vnic->fw_grp_ids) {
2849                         rc = -ENOMEM;
2850                         goto out;
2851                 }
2852
2853                 if ((bp->flags & BNXT_FLAG_NEW_RSS_CAP) &&
2854                     !(vnic->flags & BNXT_VNIC_RSS_FLAG))
2855                         continue;
2856
2857                 /* Allocate rss table and hash key */
2858                 vnic->rss_table = dma_alloc_coherent(&pdev->dev, PAGE_SIZE,
2859                                                      &vnic->rss_table_dma_addr,
2860                                                      GFP_KERNEL);
2861                 if (!vnic->rss_table) {
2862                         rc = -ENOMEM;
2863                         goto out;
2864                 }
2865
2866                 size = L1_CACHE_ALIGN(HW_HASH_INDEX_SIZE * sizeof(u16));
2867
2868                 vnic->rss_hash_key = ((void *)vnic->rss_table) + size;
2869                 vnic->rss_hash_key_dma_addr = vnic->rss_table_dma_addr + size;
2870         }
2871         return 0;
2872
2873 out:
2874         return rc;
2875 }
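/* Editor's note on the layout above: the RSS indirection table and the
 * hash key share a single coherent DMA page.  The key starts at the
 * first cache-line-aligned offset past the table:
 *
 *	offset 0    : rss_table, HW_HASH_INDEX_SIZE * sizeof(u16) bytes
 *	offset size : rss_hash_key
 *
 * where size = L1_CACHE_ALIGN(HW_HASH_INDEX_SIZE * sizeof(u16)), so the
 * CPU pointer and the DMA address are advanced by the same amount.
 */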
2876
2877 static void bnxt_free_hwrm_resources(struct bnxt *bp)
2878 {
2879         struct pci_dev *pdev = bp->pdev;
2880
2881         dma_free_coherent(&pdev->dev, PAGE_SIZE, bp->hwrm_cmd_resp_addr,
2882                           bp->hwrm_cmd_resp_dma_addr);
2883
2884         bp->hwrm_cmd_resp_addr = NULL;
2885         if (bp->hwrm_dbg_resp_addr) {
2886                 dma_free_coherent(&pdev->dev, HWRM_DBG_REG_BUF_SIZE,
2887                                   bp->hwrm_dbg_resp_addr,
2888                                   bp->hwrm_dbg_resp_dma_addr);
2889
2890                 bp->hwrm_dbg_resp_addr = NULL;
2891         }
2892 }
2893
2894 static int bnxt_alloc_hwrm_resources(struct bnxt *bp)
2895 {
2896         struct pci_dev *pdev = bp->pdev;
2897
2898         bp->hwrm_cmd_resp_addr = dma_alloc_coherent(&pdev->dev, PAGE_SIZE,
2899                                                    &bp->hwrm_cmd_resp_dma_addr,
2900                                                    GFP_KERNEL);
2901         if (!bp->hwrm_cmd_resp_addr)
2902                 return -ENOMEM;
2903         bp->hwrm_dbg_resp_addr = dma_alloc_coherent(&pdev->dev,
2904                                                     HWRM_DBG_REG_BUF_SIZE,
2905                                                     &bp->hwrm_dbg_resp_dma_addr,
2906                                                     GFP_KERNEL);
2907         if (!bp->hwrm_dbg_resp_addr)
2908                 netdev_warn(bp->dev, "failed to allocate debug register DMA memory\n");
2909
2910         return 0;
2911 }
2912
2913 static void bnxt_free_hwrm_short_cmd_req(struct bnxt *bp)
2914 {
2915         if (bp->hwrm_short_cmd_req_addr) {
2916                 struct pci_dev *pdev = bp->pdev;
2917
2918                 dma_free_coherent(&pdev->dev, BNXT_HWRM_MAX_REQ_LEN,
2919                                   bp->hwrm_short_cmd_req_addr,
2920                                   bp->hwrm_short_cmd_req_dma_addr);
2921                 bp->hwrm_short_cmd_req_addr = NULL;
2922         }
2923 }
2924
2925 static int bnxt_alloc_hwrm_short_cmd_req(struct bnxt *bp)
2926 {
2927         struct pci_dev *pdev = bp->pdev;
2928
2929         bp->hwrm_short_cmd_req_addr =
2930                 dma_alloc_coherent(&pdev->dev, BNXT_HWRM_MAX_REQ_LEN,
2931                                    &bp->hwrm_short_cmd_req_dma_addr,
2932                                    GFP_KERNEL);
2933         if (!bp->hwrm_short_cmd_req_addr)
2934                 return -ENOMEM;
2935
2936         return 0;
2937 }
2938
2939 static void bnxt_free_stats(struct bnxt *bp)
2940 {
2941         u32 size, i;
2942         struct pci_dev *pdev = bp->pdev;
2943
2944         if (bp->hw_rx_port_stats) {
2945                 dma_free_coherent(&pdev->dev, bp->hw_port_stats_size,
2946                                   bp->hw_rx_port_stats,
2947                                   bp->hw_rx_port_stats_map);
2948                 bp->hw_rx_port_stats = NULL;
2949                 bp->flags &= ~BNXT_FLAG_PORT_STATS;
2950         }
2951
2952         if (!bp->bnapi)
2953                 return;
2954
2955         size = sizeof(struct ctx_hw_stats);
2956
2957         for (i = 0; i < bp->cp_nr_rings; i++) {
2958                 struct bnxt_napi *bnapi = bp->bnapi[i];
2959                 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
2960
2961                 if (cpr->hw_stats) {
2962                         dma_free_coherent(&pdev->dev, size, cpr->hw_stats,
2963                                           cpr->hw_stats_map);
2964                         cpr->hw_stats = NULL;
2965                 }
2966         }
2967 }
2968
2969 static int bnxt_alloc_stats(struct bnxt *bp)
2970 {
2971         u32 size, i;
2972         struct pci_dev *pdev = bp->pdev;
2973
2974         size = sizeof(struct ctx_hw_stats);
2975
2976         for (i = 0; i < bp->cp_nr_rings; i++) {
2977                 struct bnxt_napi *bnapi = bp->bnapi[i];
2978                 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
2979
2980                 cpr->hw_stats = dma_alloc_coherent(&pdev->dev, size,
2981                                                    &cpr->hw_stats_map,
2982                                                    GFP_KERNEL);
2983                 if (!cpr->hw_stats)
2984                         return -ENOMEM;
2985
2986                 cpr->hw_stats_ctx_id = INVALID_STATS_CTX_ID;
2987         }
2988
2989         if (BNXT_PF(bp) && bp->chip_num != CHIP_NUM_58700) {
2990                 bp->hw_port_stats_size = sizeof(struct rx_port_stats) +
2991                                          sizeof(struct tx_port_stats) + 1024;
2992
2993                 bp->hw_rx_port_stats =
2994                         dma_alloc_coherent(&pdev->dev, bp->hw_port_stats_size,
2995                                            &bp->hw_rx_port_stats_map,
2996                                            GFP_KERNEL);
2997                 if (!bp->hw_rx_port_stats)
2998                         return -ENOMEM;
2999
3000                 bp->hw_tx_port_stats = (void *)(bp->hw_rx_port_stats + 1) +
3001                                        512;
3002                 bp->hw_tx_port_stats_map = bp->hw_rx_port_stats_map +
3003                                            sizeof(struct rx_port_stats) + 512;
3004                 bp->flags |= BNXT_FLAG_PORT_STATS;
3005         }
3006         return 0;
3007 }
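/* Editor's sketch of the port stats buffer carved out above: a single
 * allocation of hw_port_stats_size holds both directions, with the
 * extra 1024 bytes of slack split around the TX block:
 *
 *	hw_rx_port_stats_map + 0                : struct rx_port_stats
 *	hw_rx_port_stats_map + sizeof(rx) + 512 : struct tx_port_stats
 *
 * which is exactly how hw_tx_port_stats and hw_tx_port_stats_map are
 * derived from the RX pointer and mapping.
 */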
3008
3009 static void bnxt_clear_ring_indices(struct bnxt *bp)
3010 {
3011         int i;
3012
3013         if (!bp->bnapi)
3014                 return;
3015
3016         for (i = 0; i < bp->cp_nr_rings; i++) {
3017                 struct bnxt_napi *bnapi = bp->bnapi[i];
3018                 struct bnxt_cp_ring_info *cpr;
3019                 struct bnxt_rx_ring_info *rxr;
3020                 struct bnxt_tx_ring_info *txr;
3021
3022                 if (!bnapi)
3023                         continue;
3024
3025                 cpr = &bnapi->cp_ring;
3026                 cpr->cp_raw_cons = 0;
3027
3028                 txr = bnapi->tx_ring;
3029                 if (txr) {
3030                         txr->tx_prod = 0;
3031                         txr->tx_cons = 0;
3032                 }
3033
3034                 rxr = bnapi->rx_ring;
3035                 if (rxr) {
3036                         rxr->rx_prod = 0;
3037                         rxr->rx_agg_prod = 0;
3038                         rxr->rx_sw_agg_prod = 0;
3039                         rxr->rx_next_cons = 0;
3040                 }
3041         }
3042 }
3043
3044 static void bnxt_free_ntp_fltrs(struct bnxt *bp, bool irq_reinit)
3045 {
3046 #ifdef CONFIG_RFS_ACCEL
3047         int i;
3048
3049         /* We are under rtnl_lock and all our NAPIs have been
3050          * disabled, so it is safe to delete the hash table.
3051          */
3052         for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++) {
3053                 struct hlist_head *head;
3054                 struct hlist_node *tmp;
3055                 struct bnxt_ntuple_filter *fltr;
3056
3057                 head = &bp->ntp_fltr_hash_tbl[i];
3058                 hlist_for_each_entry_safe(fltr, tmp, head, hash) {
3059                         hlist_del(&fltr->hash);
3060                         kfree(fltr);
3061                 }
3062         }
3063         if (irq_reinit) {
3064                 kfree(bp->ntp_fltr_bmap);
3065                 bp->ntp_fltr_bmap = NULL;
3066         }
3067         bp->ntp_fltr_count = 0;
3068 #endif
3069 }
3070
3071 static int bnxt_alloc_ntp_fltrs(struct bnxt *bp)
3072 {
3073 #ifdef CONFIG_RFS_ACCEL
3074         int i, rc = 0;
3075
3076         if (!(bp->flags & BNXT_FLAG_RFS))
3077                 return 0;
3078
3079         for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++)
3080                 INIT_HLIST_HEAD(&bp->ntp_fltr_hash_tbl[i]);
3081
3082         bp->ntp_fltr_count = 0;
3083         bp->ntp_fltr_bmap = kcalloc(BITS_TO_LONGS(BNXT_NTP_FLTR_MAX_FLTR),
3084                                     sizeof(long),
3085                                     GFP_KERNEL);
3086
3087         if (!bp->ntp_fltr_bmap)
3088                 rc = -ENOMEM;
3089
3090         return rc;
3091 #else
3092         return 0;
3093 #endif
3094 }
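/* Illustrative sketch (assumed usage, not shown in this section): the
 * bitmap allocated above hands out ntuple filter IDs roughly like this:
 *
 *	idx = find_first_zero_bit(bp->ntp_fltr_bmap,
 *				  BNXT_NTP_FLTR_MAX_FLTR);
 *	if (idx >= BNXT_NTP_FLTR_MAX_FLTR)
 *		return -ENOMEM;
 *	set_bit(idx, bp->ntp_fltr_bmap);
 *	new_fltr->sw_id = idx;
 */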
3095
3096 static void bnxt_free_mem(struct bnxt *bp, bool irq_re_init)
3097 {
3098         bnxt_free_vnic_attributes(bp);
3099         bnxt_free_tx_rings(bp);
3100         bnxt_free_rx_rings(bp);
3101         bnxt_free_cp_rings(bp);
3102         bnxt_free_ntp_fltrs(bp, irq_re_init);
3103         if (irq_re_init) {
3104                 bnxt_free_stats(bp);
3105                 bnxt_free_ring_grps(bp);
3106                 bnxt_free_vnics(bp);
3107                 kfree(bp->tx_ring_map);
3108                 bp->tx_ring_map = NULL;
3109                 kfree(bp->tx_ring);
3110                 bp->tx_ring = NULL;
3111                 kfree(bp->rx_ring);
3112                 bp->rx_ring = NULL;
3113                 kfree(bp->bnapi);
3114                 bp->bnapi = NULL;
3115         } else {
3116                 bnxt_clear_ring_indices(bp);
3117         }
3118 }
3119
3120 static int bnxt_alloc_mem(struct bnxt *bp, bool irq_re_init)
3121 {
3122         int i, j, rc, size, arr_size;
3123         void *bnapi;
3124
3125         if (irq_re_init) {
3126                 /* Allocate the bnapi pointer array and the memory
3127                  * block for all queues in a single allocation.
3128                  */
3129                 arr_size = L1_CACHE_ALIGN(sizeof(struct bnxt_napi *) *
3130                                 bp->cp_nr_rings);
3131                 size = L1_CACHE_ALIGN(sizeof(struct bnxt_napi));
3132                 bnapi = kzalloc(arr_size + size * bp->cp_nr_rings, GFP_KERNEL);
3133                 if (!bnapi)
3134                         return -ENOMEM;
3135
3136                 bp->bnapi = bnapi;
3137                 bnapi += arr_size;
3138                 for (i = 0; i < bp->cp_nr_rings; i++, bnapi += size) {
3139                         bp->bnapi[i] = bnapi;
3140                         bp->bnapi[i]->index = i;
3141                         bp->bnapi[i]->bp = bp;
3142                 }
3143
3144                 bp->rx_ring = kcalloc(bp->rx_nr_rings,
3145                                       sizeof(struct bnxt_rx_ring_info),
3146                                       GFP_KERNEL);
3147                 if (!bp->rx_ring)
3148                         return -ENOMEM;
3149
3150                 for (i = 0; i < bp->rx_nr_rings; i++) {
3151                         bp->rx_ring[i].bnapi = bp->bnapi[i];
3152                         bp->bnapi[i]->rx_ring = &bp->rx_ring[i];
3153                 }
3154
3155                 bp->tx_ring = kcalloc(bp->tx_nr_rings,
3156                                       sizeof(struct bnxt_tx_ring_info),
3157                                       GFP_KERNEL);
3158                 if (!bp->tx_ring)
3159                         return -ENOMEM;
3160
3161                 bp->tx_ring_map = kcalloc(bp->tx_nr_rings, sizeof(u16),
3162                                           GFP_KERNEL);
3163
3164                 if (!bp->tx_ring_map)
3165                         return -ENOMEM;
3166
3167                 if (bp->flags & BNXT_FLAG_SHARED_RINGS)
3168                         j = 0;
3169                 else
3170                         j = bp->rx_nr_rings;
3171
3172                 for (i = 0; i < bp->tx_nr_rings; i++, j++) {
3173                         bp->tx_ring[i].bnapi = bp->bnapi[j];
3174                         bp->bnapi[j]->tx_ring = &bp->tx_ring[i];
3175                         bp->tx_ring_map[i] = bp->tx_nr_rings_xdp + i;
3176                         if (i >= bp->tx_nr_rings_xdp) {
3177                                 bp->tx_ring[i].txq_index = i -
3178                                         bp->tx_nr_rings_xdp;
3179                                 bp->bnapi[j]->tx_int = bnxt_tx_int;
3180                         } else {
3181                                 bp->bnapi[j]->flags |= BNXT_NAPI_FLAG_XDP;
3182                                 bp->bnapi[j]->tx_int = bnxt_tx_int_xdp;
3183                         }
3184                 }
3185
3186                 rc = bnxt_alloc_stats(bp);
3187                 if (rc)
3188                         goto alloc_mem_err;
3189
3190                 rc = bnxt_alloc_ntp_fltrs(bp);
3191                 if (rc)
3192                         goto alloc_mem_err;
3193
3194                 rc = bnxt_alloc_vnics(bp);
3195                 if (rc)
3196                         goto alloc_mem_err;
3197         }
3198
3199         bnxt_init_ring_struct(bp);
3200
3201         rc = bnxt_alloc_rx_rings(bp);
3202         if (rc)
3203                 goto alloc_mem_err;
3204
3205         rc = bnxt_alloc_tx_rings(bp);
3206         if (rc)
3207                 goto alloc_mem_err;
3208
3209         rc = bnxt_alloc_cp_rings(bp);
3210         if (rc)
3211                 goto alloc_mem_err;
3212
3213         bp->vnic_info[0].flags |= BNXT_VNIC_RSS_FLAG | BNXT_VNIC_MCAST_FLAG |
3214                                   BNXT_VNIC_UCAST_FLAG;
3215         rc = bnxt_alloc_vnic_attributes(bp);
3216         if (rc)
3217                 goto alloc_mem_err;
3218         return 0;
3219
3220 alloc_mem_err:
3221         bnxt_free_mem(bp, true);
3222         return rc;
3223 }
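/* Editor's note on the bnapi allocation in bnxt_alloc_mem(): the pointer
 * array and the per-ring structs come from one kzalloc, laid out as
 *
 *	[ bnxt_napi *ptr[cp_nr_rings] | napi 0 | napi 1 | ... ]
 *
 * with the array and each struct padded to L1_CACHE_ALIGN boundaries so
 * rings do not false-share cache lines; bp->bnapi[i] simply points
 * i * size bytes past the end of the pointer array.
 */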
3224
3225 static void bnxt_disable_int(struct bnxt *bp)
3226 {
3227         int i;
3228
3229         if (!bp->bnapi)
3230                 return;
3231
3232         for (i = 0; i < bp->cp_nr_rings; i++) {
3233                 struct bnxt_napi *bnapi = bp->bnapi[i];
3234                 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
3235                 struct bnxt_ring_struct *ring = &cpr->cp_ring_struct;
3236
3237                 if (ring->fw_ring_id != INVALID_HW_RING_ID)
3238                         BNXT_CP_DB(cpr->cp_doorbell, cpr->cp_raw_cons);
3239         }
3240 }
3241
3242 static void bnxt_disable_int_sync(struct bnxt *bp)
3243 {
3244         int i;
3245
3246         atomic_inc(&bp->intr_sem);
3247
3248         bnxt_disable_int(bp);
3249         for (i = 0; i < bp->cp_nr_rings; i++)
3250                 synchronize_irq(bp->irq_tbl[i].vector);
3251 }
3252
3253 static void bnxt_enable_int(struct bnxt *bp)
3254 {
3255         int i;
3256
3257         atomic_set(&bp->intr_sem, 0);
3258         for (i = 0; i < bp->cp_nr_rings; i++) {
3259                 struct bnxt_napi *bnapi = bp->bnapi[i];
3260                 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
3261
3262                 BNXT_CP_DB_REARM(cpr->cp_doorbell, cpr->cp_raw_cons);
3263         }
3264 }
3265
3266 void bnxt_hwrm_cmd_hdr_init(struct bnxt *bp, void *request, u16 req_type,
3267                             u16 cmpl_ring, u16 target_id)
3268 {
3269         struct input *req = request;
3270
3271         req->req_type = cpu_to_le16(req_type);
3272         req->cmpl_ring = cpu_to_le16(cmpl_ring);
3273         req->target_id = cpu_to_le16(target_id);
3274         req->resp_addr = cpu_to_le64(bp->hwrm_cmd_resp_dma_addr);
3275 }
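/* Typical request pattern (a sketch distilled from callers below): build
 * the request on the stack, stamp the header, then hold hwrm_cmd_lock
 * across the send and any read of the shared response buffer:
 *
 *	struct hwrm_vnic_qcaps_input req = {0};
 *
 *	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_QCAPS, -1, -1);
 *	mutex_lock(&bp->hwrm_cmd_lock);
 *	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
 *	... parse bp->hwrm_cmd_resp_addr while still holding the lock ...
 *	mutex_unlock(&bp->hwrm_cmd_lock);
 */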
3276
3277 static int bnxt_hwrm_do_send_msg(struct bnxt *bp, void *msg, u32 msg_len,
3278                                  int timeout, bool silent)
3279 {
3280         int i, intr_process, rc, tmo_count;
3281         struct input *req = msg;
3282         u32 *data = msg;
3283         __le32 *resp_len, *valid;
3284         u16 cp_ring_id, len = 0;
3285         struct hwrm_err_output *resp = bp->hwrm_cmd_resp_addr;
3286         u16 max_req_len = BNXT_HWRM_MAX_REQ_LEN;
3287
3288         req->seq_id = cpu_to_le16(bp->hwrm_cmd_seq++);
3289         memset(resp, 0, PAGE_SIZE);
3290         cp_ring_id = le16_to_cpu(req->cmpl_ring);
3291         intr_process = (cp_ring_id == INVALID_HW_RING_ID) ? 0 : 1;
3292
3293         if (bp->flags & BNXT_FLAG_SHORT_CMD) {
3294                 void *short_cmd_req = bp->hwrm_short_cmd_req_addr;
3295                 struct hwrm_short_input short_input = {0};
3296
3297                 memcpy(short_cmd_req, req, msg_len);
3298                 memset(short_cmd_req + msg_len, 0, BNXT_HWRM_MAX_REQ_LEN -
3299                                                    msg_len);
3300
3301                 short_input.req_type = req->req_type;
3302                 short_input.signature =
3303                                 cpu_to_le16(SHORT_REQ_SIGNATURE_SHORT_CMD);
3304                 short_input.size = cpu_to_le16(msg_len);
3305                 short_input.req_addr =
3306                         cpu_to_le64(bp->hwrm_short_cmd_req_dma_addr);
3307
3308                 data = (u32 *)&short_input;
3309                 msg_len = sizeof(short_input);
3310
3311                 /* Sync memory write before updating doorbell */
3312                 wmb();
3313
3314                 max_req_len = BNXT_HWRM_SHORT_REQ_LEN;
3315         }
3316
3317         /* Write request msg to hwrm channel */
3318         __iowrite32_copy(bp->bar0, data, msg_len / 4);
3319
3320         for (i = msg_len; i < max_req_len; i += 4)
3321                 writel(0, bp->bar0 + i);
3322
3323         /* currently supports only one outstanding message */
3324         if (intr_process)
3325                 bp->hwrm_intr_seq_id = le16_to_cpu(req->seq_id);
3326
3327         /* Ring channel doorbell */
3328         writel(1, bp->bar0 + 0x100);
3329
3330         if (!timeout)
3331                 timeout = DFLT_HWRM_CMD_TIMEOUT;
3332
3333         i = 0;
3334         tmo_count = timeout * 40;
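        /* timeout is in milliseconds; each poll below sleeps 25-40 us,
         * so 40 iterations approximate 1 ms of wall-clock budget.
         */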
3335         if (intr_process) {
3336                 /* Wait until hwrm response cmpl interrupt is processed */
3337                 while (bp->hwrm_intr_seq_id != HWRM_SEQ_ID_INVALID &&
3338                        i++ < tmo_count) {
3339                         usleep_range(25, 40);
3340                 }
3341
3342                 if (bp->hwrm_intr_seq_id != HWRM_SEQ_ID_INVALID) {
3343                         netdev_err(bp->dev, "Resp cmpl intr err msg: 0x%x\n",
3344                                    le16_to_cpu(req->req_type));
3345                         return -1;
3346                 }
3347         } else {
3348                 /* Check if response len is updated */
3349                 resp_len = bp->hwrm_cmd_resp_addr + HWRM_RESP_LEN_OFFSET;
3350                 for (i = 0; i < tmo_count; i++) {
3351                         len = (le32_to_cpu(*resp_len) & HWRM_RESP_LEN_MASK) >>
3352                               HWRM_RESP_LEN_SFT;
3353                         if (len)
3354                                 break;
3355                         usleep_range(25, 40);
3356                 }
3357
3358                 if (i >= tmo_count) {
3359                         netdev_err(bp->dev, "Error (timeout: %d) msg {0x%x 0x%x} len:%d\n",
3360                                    timeout, le16_to_cpu(req->req_type),
3361                                    le16_to_cpu(req->seq_id), len);
3362                         return -1;
3363                 }
3364
3365                 /* Last word of resp contains the valid bit */
3366                 valid = bp->hwrm_cmd_resp_addr + len - 4;
3367                 for (i = 0; i < 5; i++) {
3368                         if (le32_to_cpu(*valid) & HWRM_RESP_VALID_MASK)
3369                                 break;
3370                         udelay(1);
3371                 }
3372
3373                 if (i >= 5) {
3374                         netdev_err(bp->dev, "Error (timeout: %d) msg {0x%x 0x%x} len:%d v:%d\n",
3375                                    timeout, le16_to_cpu(req->req_type),
3376                                    le16_to_cpu(req->seq_id), len, le32_to_cpu(*valid));
3377                         return -1;
3378                 }
3379         }
3380
3381         rc = le16_to_cpu(resp->error_code);
3382         if (rc && !silent)
3383                 netdev_err(bp->dev, "hwrm req_type 0x%x seq id 0x%x error 0x%x\n",
3384                            le16_to_cpu(resp->req_type),
3385                            le16_to_cpu(resp->seq_id), rc);
3386         return rc;
3387 }
3388
3389 int _hwrm_send_message(struct bnxt *bp, void *msg, u32 msg_len, int timeout)
3390 {
3391         return bnxt_hwrm_do_send_msg(bp, msg, msg_len, timeout, false);
3392 }
3393
3394 int hwrm_send_message(struct bnxt *bp, void *msg, u32 msg_len, int timeout)
3395 {
3396         int rc;
3397
3398         mutex_lock(&bp->hwrm_cmd_lock);
3399         rc = _hwrm_send_message(bp, msg, msg_len, timeout);
3400         mutex_unlock(&bp->hwrm_cmd_lock);
3401         return rc;
3402 }
3403
3404 int hwrm_send_message_silent(struct bnxt *bp, void *msg, u32 msg_len,
3405                              int timeout)
3406 {
3407         int rc;
3408
3409         mutex_lock(&bp->hwrm_cmd_lock);
3410         rc = bnxt_hwrm_do_send_msg(bp, msg, msg_len, timeout, true);
3411         mutex_unlock(&bp->hwrm_cmd_lock);
3412         return rc;
3413 }
3414
3415 int bnxt_hwrm_func_rgtr_async_events(struct bnxt *bp, unsigned long *bmap,
3416                                      int bmap_size)
3417 {
3418         struct hwrm_func_drv_rgtr_input req = {0};
3419         DECLARE_BITMAP(async_events_bmap, 256);
3420         u32 *events = (u32 *)async_events_bmap;
3421         int i;
3422
3423         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_DRV_RGTR, -1, -1);
3424
3425         req.enables =
3426                 cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_ASYNC_EVENT_FWD);
3427
3428         memset(async_events_bmap, 0, sizeof(async_events_bmap));
3429         for (i = 0; i < ARRAY_SIZE(bnxt_async_events_arr); i++)
3430                 __set_bit(bnxt_async_events_arr[i], async_events_bmap);
3431
3432         if (bmap && bmap_size) {
3433                 for (i = 0; i < bmap_size; i++) {
3434                         if (test_bit(i, bmap))
3435                                 __set_bit(i, async_events_bmap);
3436                 }
3437         }
3438
3439         for (i = 0; i < 8; i++)
3440                 req.async_event_fwd[i] |= cpu_to_le32(events[i]);
3441
3442         return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
3443 }
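/* Editor's worked example for the forwarding words above: async event
 * id 33 sets bit 33 of the 256-bit bitmap, i.e. bit 1 of events[1], so
 * it is forwarded via req.async_event_fwd[1].  (This relies on the
 * little-endian long-to-u32 aliasing implied by the cast above.)
 */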
3444
3445 static int bnxt_hwrm_func_drv_rgtr(struct bnxt *bp)
3446 {
3447         struct hwrm_func_drv_rgtr_input req = {0};
3448
3449         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_DRV_RGTR, -1, -1);
3450
3451         req.enables =
3452                 cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_OS_TYPE |
3453                             FUNC_DRV_RGTR_REQ_ENABLES_VER);
3454
3455         req.os_type = cpu_to_le16(FUNC_DRV_RGTR_REQ_OS_TYPE_LINUX);
3456         req.ver_maj = DRV_VER_MAJ;
3457         req.ver_min = DRV_VER_MIN;
3458         req.ver_upd = DRV_VER_UPD;
3459
3460         if (BNXT_PF(bp)) {
3461                 u32 data[8];
3462                 int i;
3463
3464                 memset(data, 0, sizeof(data));
3465                 for (i = 0; i < ARRAY_SIZE(bnxt_vf_req_snif); i++) {
3466                         u16 cmd = bnxt_vf_req_snif[i];
3467                         unsigned int bit, idx;
3468
3469                         idx = cmd / 32;
3470                         bit = cmd % 32;
3471                         data[idx] |= 1 << bit;
3472                 }
3473
3474                 for (i = 0; i < 8; i++)
3475                         req.vf_req_fwd[i] = cpu_to_le32(data[i]);
3476
3477                 req.enables |=
3478                         cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_VF_REQ_FWD);
3479         }
3480
3481         return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
3482 }
3483
3484 static int bnxt_hwrm_func_drv_unrgtr(struct bnxt *bp)
3485 {
3486         struct hwrm_func_drv_unrgtr_input req = {0};
3487
3488         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_DRV_UNRGTR, -1, -1);
3489         return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
3490 }
3491
3492 static int bnxt_hwrm_tunnel_dst_port_free(struct bnxt *bp, u8 tunnel_type)
3493 {
3494         int rc = 0;
3495         struct hwrm_tunnel_dst_port_free_input req = {0};
3496
3497         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_TUNNEL_DST_PORT_FREE, -1, -1);
3498         req.tunnel_type = tunnel_type;
3499
3500         switch (tunnel_type) {
3501         case TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN:
3502                 req.tunnel_dst_port_id = bp->vxlan_fw_dst_port_id;
3503                 break;
3504         case TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE:
3505                 req.tunnel_dst_port_id = bp->nge_fw_dst_port_id;
3506                 break;
3507         default:
3508                 break;
3509         }
3510
3511         rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
3512         if (rc)
3513                 netdev_err(bp->dev, "hwrm_tunnel_dst_port_free failed. rc:%d\n",
3514                            rc);
3515         return rc;
3516 }
3517
3518 static int bnxt_hwrm_tunnel_dst_port_alloc(struct bnxt *bp, __be16 port,
3519                                            u8 tunnel_type)
3520 {
3521         int rc = 0;
3522         struct hwrm_tunnel_dst_port_alloc_input req = {0};
3523         struct hwrm_tunnel_dst_port_alloc_output *resp = bp->hwrm_cmd_resp_addr;
3524
3525         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_TUNNEL_DST_PORT_ALLOC, -1, -1);
3526
3527         req.tunnel_type = tunnel_type;
3528         req.tunnel_dst_port_val = port;
3529
3530         mutex_lock(&bp->hwrm_cmd_lock);
3531         rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
3532         if (rc) {
3533                 netdev_err(bp->dev, "hwrm_tunnel_dst_port_alloc failed. rc:%d\n",
3534                            rc);
3535                 goto err_out;
3536         }
3537
3538         switch (tunnel_type) {
3539         case TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN:
3540                 bp->vxlan_fw_dst_port_id = resp->tunnel_dst_port_id;
3541                 break;
3542         case TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_GENEVE:
3543                 bp->nge_fw_dst_port_id = resp->tunnel_dst_port_id;
3544                 break;
3545         default:
3546                 break;
3547         }
3548
3549 err_out:
3550         mutex_unlock(&bp->hwrm_cmd_lock);
3551         return rc;
3552 }
3553
3554 static int bnxt_hwrm_cfa_l2_set_rx_mask(struct bnxt *bp, u16 vnic_id)
3555 {
3556         struct hwrm_cfa_l2_set_rx_mask_input req = {0};
3557         struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
3558
3559         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_L2_SET_RX_MASK, -1, -1);
3560         req.vnic_id = cpu_to_le32(vnic->fw_vnic_id);
3561
3562         req.num_mc_entries = cpu_to_le32(vnic->mc_list_count);
3563         req.mc_tbl_addr = cpu_to_le64(vnic->mc_list_mapping);
3564         req.mask = cpu_to_le32(vnic->rx_mask);
3565         return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
3566 }
3567
3568 #ifdef CONFIG_RFS_ACCEL
3569 static int bnxt_hwrm_cfa_ntuple_filter_free(struct bnxt *bp,
3570                                             struct bnxt_ntuple_filter *fltr)
3571 {
3572         struct hwrm_cfa_ntuple_filter_free_input req = {0};
3573
3574         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_NTUPLE_FILTER_FREE, -1, -1);
3575         req.ntuple_filter_id = fltr->filter_id;
3576         return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
3577 }
3578
3579 #define BNXT_NTP_FLTR_FLAGS                                     \
3580         (CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_L2_FILTER_ID |     \
3581          CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_ETHERTYPE |        \
3582          CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_MACADDR |      \
3583          CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_IPADDR_TYPE |      \
3584          CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_IPADDR |       \
3585          CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_IPADDR_MASK |  \
3586          CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_IPADDR |       \
3587          CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_IPADDR_MASK |  \
3588          CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_IP_PROTOCOL |      \
3589          CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_PORT |         \
3590          CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_PORT_MASK |    \
3591          CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_PORT |         \
3592          CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_PORT_MASK |    \
3593          CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_ID)
3594
3595 #define BNXT_NTP_TUNNEL_FLTR_FLAG                               \
3596                 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_TUNNEL_TYPE
3597
3598 static int bnxt_hwrm_cfa_ntuple_filter_alloc(struct bnxt *bp,
3599                                              struct bnxt_ntuple_filter *fltr)
3600 {
3601         int rc = 0;
3602         struct hwrm_cfa_ntuple_filter_alloc_input req = {0};
3603         struct hwrm_cfa_ntuple_filter_alloc_output *resp =
3604                 bp->hwrm_cmd_resp_addr;
3605         struct flow_keys *keys = &fltr->fkeys;
3606         struct bnxt_vnic_info *vnic = &bp->vnic_info[fltr->rxq + 1];
3607
3608         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_NTUPLE_FILTER_ALLOC, -1, -1);
3609         req.l2_filter_id = bp->vnic_info[0].fw_l2_filter_id[fltr->l2_fltr_idx];
3610
3611         req.enables = cpu_to_le32(BNXT_NTP_FLTR_FLAGS);
3612
3613         req.ethertype = htons(ETH_P_IP);
3614         memcpy(req.src_macaddr, fltr->src_mac_addr, ETH_ALEN);
3615         req.ip_addr_type = CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV4;
3616         req.ip_protocol = keys->basic.ip_proto;
3617
3618         if (keys->basic.n_proto == htons(ETH_P_IPV6)) {
3619                 int i;
3620
3621                 req.ethertype = htons(ETH_P_IPV6);
3622                 req.ip_addr_type =
3623                         CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV6;
3624                 *(struct in6_addr *)&req.src_ipaddr[0] =
3625                         keys->addrs.v6addrs.src;
3626                 *(struct in6_addr *)&req.dst_ipaddr[0] =
3627                         keys->addrs.v6addrs.dst;
3628                 for (i = 0; i < 4; i++) {
3629                         req.src_ipaddr_mask[i] = cpu_to_be32(0xffffffff);
3630                         req.dst_ipaddr_mask[i] = cpu_to_be32(0xffffffff);
3631                 }
3632         } else {
3633                 req.src_ipaddr[0] = keys->addrs.v4addrs.src;
3634                 req.src_ipaddr_mask[0] = cpu_to_be32(0xffffffff);
3635                 req.dst_ipaddr[0] = keys->addrs.v4addrs.dst;
3636                 req.dst_ipaddr_mask[0] = cpu_to_be32(0xffffffff);
3637         }
3638         if (keys->control.flags & FLOW_DIS_ENCAPSULATION) {
3639                 req.enables |= cpu_to_le32(BNXT_NTP_TUNNEL_FLTR_FLAG);
3640                 req.tunnel_type =
3641                         CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL;
3642         }
3643
3644         req.src_port = keys->ports.src;
3645         req.src_port_mask = cpu_to_be16(0xffff);
3646         req.dst_port = keys->ports.dst;
3647         req.dst_port_mask = cpu_to_be16(0xffff);
3648
3649         req.dst_id = cpu_to_le16(vnic->fw_vnic_id);
3650         mutex_lock(&bp->hwrm_cmd_lock);
3651         rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
3652         if (!rc)
3653                 fltr->filter_id = resp->ntuple_filter_id;
3654         mutex_unlock(&bp->hwrm_cmd_lock);
3655         return rc;
3656 }
3657 #endif
3658
3659 static int bnxt_hwrm_set_vnic_filter(struct bnxt *bp, u16 vnic_id, u16 idx,
3660                                      u8 *mac_addr)
3661 {
3662         int rc = 0;
3663         struct hwrm_cfa_l2_filter_alloc_input req = {0};
3664         struct hwrm_cfa_l2_filter_alloc_output *resp = bp->hwrm_cmd_resp_addr;
3665
3666         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_L2_FILTER_ALLOC, -1, -1);
3667         req.flags = cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH_RX);
3668         if (!BNXT_CHIP_TYPE_NITRO_A0(bp))
3669                 req.flags |=
3670                         cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_FLAGS_OUTERMOST);
3671         req.dst_id = cpu_to_le16(bp->vnic_info[vnic_id].fw_vnic_id);
3672         req.enables =
3673                 cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR |
3674                             CFA_L2_FILTER_ALLOC_REQ_ENABLES_DST_ID |
3675                             CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR_MASK);
3676         memcpy(req.l2_addr, mac_addr, ETH_ALEN);
3677         req.l2_addr_mask[0] = 0xff;
3678         req.l2_addr_mask[1] = 0xff;
3679         req.l2_addr_mask[2] = 0xff;
3680         req.l2_addr_mask[3] = 0xff;
3681         req.l2_addr_mask[4] = 0xff;
3682         req.l2_addr_mask[5] = 0xff;
3683
3684         mutex_lock(&bp->hwrm_cmd_lock);
3685         rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
3686         if (!rc)
3687                 bp->vnic_info[vnic_id].fw_l2_filter_id[idx] =
3688                                                         resp->l2_filter_id;
3689         mutex_unlock(&bp->hwrm_cmd_lock);
3690         return rc;
3691 }
3692
3693 static int bnxt_hwrm_clear_vnic_filter(struct bnxt *bp)
3694 {
3695         u16 i, j, num_of_vnics = 1; /* only vnic 0 supported */
3696         int rc = 0;
3697
3698         /* Any associated ntuple filters will also be cleared by firmware. */
3699         mutex_lock(&bp->hwrm_cmd_lock);
3700         for (i = 0; i < num_of_vnics; i++) {
3701                 struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
3702
3703                 for (j = 0; j < vnic->uc_filter_count; j++) {
3704                         struct hwrm_cfa_l2_filter_free_input req = {0};
3705
3706                         bnxt_hwrm_cmd_hdr_init(bp, &req,
3707                                                HWRM_CFA_L2_FILTER_FREE, -1, -1);
3708
3709                         req.l2_filter_id = vnic->fw_l2_filter_id[j];
3710
3711                         rc = _hwrm_send_message(bp, &req, sizeof(req),
3712                                                 HWRM_CMD_TIMEOUT);
3713                 }
3714                 vnic->uc_filter_count = 0;
3715         }
3716         mutex_unlock(&bp->hwrm_cmd_lock);
3717
3718         return rc;
3719 }
3720
3721 static int bnxt_hwrm_vnic_set_tpa(struct bnxt *bp, u16 vnic_id, u32 tpa_flags)
3722 {
3723         struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
3724         struct hwrm_vnic_tpa_cfg_input req = {0};
3725
3726         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_TPA_CFG, -1, -1);
3727
3728         if (tpa_flags) {
3729                 u16 mss = bp->dev->mtu - 40;
3730                 u32 nsegs, n, segs = 0, flags;
3731
3732                 flags = VNIC_TPA_CFG_REQ_FLAGS_TPA |
3733                         VNIC_TPA_CFG_REQ_FLAGS_ENCAP_TPA |
3734                         VNIC_TPA_CFG_REQ_FLAGS_RSC_WND_UPDATE |
3735                         VNIC_TPA_CFG_REQ_FLAGS_AGG_WITH_ECN |
3736                         VNIC_TPA_CFG_REQ_FLAGS_AGG_WITH_SAME_GRE_SEQ;
3737                 if (tpa_flags & BNXT_FLAG_GRO)
3738                         flags |= VNIC_TPA_CFG_REQ_FLAGS_GRO;
3739
3740                 req.flags = cpu_to_le32(flags);
3741
3742                 req.enables =
3743                         cpu_to_le32(VNIC_TPA_CFG_REQ_ENABLES_MAX_AGG_SEGS |
3744                                     VNIC_TPA_CFG_REQ_ENABLES_MAX_AGGS |
3745                                     VNIC_TPA_CFG_REQ_ENABLES_MIN_AGG_LEN);
3746
3747                 /* Number of segs is in log2 units, and the first
3748                  * packet is not included as part of these units.
3749                  */
3750                 if (mss <= BNXT_RX_PAGE_SIZE) {
3751                         n = BNXT_RX_PAGE_SIZE / mss;
3752                         nsegs = (MAX_SKB_FRAGS - 1) * n;
3753                 } else {
3754                         n = mss / BNXT_RX_PAGE_SIZE;
3755                         if (mss & (BNXT_RX_PAGE_SIZE - 1))
3756                                 n++;
3757                         nsegs = (MAX_SKB_FRAGS - n) / n;
3758                 }
3759
3760                 segs = ilog2(nsegs);
3761                 req.max_agg_segs = cpu_to_le16(segs);
3762                 req.max_aggs = cpu_to_le16(VNIC_TPA_CFG_REQ_MAX_AGGS_MAX);
3763
3764                 req.min_agg_len = cpu_to_le32(512);
3765         }
3766         req.vnic_id = cpu_to_le16(vnic->fw_vnic_id);
3767
3768         return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
3769 }
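/* Editor's worked example (assuming 4K pages and MAX_SKB_FRAGS == 17):
 * a 1500-byte MTU gives mss = 1460, so n = 4096 / 1460 = 2 segments per
 * page, nsegs = (17 - 1) * 2 = 32, and max_agg_segs = ilog2(32) = 5,
 * i.e. up to 2^5 segments may be aggregated after the first packet.
 */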
3770
3771 static int bnxt_hwrm_vnic_set_rss(struct bnxt *bp, u16 vnic_id, bool set_rss)
3772 {
3773         u32 i, j, max_rings;
3774         struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
3775         struct hwrm_vnic_rss_cfg_input req = {0};
3776
3777         if (vnic->fw_rss_cos_lb_ctx[0] == INVALID_HW_RING_ID)
3778                 return 0;
3779
3780         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_RSS_CFG, -1, -1);
3781         if (set_rss) {
3782                 req.hash_type = cpu_to_le32(bp->rss_hash_cfg);
3783                 if (vnic->flags & BNXT_VNIC_RSS_FLAG) {
3784                         if (BNXT_CHIP_TYPE_NITRO_A0(bp))
3785                                 max_rings = bp->rx_nr_rings - 1;
3786                         else
3787                                 max_rings = bp->rx_nr_rings;
3788                 } else {
3789                         max_rings = 1;
3790                 }
3791
3792                 /* Fill the RSS indirection table with ring group ids */
3793                 for (i = 0, j = 0; i < HW_HASH_INDEX_SIZE; i++, j++) {
3794                         if (j == max_rings)
3795                                 j = 0;
3796                         vnic->rss_table[i] = cpu_to_le16(vnic->fw_grp_ids[j]);
3797                 }
3798
3799                 req.ring_grp_tbl_addr = cpu_to_le64(vnic->rss_table_dma_addr);
3800                 req.hash_key_tbl_addr =
3801                         cpu_to_le64(vnic->rss_hash_key_dma_addr);
3802         }
3803         req.rss_ctx_idx = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[0]);
3804         return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
3805 }
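/* Editor's example of the indirection fill above: with max_rings = 4,
 * the HW_HASH_INDEX_SIZE entries repeat the ring group ids
 * 0,1,2,3,0,1,2,3,... so hash buckets spread evenly over the RX rings;
 * a ring count that does not divide the table size simply wraps
 * mid-pattern on the last pass.
 */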
3806
3807 static int bnxt_hwrm_vnic_set_hds(struct bnxt *bp, u16 vnic_id)
3808 {
3809         struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
3810         struct hwrm_vnic_plcmodes_cfg_input req = {0};
3811
3812         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_PLCMODES_CFG, -1, -1);
3813         req.flags = cpu_to_le32(VNIC_PLCMODES_CFG_REQ_FLAGS_JUMBO_PLACEMENT |
3814                                 VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV4 |
3815                                 VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV6);
3816         req.enables =
3817                 cpu_to_le32(VNIC_PLCMODES_CFG_REQ_ENABLES_JUMBO_THRESH_VALID |
3818                             VNIC_PLCMODES_CFG_REQ_ENABLES_HDS_THRESHOLD_VALID);
3819         /* thresholds not implemented in firmware yet */
3820         req.jumbo_thresh = cpu_to_le16(bp->rx_copy_thresh);
3821         req.hds_threshold = cpu_to_le16(bp->rx_copy_thresh);
3822         req.vnic_id = cpu_to_le32(vnic->fw_vnic_id);
3823         return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
3824 }
3825
3826 static void bnxt_hwrm_vnic_ctx_free_one(struct bnxt *bp, u16 vnic_id,
3827                                         u16 ctx_idx)
3828 {
3829         struct hwrm_vnic_rss_cos_lb_ctx_free_input req = {0};
3830
3831         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_RSS_COS_LB_CTX_FREE, -1, -1);
3832         req.rss_cos_lb_ctx_id =
3833                 cpu_to_le16(bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx[ctx_idx]);
3834
3835         hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
3836         bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx[ctx_idx] = INVALID_HW_RING_ID;
3837 }
3838
3839 static void bnxt_hwrm_vnic_ctx_free(struct bnxt *bp)
3840 {
3841         int i, j;
3842
3843         for (i = 0; i < bp->nr_vnics; i++) {
3844                 struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
3845
3846                 for (j = 0; j < BNXT_MAX_CTX_PER_VNIC; j++) {
3847                         if (vnic->fw_rss_cos_lb_ctx[j] != INVALID_HW_RING_ID)
3848                                 bnxt_hwrm_vnic_ctx_free_one(bp, i, j);
3849                 }
3850         }
3851         bp->rsscos_nr_ctxs = 0;
3852 }
3853
3854 static int bnxt_hwrm_vnic_ctx_alloc(struct bnxt *bp, u16 vnic_id, u16 ctx_idx)
3855 {
3856         int rc;
3857         struct hwrm_vnic_rss_cos_lb_ctx_alloc_input req = {0};
3858         struct hwrm_vnic_rss_cos_lb_ctx_alloc_output *resp =
3859                                                 bp->hwrm_cmd_resp_addr;
3860
3861         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_RSS_COS_LB_CTX_ALLOC, -1,
3862                                -1);
3863
3864         mutex_lock(&bp->hwrm_cmd_lock);
3865         rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
3866         if (!rc)
3867                 bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx[ctx_idx] =
3868                         le16_to_cpu(resp->rss_cos_lb_ctx_id);
3869         mutex_unlock(&bp->hwrm_cmd_lock);
3870
3871         return rc;
3872 }
3873
3874 int bnxt_hwrm_vnic_cfg(struct bnxt *bp, u16 vnic_id)
3875 {
3876         unsigned int ring = 0, grp_idx;
3877         struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
3878         struct hwrm_vnic_cfg_input req = {0};
3879         u16 def_vlan = 0;
3880
3881         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_CFG, -1, -1);
3882
3883         req.enables = cpu_to_le32(VNIC_CFG_REQ_ENABLES_DFLT_RING_GRP);
3884         /* Only RSS supported for now; TBD: COS & LB */
3885         if (vnic->fw_rss_cos_lb_ctx[0] != INVALID_HW_RING_ID) {
3886                 req.rss_rule = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[0]);
3887                 req.enables |= cpu_to_le32(VNIC_CFG_REQ_ENABLES_RSS_RULE |
3888                                            VNIC_CFG_REQ_ENABLES_MRU);
3889         } else if (vnic->flags & BNXT_VNIC_RFS_NEW_RSS_FLAG) {
3890                 req.rss_rule =
3891                         cpu_to_le16(bp->vnic_info[0].fw_rss_cos_lb_ctx[0]);
3892                 req.enables |= cpu_to_le32(VNIC_CFG_REQ_ENABLES_RSS_RULE |
3893                                            VNIC_CFG_REQ_ENABLES_MRU);
3894                 req.flags |= cpu_to_le32(VNIC_CFG_REQ_FLAGS_RSS_DFLT_CR_MODE);
3895         } else {
3896                 req.rss_rule = cpu_to_le16(0xffff);
3897         }
3898
3899         if (BNXT_CHIP_TYPE_NITRO_A0(bp) &&
3900             (vnic->fw_rss_cos_lb_ctx[0] != INVALID_HW_RING_ID)) {
3901                 req.cos_rule = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[1]);
3902                 req.enables |= cpu_to_le32(VNIC_CFG_REQ_ENABLES_COS_RULE);
3903         } else {
3904                 req.cos_rule = cpu_to_le16(0xffff);
3905         }
3906
3907         if (vnic->flags & BNXT_VNIC_RSS_FLAG)
3908                 ring = 0;
3909         else if (vnic->flags & BNXT_VNIC_RFS_FLAG)
3910                 ring = vnic_id - 1;
3911         else if ((vnic_id == 1) && BNXT_CHIP_TYPE_NITRO_A0(bp))
3912                 ring = bp->rx_nr_rings - 1;
3913
3914         grp_idx = bp->rx_ring[ring].bnapi->index;
3915         req.vnic_id = cpu_to_le16(vnic->fw_vnic_id);
3916         req.dflt_ring_grp = cpu_to_le16(bp->grp_info[grp_idx].fw_grp_id);
3917
3918         req.lb_rule = cpu_to_le16(0xffff);
3919         req.mru = cpu_to_le16(bp->dev->mtu + ETH_HLEN + ETH_FCS_LEN +
3920                               VLAN_HLEN);
3921
3922 #ifdef CONFIG_BNXT_SRIOV
3923         if (BNXT_VF(bp))
3924                 def_vlan = bp->vf.vlan;
3925 #endif
3926         if ((bp->flags & BNXT_FLAG_STRIP_VLAN) || def_vlan)
3927                 req.flags |= cpu_to_le32(VNIC_CFG_REQ_FLAGS_VLAN_STRIP_MODE);
3928         if (!vnic_id && bnxt_ulp_registered(bp->edev, BNXT_ROCE_ULP))
3929                 req.flags |=
3930                         cpu_to_le32(VNIC_CFG_REQ_FLAGS_ROCE_DUAL_VNIC_MODE);
3931
3932         return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
3933 }
3934
3935 static int bnxt_hwrm_vnic_free_one(struct bnxt *bp, u16 vnic_id)
3936 {
3937         int rc = 0;
3938
3939         if (bp->vnic_info[vnic_id].fw_vnic_id != INVALID_HW_RING_ID) {
3940                 struct hwrm_vnic_free_input req = {0};
3941
3942                 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_FREE, -1, -1);
3943                 req.vnic_id =
3944                         cpu_to_le32(bp->vnic_info[vnic_id].fw_vnic_id);
3945
3946                 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
3947                 if (rc)
3948                         return rc;
3949                 bp->vnic_info[vnic_id].fw_vnic_id = INVALID_HW_RING_ID;
3950         }
3951         return rc;
3952 }
3953
3954 static void bnxt_hwrm_vnic_free(struct bnxt *bp)
3955 {
3956         u16 i;
3957
3958         for (i = 0; i < bp->nr_vnics; i++)
3959                 bnxt_hwrm_vnic_free_one(bp, i);
3960 }
3961
3962 static int bnxt_hwrm_vnic_alloc(struct bnxt *bp, u16 vnic_id,
3963                                 unsigned int start_rx_ring_idx,
3964                                 unsigned int nr_rings)
3965 {
3966         int rc = 0;
3967         unsigned int i, j, grp_idx, end_idx = start_rx_ring_idx + nr_rings;
3968         struct hwrm_vnic_alloc_input req = {0};
3969         struct hwrm_vnic_alloc_output *resp = bp->hwrm_cmd_resp_addr;
3970
3971         /* map ring groups to this vnic */
3972         for (i = start_rx_ring_idx, j = 0; i < end_idx; i++, j++) {
3973                 grp_idx = bp->rx_ring[i].bnapi->index;
3974                 if (bp->grp_info[grp_idx].fw_grp_id == INVALID_HW_RING_ID) {
3975                         netdev_err(bp->dev, "Not enough ring groups avail:%x req:%x\n",
3976                                    j, nr_rings);
3977                         break;
3978                 }
3979                 bp->vnic_info[vnic_id].fw_grp_ids[j] =
3980                                         bp->grp_info[grp_idx].fw_grp_id;
3981         }
3982
3983         bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx[0] = INVALID_HW_RING_ID;
3984         bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx[1] = INVALID_HW_RING_ID;
3985         if (vnic_id == 0)
3986                 req.flags = cpu_to_le32(VNIC_ALLOC_REQ_FLAGS_DEFAULT);
3987
3988         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_ALLOC, -1, -1);
3989
3990         mutex_lock(&bp->hwrm_cmd_lock);
3991         rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
3992         if (!rc)
3993                 bp->vnic_info[vnic_id].fw_vnic_id = le32_to_cpu(resp->vnic_id);
3994         mutex_unlock(&bp->hwrm_cmd_lock);
3995         return rc;
3996 }
3997
3998 static int bnxt_hwrm_vnic_qcaps(struct bnxt *bp)
3999 {
4000         struct hwrm_vnic_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
4001         struct hwrm_vnic_qcaps_input req = {0};
4002         int rc;
4003
4004         if (bp->hwrm_spec_code < 0x10600)
4005                 return 0;
4006
4007         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_QCAPS, -1, -1);
4008         mutex_lock(&bp->hwrm_cmd_lock);
4009         rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4010         if (!rc) {
4011                 if (resp->flags &
4012                     cpu_to_le32(VNIC_QCAPS_RESP_FLAGS_RSS_DFLT_CR_CAP))
4013                         bp->flags |= BNXT_FLAG_NEW_RSS_CAP;
4014         }
4015         mutex_unlock(&bp->hwrm_cmd_lock);
4016         return rc;
4017 }
4018
4019 static int bnxt_hwrm_ring_grp_alloc(struct bnxt *bp)
4020 {
4021         u16 i;
4022         int rc = 0;
4023
4024         mutex_lock(&bp->hwrm_cmd_lock);
4025         for (i = 0; i < bp->rx_nr_rings; i++) {
4026                 struct hwrm_ring_grp_alloc_input req = {0};
4027                 struct hwrm_ring_grp_alloc_output *resp =
4028                                         bp->hwrm_cmd_resp_addr;
4029                 unsigned int grp_idx = bp->rx_ring[i].bnapi->index;
4030
4031                 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_GRP_ALLOC, -1, -1);
4032
4033                 req.cr = cpu_to_le16(bp->grp_info[grp_idx].cp_fw_ring_id);
4034                 req.rr = cpu_to_le16(bp->grp_info[grp_idx].rx_fw_ring_id);
4035                 req.ar = cpu_to_le16(bp->grp_info[grp_idx].agg_fw_ring_id);
4036                 req.sc = cpu_to_le16(bp->grp_info[grp_idx].fw_stats_ctx);
4037
4038                 rc = _hwrm_send_message(bp, &req, sizeof(req),
4039                                         HWRM_CMD_TIMEOUT);
4040                 if (rc)
4041                         break;
4042
4043                 bp->grp_info[grp_idx].fw_grp_id =
4044                         le32_to_cpu(resp->ring_group_id);
4045         }
4046         mutex_unlock(&bp->hwrm_cmd_lock);
4047         return rc;
4048 }
4049
4050 static int bnxt_hwrm_ring_grp_free(struct bnxt *bp)
4051 {
4052         u16 i;
4053         int rc = 0;
4054         struct hwrm_ring_grp_free_input req = {0};
4055
4056         if (!bp->grp_info)
4057                 return 0;
4058
4059         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_GRP_FREE, -1, -1);
4060
4061         mutex_lock(&bp->hwrm_cmd_lock);
4062         for (i = 0; i < bp->cp_nr_rings; i++) {
4063                 if (bp->grp_info[i].fw_grp_id == INVALID_HW_RING_ID)
4064                         continue;
4065                 req.ring_group_id =
4066                         cpu_to_le32(bp->grp_info[i].fw_grp_id);
4067
4068                 rc = _hwrm_send_message(bp, &req, sizeof(req),
4069                                         HWRM_CMD_TIMEOUT);
4070                 if (rc)
4071                         break;
4072                 bp->grp_info[i].fw_grp_id = INVALID_HW_RING_ID;
4073         }
4074         mutex_unlock(&bp->hwrm_cmd_lock);
4075         return rc;
4076 }
4077
4078 static int hwrm_ring_alloc_send_msg(struct bnxt *bp,
4079                                     struct bnxt_ring_struct *ring,
4080                                     u32 ring_type, u32 map_index,
4081                                     u32 stats_ctx_id)
4082 {
4083         int rc = 0, err = 0;
4084         struct hwrm_ring_alloc_input req = {0};
4085         struct hwrm_ring_alloc_output *resp = bp->hwrm_cmd_resp_addr;
4086         u16 ring_id;
4087
4088         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_ALLOC, -1, -1);
4089
4090         req.enables = 0;
4091         if (ring->nr_pages > 1) {
4092                 req.page_tbl_addr = cpu_to_le64(ring->pg_tbl_map);
4093                 /* Page size is in log2 units */
4094                 req.page_size = BNXT_PAGE_SHIFT;
4095                 req.page_tbl_depth = 1;
4096         } else {
4097                 req.page_tbl_addr = cpu_to_le64(ring->dma_arr[0]);
4098         }
4099         req.fbo = 0;
4100         /* Association of ring index with doorbell index and MSIX number */
4101         req.logical_id = cpu_to_le16(map_index);
4102
4103         switch (ring_type) {
4104         case HWRM_RING_ALLOC_TX:
4105                 req.ring_type = RING_ALLOC_REQ_RING_TYPE_TX;
4106                 /* Association of transmit ring with completion ring */
4107                 req.cmpl_ring_id =
4108                         cpu_to_le16(bp->grp_info[map_index].cp_fw_ring_id);
4109                 req.length = cpu_to_le32(bp->tx_ring_mask + 1);
4110                 req.stat_ctx_id = cpu_to_le32(stats_ctx_id);
4111                 req.queue_id = cpu_to_le16(ring->queue_id);
4112                 break;
4113         case HWRM_RING_ALLOC_RX:
4114                 req.ring_type = RING_ALLOC_REQ_RING_TYPE_RX;
4115                 req.length = cpu_to_le32(bp->rx_ring_mask + 1);
4116                 break;
4117         case HWRM_RING_ALLOC_AGG:
4118                 req.ring_type = RING_ALLOC_REQ_RING_TYPE_RX;
4119                 req.length = cpu_to_le32(bp->rx_agg_ring_mask + 1);
4120                 break;
4121         case HWRM_RING_ALLOC_CMPL:
4122                 req.ring_type = RING_ALLOC_REQ_RING_TYPE_L2_CMPL;
4123                 req.length = cpu_to_le32(bp->cp_ring_mask + 1);
4124                 if (bp->flags & BNXT_FLAG_USING_MSIX)
4125                         req.int_mode = RING_ALLOC_REQ_INT_MODE_MSIX;
4126                 break;
4127         default:
4128                 netdev_err(bp->dev, "hwrm alloc invalid ring type %d\n",
4129                            ring_type);
4130                 return -1;
4131         }
4132
4133         mutex_lock(&bp->hwrm_cmd_lock);
4134         rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4135         err = le16_to_cpu(resp->error_code);
4136         ring_id = le16_to_cpu(resp->ring_id);
4137         mutex_unlock(&bp->hwrm_cmd_lock);
4138
4139         if (rc || err) {
4140                 switch (ring_type) {
4141                 case RING_FREE_REQ_RING_TYPE_L2_CMPL:
4142                         netdev_err(bp->dev, "hwrm_ring_alloc cp failed. rc:%x err:%x\n",
4143                                    rc, err);
4144                         return -1;
4145
4146                 case RING_FREE_REQ_RING_TYPE_RX:
4147                         netdev_err(bp->dev, "hwrm_ring_alloc rx failed. rc:%x err:%x\n",
4148                                    rc, err);
4149                         return -1;
4150
4151                 case RING_FREE_REQ_RING_TYPE_TX:
4152                         netdev_err(bp->dev, "hwrm_ring_alloc tx failed. rc:%x err:%x\n",
4153                                    rc, err);
4154                         return -1;
4155
4156                 default:
4157                         netdev_err(bp->dev, "Invalid ring\n");
4158                         return -1;
4159                 }
4160         }
4161         ring->fw_ring_id = ring_id;
4162         return rc;
4163 }
4164
4165 static int bnxt_hwrm_set_async_event_cr(struct bnxt *bp, int idx)
4166 {
4167         int rc;
4168
4169         if (BNXT_PF(bp)) {
4170                 struct hwrm_func_cfg_input req = {0};
4171
4172                 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
4173                 req.fid = cpu_to_le16(0xffff);
4174                 req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_ASYNC_EVENT_CR);
4175                 req.async_event_cr = cpu_to_le16(idx);
4176                 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4177         } else {
4178                 struct hwrm_func_vf_cfg_input req = {0};
4179
4180                 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_VF_CFG, -1, -1);
4181                 req.enables =
4182                         cpu_to_le32(FUNC_VF_CFG_REQ_ENABLES_ASYNC_EVENT_CR);
4183                 req.async_event_cr = cpu_to_le16(idx);
4184                 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4185         }
4186         return rc;
4187 }
4188
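/* Allocate every firmware ring in dependency order: completion rings
 * first (ring 0 doubles as the async event ring), then TX, RX and,
 * when aggregation is enabled, the RX aggregation rings.  Doorbells
 * are spaced 0x80 bytes apart in BAR1, so ring index i maps to
 * bar1 + i * 0x80.
 */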
4189 static int bnxt_hwrm_ring_alloc(struct bnxt *bp)
4190 {
4191         int i, rc = 0;
4192
4193         for (i = 0; i < bp->cp_nr_rings; i++) {
4194                 struct bnxt_napi *bnapi = bp->bnapi[i];
4195                 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
4196                 struct bnxt_ring_struct *ring = &cpr->cp_ring_struct;
4197
4198                 cpr->cp_doorbell = bp->bar1 + i * 0x80;
4199                 rc = hwrm_ring_alloc_send_msg(bp, ring, HWRM_RING_ALLOC_CMPL, i,
4200                                               INVALID_STATS_CTX_ID);
4201                 if (rc)
4202                         goto err_out;
4203                 BNXT_CP_DB(cpr->cp_doorbell, cpr->cp_raw_cons);
4204                 bp->grp_info[i].cp_fw_ring_id = ring->fw_ring_id;
4205
4206                 if (!i) {
4207                         rc = bnxt_hwrm_set_async_event_cr(bp, ring->fw_ring_id);
4208                         if (rc)
4209                                 netdev_warn(bp->dev, "Failed to set async event completion ring.\n");
4210                 }
4211         }
4212
4213         for (i = 0; i < bp->tx_nr_rings; i++) {
4214                 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
4215                 struct bnxt_ring_struct *ring = &txr->tx_ring_struct;
4216                 u32 map_idx = txr->bnapi->index;
4217                 u16 fw_stats_ctx = bp->grp_info[map_idx].fw_stats_ctx;
4218
4219                 rc = hwrm_ring_alloc_send_msg(bp, ring, HWRM_RING_ALLOC_TX,
4220                                               map_idx, fw_stats_ctx);
4221                 if (rc)
4222                         goto err_out;
4223                 txr->tx_doorbell = bp->bar1 + map_idx * 0x80;
4224         }
4225
4226         for (i = 0; i < bp->rx_nr_rings; i++) {
4227                 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
4228                 struct bnxt_ring_struct *ring = &rxr->rx_ring_struct;
4229                 u32 map_idx = rxr->bnapi->index;
4230
4231                 rc = hwrm_ring_alloc_send_msg(bp, ring, HWRM_RING_ALLOC_RX,
4232                                               map_idx, INVALID_STATS_CTX_ID);
4233                 if (rc)
4234                         goto err_out;
4235                 rxr->rx_doorbell = bp->bar1 + map_idx * 0x80;
4236                 writel(DB_KEY_RX | rxr->rx_prod, rxr->rx_doorbell);
4237                 bp->grp_info[map_idx].rx_fw_ring_id = ring->fw_ring_id;
4238         }
4239
4240         if (bp->flags & BNXT_FLAG_AGG_RINGS) {
4241                 for (i = 0; i < bp->rx_nr_rings; i++) {
4242                         struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
4243                         struct bnxt_ring_struct *ring =
4244                                                 &rxr->rx_agg_ring_struct;
4245                         u32 grp_idx = rxr->bnapi->index;
4246                         u32 map_idx = grp_idx + bp->rx_nr_rings;
4247
4248                         rc = hwrm_ring_alloc_send_msg(bp, ring,
4249                                                       HWRM_RING_ALLOC_AGG,
4250                                                       map_idx,
4251                                                       INVALID_STATS_CTX_ID);
4252                         if (rc)
4253                                 goto err_out;
4254
4255                         rxr->rx_agg_doorbell = bp->bar1 + map_idx * 0x80;
4256                         writel(DB_KEY_RX | rxr->rx_agg_prod,
4257                                rxr->rx_agg_doorbell);
4258                         bp->grp_info[grp_idx].agg_fw_ring_id = ring->fw_ring_id;
4259                 }
4260         }
4261 err_out:
4262         return rc;
4263 }
4264
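/* Send one HWRM_RING_FREE request.  The cmpl_ring_id goes into the
 * request header; bnxt_hwrm_ring_free() below passes the ring's
 * completion ring when closing the path and INVALID_HW_RING_ID
 * otherwise.
 */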
4265 static int hwrm_ring_free_send_msg(struct bnxt *bp,
4266                                    struct bnxt_ring_struct *ring,
4267                                    u32 ring_type, int cmpl_ring_id)
4268 {
4269         int rc;
4270         struct hwrm_ring_free_input req = {0};
4271         struct hwrm_ring_free_output *resp = bp->hwrm_cmd_resp_addr;
4272         u16 error_code;
4273
4274         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_FREE, cmpl_ring_id, -1);
4275         req.ring_type = ring_type;
4276         req.ring_id = cpu_to_le16(ring->fw_ring_id);
4277
4278         mutex_lock(&bp->hwrm_cmd_lock);
4279         rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4280         error_code = le16_to_cpu(resp->error_code);
4281         mutex_unlock(&bp->hwrm_cmd_lock);
4282
4283         if (rc || error_code) {
4284                 switch (ring_type) {
4285                 case RING_FREE_REQ_RING_TYPE_L2_CMPL:
4286                         netdev_err(bp->dev, "hwrm_ring_free cp failed. rc:%d\n",
4287                                    rc);
4288                         return rc;
4289                 case RING_FREE_REQ_RING_TYPE_RX:
4290                         netdev_err(bp->dev, "hwrm_ring_free rx failed. rc:%d\n",
4291                                    rc);
4292                         return rc;
4293                 case RING_FREE_REQ_RING_TYPE_TX:
4294                         netdev_err(bp->dev, "hwrm_ring_free tx failed. rc:%d\n",
4295                                    rc);
4296                         return rc;
4297                 default:
4298                         netdev_err(bp->dev, "Invalid ring\n");
4299                         return -1;
4300                 }
4301         }
4302         return 0;
4303 }
4304
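/* Free all firmware rings in roughly the reverse of the allocation
 * order: TX, RX, aggregation, and finally the completion rings, with
 * interrupts disabled just before the completion rings go away since
 * the IRQ doorbells die with them.
 */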
4305 static void bnxt_hwrm_ring_free(struct bnxt *bp, bool close_path)
4306 {
4307         int i;
4308
4309         if (!bp->bnapi)
4310                 return;
4311
4312         for (i = 0; i < bp->tx_nr_rings; i++) {
4313                 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
4314                 struct bnxt_ring_struct *ring = &txr->tx_ring_struct;
4315                 u32 grp_idx = txr->bnapi->index;
4316                 u32 cmpl_ring_id = bp->grp_info[grp_idx].cp_fw_ring_id;
4317
4318                 if (ring->fw_ring_id != INVALID_HW_RING_ID) {
4319                         hwrm_ring_free_send_msg(bp, ring,
4320                                                 RING_FREE_REQ_RING_TYPE_TX,
4321                                                 close_path ? cmpl_ring_id :
4322                                                 INVALID_HW_RING_ID);
4323                         ring->fw_ring_id = INVALID_HW_RING_ID;
4324                 }
4325         }
4326
4327         for (i = 0; i < bp->rx_nr_rings; i++) {
4328                 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
4329                 struct bnxt_ring_struct *ring = &rxr->rx_ring_struct;
4330                 u32 grp_idx = rxr->bnapi->index;
4331                 u32 cmpl_ring_id = bp->grp_info[grp_idx].cp_fw_ring_id;
4332
4333                 if (ring->fw_ring_id != INVALID_HW_RING_ID) {
4334                         hwrm_ring_free_send_msg(bp, ring,
4335                                                 RING_FREE_REQ_RING_TYPE_RX,
4336                                                 close_path ? cmpl_ring_id :
4337                                                 INVALID_HW_RING_ID);
4338                         ring->fw_ring_id = INVALID_HW_RING_ID;
4339                         bp->grp_info[grp_idx].rx_fw_ring_id =
4340                                 INVALID_HW_RING_ID;
4341                 }
4342         }
4343
4344         for (i = 0; i < bp->rx_nr_rings; i++) {
4345                 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
4346                 struct bnxt_ring_struct *ring = &rxr->rx_agg_ring_struct;
4347                 u32 grp_idx = rxr->bnapi->index;
4348                 u32 cmpl_ring_id = bp->grp_info[grp_idx].cp_fw_ring_id;
4349
4350                 if (ring->fw_ring_id != INVALID_HW_RING_ID) {
4351                         hwrm_ring_free_send_msg(bp, ring,
4352                                                 RING_FREE_REQ_RING_TYPE_RX,
4353                                                 close_path ? cmpl_ring_id :
4354                                                 INVALID_HW_RING_ID);
4355                         ring->fw_ring_id = INVALID_HW_RING_ID;
4356                         bp->grp_info[grp_idx].agg_fw_ring_id =
4357                                 INVALID_HW_RING_ID;
4358                 }
4359         }
4360
4361         /* The completion rings are about to be freed.  After that the
4362          * IRQ doorbell will not work anymore.  So we need to disable
4363          * IRQ here.
4364          */
4365         bnxt_disable_int_sync(bp);
4366
4367         for (i = 0; i < bp->cp_nr_rings; i++) {
4368                 struct bnxt_napi *bnapi = bp->bnapi[i];
4369                 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
4370                 struct bnxt_ring_struct *ring = &cpr->cp_ring_struct;
4371
4372                 if (ring->fw_ring_id != INVALID_HW_RING_ID) {
4373                         hwrm_ring_free_send_msg(bp, ring,
4374                                                 RING_FREE_REQ_RING_TYPE_L2_CMPL,
4375                                                 INVALID_HW_RING_ID);
4376                         ring->fw_ring_id = INVALID_HW_RING_ID;
4377                         bp->grp_info[i].cp_fw_ring_id = INVALID_HW_RING_ID;
4378                 }
4379         }
4380 }
4381
4382 /* Caller must hold bp->hwrm_cmd_lock */
4383 int __bnxt_hwrm_get_tx_rings(struct bnxt *bp, u16 fid, int *tx_rings)
4384 {
4385         struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
4386         struct hwrm_func_qcfg_input req = {0};
4387         int rc;
4388
4389         if (bp->hwrm_spec_code < 0x10601)
4390                 return 0;
4391
4392         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QCFG, -1, -1);
4393         req.fid = cpu_to_le16(fid);
4394         rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4395         if (!rc)
4396                 *tx_rings = le16_to_cpu(resp->alloc_tx_rings);
4397
4398         return rc;
4399 }
4400
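/* Reserve TX rings with the firmware (HWRM spec 1.6.1+, PF only) and
 * read back how many were actually granted.  A hypothetical usage
 * sketch:
 *
 *	int tx = 8;
 *
 *	if (!bnxt_hwrm_reserve_tx_rings(bp, &tx))
 *		;	// tx now holds the count the firmware accepted
 */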
4401 static int bnxt_hwrm_reserve_tx_rings(struct bnxt *bp, int *tx_rings)
4402 {
4403         struct hwrm_func_cfg_input req = {0};
4404         int rc;
4405
4406         if (bp->hwrm_spec_code < 0x10601)
4407                 return 0;
4408
4409         if (BNXT_VF(bp))
4410                 return 0;
4411
4412         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
4413         req.fid = cpu_to_le16(0xffff);
4414         req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_NUM_TX_RINGS);
4415         req.num_tx_rings = cpu_to_le16(*tx_rings);
4416         rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4417         if (rc)
4418                 return rc;
4419
4420         mutex_lock(&bp->hwrm_cmd_lock);
4421         rc = __bnxt_hwrm_get_tx_rings(bp, 0xffff, tx_rings);
4422         mutex_unlock(&bp->hwrm_cmd_lock);
4423         return rc;
4424 }
4425
4426 static void bnxt_hwrm_set_coal_params(struct bnxt *bp, u32 max_bufs,
4427         u32 buf_tmrs, u16 flags,
4428         struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req)
4429 {
4430         req->flags = cpu_to_le16(flags);
4431         req->num_cmpl_dma_aggr = cpu_to_le16((u16)max_bufs);
4432         req->num_cmpl_dma_aggr_during_int = cpu_to_le16(max_bufs >> 16);
4433         req->cmpl_aggr_dma_tmr = cpu_to_le16((u16)buf_tmrs);
4434         req->cmpl_aggr_dma_tmr_during_int = cpu_to_le16(buf_tmrs >> 16);
4435         /* Minimum time between 2 interrupts set to buf_tmr x 2 */
4436         req->int_lat_tmr_min = cpu_to_le16((u16)buf_tmrs * 2);
4437         req->int_lat_tmr_max = cpu_to_le16((u16)buf_tmrs * 4);
4438         req->num_cmpl_aggr_int = cpu_to_le16((u16)max_bufs * 4);
4439 }
4440
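/* Program interrupt coalescing for every completion ring.
 * bnxt_hwrm_set_coal_params() above unpacks two u16 values from each
 * u32 argument: the low 16 bits apply outside interrupt context and
 * the high 16 bits during interrupts.  Worked example (hypothetical
 * settings, assuming BNXT_USEC_TO_COAL_TIMER() converts microseconds
 * to 80 ns timer units): rx_coal_ticks = 12 gives buf_tmr =
 * max(150 / 4, 1) = 37, so int_lat_tmr_min = 74 and
 * int_lat_tmr_max = 148 timer units.
 */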
4441 int bnxt_hwrm_set_coal(struct bnxt *bp)
4442 {
4443         int i, rc = 0;
4444         struct hwrm_ring_cmpl_ring_cfg_aggint_params_input req_rx = {0},
4445                                                            req_tx = {0}, *req;
4446         u16 max_buf, max_buf_irq;
4447         u16 buf_tmr, buf_tmr_irq;
4448         u32 flags;
4449
4450         bnxt_hwrm_cmd_hdr_init(bp, &req_rx,
4451                                HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS, -1, -1);
4452         bnxt_hwrm_cmd_hdr_init(bp, &req_tx,
4453                                HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS, -1, -1);
4454
4455         /* Each rx completion (2 records) should be DMAed immediately.
4456          * DMA 1/4 of the completion buffers at a time.
4457          */
4458         max_buf = min_t(u16, bp->rx_coal_bufs / 4, 2);
4459         /* max_buf must not be zero */
4460         max_buf = clamp_t(u16, max_buf, 1, 63);
4461         max_buf_irq = clamp_t(u16, bp->rx_coal_bufs_irq, 1, 63);
4462         buf_tmr = BNXT_USEC_TO_COAL_TIMER(bp->rx_coal_ticks);
4463         /* buf timer set to 1/4 of interrupt timer */
4464         buf_tmr = max_t(u16, buf_tmr / 4, 1);
4465         buf_tmr_irq = BNXT_USEC_TO_COAL_TIMER(bp->rx_coal_ticks_irq);
4466         buf_tmr_irq = max_t(u16, buf_tmr_irq, 1);
4467
4468         flags = RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_TIMER_RESET;
4469
4470         /* RING_IDLE generates more IRQs for lower latency.  Enable it only
4471          * if coal_ticks is less than 25 us.
4472          */
4473         if (bp->rx_coal_ticks < 25)
4474                 flags |= RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_RING_IDLE;
4475
4476         bnxt_hwrm_set_coal_params(bp, max_buf_irq << 16 | max_buf,
4477                                   buf_tmr_irq << 16 | buf_tmr, flags, &req_rx);
4478
4479         /* max_buf must not be zero */
4480         max_buf = clamp_t(u16, bp->tx_coal_bufs, 1, 63);
4481         max_buf_irq = clamp_t(u16, bp->tx_coal_bufs_irq, 1, 63);
4482         buf_tmr = BNXT_USEC_TO_COAL_TIMER(bp->tx_coal_ticks);
4483         /* buf timer set to 1/4 of interrupt timer */
4484         buf_tmr = max_t(u16, buf_tmr / 4, 1);
4485         buf_tmr_irq = BNXT_USEC_TO_COAL_TIMER(bp->tx_coal_ticks_irq);
4486         buf_tmr_irq = max_t(u16, buf_tmr_irq, 1);
4487
4488         flags = RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_TIMER_RESET;
4489         bnxt_hwrm_set_coal_params(bp, max_buf_irq << 16 | max_buf,
4490                                   buf_tmr_irq << 16 | buf_tmr, flags, &req_tx);
4491
4492         mutex_lock(&bp->hwrm_cmd_lock);
4493         for (i = 0; i < bp->cp_nr_rings; i++) {
4494                 struct bnxt_napi *bnapi = bp->bnapi[i];
4495
4496                 req = &req_rx;
4497                 if (!bnapi->rx_ring)
4498                         req = &req_tx;
4499                 req->ring_id = cpu_to_le16(bp->grp_info[i].cp_fw_ring_id);
4500
4501                 rc = _hwrm_send_message(bp, req, sizeof(*req),
4502                                         HWRM_CMD_TIMEOUT);
4503                 if (rc)
4504                         break;
4505         }
4506         mutex_unlock(&bp->hwrm_cmd_lock);
4507         return rc;
4508 }
4509
4510 static int bnxt_hwrm_stat_ctx_free(struct bnxt *bp)
4511 {
4512         int rc = 0, i;
4513         struct hwrm_stat_ctx_free_input req = {0};
4514
4515         if (!bp->bnapi)
4516                 return 0;
4517
4518         if (BNXT_CHIP_TYPE_NITRO_A0(bp))
4519                 return 0;
4520
4521         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_STAT_CTX_FREE, -1, -1);
4522
4523         mutex_lock(&bp->hwrm_cmd_lock);
4524         for (i = 0; i < bp->cp_nr_rings; i++) {
4525                 struct bnxt_napi *bnapi = bp->bnapi[i];
4526                 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
4527
4528                 if (cpr->hw_stats_ctx_id != INVALID_STATS_CTX_ID) {
4529                         req.stat_ctx_id = cpu_to_le32(cpr->hw_stats_ctx_id);
4530
4531                         rc = _hwrm_send_message(bp, &req, sizeof(req),
4532                                                 HWRM_CMD_TIMEOUT);
4533                         if (rc)
4534                                 break;
4535
4536                         cpr->hw_stats_ctx_id = INVALID_STATS_CTX_ID;
4537                 }
4538         }
4539         mutex_unlock(&bp->hwrm_cmd_lock);
4540         return rc;
4541 }
4542
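/* Allocate one statistics context per completion ring and cache the
 * firmware ids.  update_period_ms controls how often the firmware
 * DMAs counters into the per-ring hw_stats buffer; the / 1000 below
 * suggests stats_coal_ticks is kept in microseconds.
 */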
4543 static int bnxt_hwrm_stat_ctx_alloc(struct bnxt *bp)
4544 {
4545         int rc = 0, i;
4546         struct hwrm_stat_ctx_alloc_input req = {0};
4547         struct hwrm_stat_ctx_alloc_output *resp = bp->hwrm_cmd_resp_addr;
4548
4549         if (BNXT_CHIP_TYPE_NITRO_A0(bp))
4550                 return 0;
4551
4552         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_STAT_CTX_ALLOC, -1, -1);
4553
4554         req.update_period_ms = cpu_to_le32(bp->stats_coal_ticks / 1000);
4555
4556         mutex_lock(&bp->hwrm_cmd_lock);
4557         for (i = 0; i < bp->cp_nr_rings; i++) {
4558                 struct bnxt_napi *bnapi = bp->bnapi[i];
4559                 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
4560
4561                 req.stats_dma_addr = cpu_to_le64(cpr->hw_stats_map);
4562
4563                 rc = _hwrm_send_message(bp, &req, sizeof(req),
4564                                         HWRM_CMD_TIMEOUT);
4565                 if (rc)
4566                         break;
4567
4568                 cpr->hw_stats_ctx_id = le32_to_cpu(resp->stat_ctx_id);
4569
4570                 bp->grp_info[i].fw_stats_ctx = cpr->hw_stats_ctx_id;
4571         }
4572         mutex_unlock(&bp->hwrm_cmd_lock);
4573         return rc;
4574 }
4575
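/* Query the current function configuration: a VF caches its default
 * VLAN, the PF latches the LLDP-agent and multi-host flags, and any
 * NPAR partition type is recorded for both.
 */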
4576 static int bnxt_hwrm_func_qcfg(struct bnxt *bp)
4577 {
4578         struct hwrm_func_qcfg_input req = {0};
4579         struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
4580         int rc;
4581
4582         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QCFG, -1, -1);
4583         req.fid = cpu_to_le16(0xffff);
4584         mutex_lock(&bp->hwrm_cmd_lock);
4585         rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4586         if (rc)
4587                 goto func_qcfg_exit;
4588
4589 #ifdef CONFIG_BNXT_SRIOV
4590         if (BNXT_VF(bp)) {
4591                 struct bnxt_vf_info *vf = &bp->vf;
4592
4593                 vf->vlan = le16_to_cpu(resp->vlan) & VLAN_VID_MASK;
4594         }
4595 #endif
4596         if (BNXT_PF(bp)) {
4597                 u16 flags = le16_to_cpu(resp->flags);
4598
4599                 if (flags & (FUNC_QCFG_RESP_FLAGS_FW_DCBX_AGENT_ENABLED |
4600                              FUNC_QCFG_RESP_FLAGS_FW_LLDP_AGENT_ENABLED))
4601                         bp->flags |= BNXT_FLAG_FW_LLDP_AGENT;
4602                 if (flags & FUNC_QCFG_RESP_FLAGS_MULTI_HOST)
4603                         bp->flags |= BNXT_FLAG_MULTI_HOST;
4604         }
4605
4606         switch (resp->port_partition_type) {
4607         case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_0:
4608         case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_5:
4609         case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR2_0:
4610                 bp->port_partition_type = resp->port_partition_type;
4611                 break;
4612         }
4613
4614 func_qcfg_exit:
4615         mutex_unlock(&bp->hwrm_cmd_lock);
4616         return rc;
4617 }
4618
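/* Query device capabilities and cache the per-function resource
 * limits in bp->pf or bp->vf.  A VF without an admin-assigned MAC
 * falls back to a random address and asks the PF to approve it via
 * bnxt_approve_mac().
 */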
4619 static int bnxt_hwrm_func_qcaps(struct bnxt *bp)
4620 {
4621         int rc = 0;
4622         struct hwrm_func_qcaps_input req = {0};
4623         struct hwrm_func_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
4624
4625         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QCAPS, -1, -1);
4626         req.fid = cpu_to_le16(0xffff);
4627
4628         mutex_lock(&bp->hwrm_cmd_lock);
4629         rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4630         if (rc)
4631                 goto hwrm_func_qcaps_exit;
4632
4633         if (resp->flags & cpu_to_le32(FUNC_QCAPS_RESP_FLAGS_ROCE_V1_SUPPORTED))
4634                 bp->flags |= BNXT_FLAG_ROCEV1_CAP;
4635         if (resp->flags & cpu_to_le32(FUNC_QCAPS_RESP_FLAGS_ROCE_V2_SUPPORTED))
4636                 bp->flags |= BNXT_FLAG_ROCEV2_CAP;
4637
4638         bp->tx_push_thresh = 0;
4639         if (resp->flags &
4640             cpu_to_le32(FUNC_QCAPS_RESP_FLAGS_PUSH_MODE_SUPPORTED))
4641                 bp->tx_push_thresh = BNXT_TX_PUSH_THRESH;
4642
4643         if (BNXT_PF(bp)) {
4644                 struct bnxt_pf_info *pf = &bp->pf;
4645
4646                 pf->fw_fid = le16_to_cpu(resp->fid);
4647                 pf->port_id = le16_to_cpu(resp->port_id);
4648                 bp->dev->dev_port = pf->port_id;
4649                 memcpy(pf->mac_addr, resp->mac_address, ETH_ALEN);
4650                 memcpy(bp->dev->dev_addr, pf->mac_addr, ETH_ALEN);
4651                 pf->max_rsscos_ctxs = le16_to_cpu(resp->max_rsscos_ctx);
4652                 pf->max_cp_rings = le16_to_cpu(resp->max_cmpl_rings);
4653                 pf->max_tx_rings = le16_to_cpu(resp->max_tx_rings);
4654                 pf->max_rx_rings = le16_to_cpu(resp->max_rx_rings);
4655                 pf->max_hw_ring_grps = le32_to_cpu(resp->max_hw_ring_grps);
4656                 if (!pf->max_hw_ring_grps)
4657                         pf->max_hw_ring_grps = pf->max_tx_rings;
4658                 pf->max_l2_ctxs = le16_to_cpu(resp->max_l2_ctxs);
4659                 pf->max_vnics = le16_to_cpu(resp->max_vnics);
4660                 pf->max_stat_ctxs = le16_to_cpu(resp->max_stat_ctx);
4661                 pf->first_vf_id = le16_to_cpu(resp->first_vf_id);
4662                 pf->max_vfs = le16_to_cpu(resp->max_vfs);
4663                 pf->max_encap_records = le32_to_cpu(resp->max_encap_records);
4664                 pf->max_decap_records = le32_to_cpu(resp->max_decap_records);
4665                 pf->max_tx_em_flows = le32_to_cpu(resp->max_tx_em_flows);
4666                 pf->max_tx_wm_flows = le32_to_cpu(resp->max_tx_wm_flows);
4667                 pf->max_rx_em_flows = le32_to_cpu(resp->max_rx_em_flows);
4668                 pf->max_rx_wm_flows = le32_to_cpu(resp->max_rx_wm_flows);
4669                 if (resp->flags &
4670                     cpu_to_le32(FUNC_QCAPS_RESP_FLAGS_WOL_MAGICPKT_SUPPORTED))
4671                         bp->flags |= BNXT_FLAG_WOL_CAP;
4672         } else {
4673 #ifdef CONFIG_BNXT_SRIOV
4674                 struct bnxt_vf_info *vf = &bp->vf;
4675
4676                 vf->fw_fid = le16_to_cpu(resp->fid);
4677
4678                 vf->max_rsscos_ctxs = le16_to_cpu(resp->max_rsscos_ctx);
4679                 vf->max_cp_rings = le16_to_cpu(resp->max_cmpl_rings);
4680                 vf->max_tx_rings = le16_to_cpu(resp->max_tx_rings);
4681                 vf->max_rx_rings = le16_to_cpu(resp->max_rx_rings);
4682                 vf->max_hw_ring_grps = le32_to_cpu(resp->max_hw_ring_grps);
4683                 if (!vf->max_hw_ring_grps)
4684                         vf->max_hw_ring_grps = vf->max_tx_rings;
4685                 vf->max_l2_ctxs = le16_to_cpu(resp->max_l2_ctxs);
4686                 vf->max_vnics = le16_to_cpu(resp->max_vnics);
4687                 vf->max_stat_ctxs = le16_to_cpu(resp->max_stat_ctx);
4688
4689                 memcpy(vf->mac_addr, resp->mac_address, ETH_ALEN);
4690                 mutex_unlock(&bp->hwrm_cmd_lock);
4691
4692                 if (is_valid_ether_addr(vf->mac_addr)) {
4693                         /* overwrite netdev dev_addr with admin VF MAC */
4694                         memcpy(bp->dev->dev_addr, vf->mac_addr, ETH_ALEN);
4695                 } else {
4696                         eth_hw_addr_random(bp->dev);
4697                         rc = bnxt_approve_mac(bp, bp->dev->dev_addr);
4698                 }
4699                 return rc;
4700 #endif
4701         }
4702
4703 hwrm_func_qcaps_exit:
4704         mutex_unlock(&bp->hwrm_cmd_lock);
4705         return rc;
4706 }
4707
4708 static int bnxt_hwrm_func_reset(struct bnxt *bp)
4709 {
4710         struct hwrm_func_reset_input req = {0};
4711
4712         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_RESET, -1, -1);
4713         req.enables = 0;
4714
4715         return hwrm_send_message(bp, &req, sizeof(req), HWRM_RESET_TIMEOUT);
4716 }
4717
4718 static int bnxt_hwrm_queue_qportcfg(struct bnxt *bp)
4719 {
4720         int rc = 0;
4721         struct hwrm_queue_qportcfg_input req = {0};
4722         struct hwrm_queue_qportcfg_output *resp = bp->hwrm_cmd_resp_addr;
4723         u8 i, *qptr;
4724
4725         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_QUEUE_QPORTCFG, -1, -1);
4726
4727         mutex_lock(&bp->hwrm_cmd_lock);
4728         rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4729         if (rc)
4730                 goto qportcfg_exit;
4731
4732         if (!resp->max_configurable_queues) {
4733                 rc = -EINVAL;
4734                 goto qportcfg_exit;
4735         }
4736         bp->max_tc = resp->max_configurable_queues;
4737         bp->max_lltc = resp->max_configurable_lossless_queues;
4738         if (bp->max_tc > BNXT_MAX_QUEUE)
4739                 bp->max_tc = BNXT_MAX_QUEUE;
4740
4741         if (resp->queue_cfg_info & QUEUE_QPORTCFG_RESP_QUEUE_CFG_INFO_ASYM_CFG)
4742                 bp->max_tc = 1;
4743
4744         if (bp->max_lltc > bp->max_tc)
4745                 bp->max_lltc = bp->max_tc;
4746
4747         qptr = &resp->queue_id0;
4748         for (i = 0; i < bp->max_tc; i++) {
4749                 bp->q_info[i].queue_id = *qptr++;
4750                 bp->q_info[i].queue_profile = *qptr++;
4751         }
4752
4753 qportcfg_exit:
4754         mutex_unlock(&bp->hwrm_cmd_lock);
4755         return rc;
4756 }
4757
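/* Negotiate the HWRM interface version with the firmware.  The spec
 * code is packed one byte per component, so 0x10601 means 1.6.1;
 * this is the value that the hwrm_spec_code checks elsewhere in the
 * driver compare against.
 */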
4758 static int bnxt_hwrm_ver_get(struct bnxt *bp)
4759 {
4760         int rc;
4761         struct hwrm_ver_get_input req = {0};
4762         struct hwrm_ver_get_output *resp = bp->hwrm_cmd_resp_addr;
4763         u32 dev_caps_cfg;
4764
4765         bp->hwrm_max_req_len = HWRM_MAX_REQ_LEN;
4766         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VER_GET, -1, -1);
4767         req.hwrm_intf_maj = HWRM_VERSION_MAJOR;
4768         req.hwrm_intf_min = HWRM_VERSION_MINOR;
4769         req.hwrm_intf_upd = HWRM_VERSION_UPDATE;
4770         mutex_lock(&bp->hwrm_cmd_lock);
4771         rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4772         if (rc)
4773                 goto hwrm_ver_get_exit;
4774
4775         memcpy(&bp->ver_resp, resp, sizeof(struct hwrm_ver_get_output));
4776
4777         bp->hwrm_spec_code = resp->hwrm_intf_maj << 16 |
4778                              resp->hwrm_intf_min << 8 | resp->hwrm_intf_upd;
4779         if (resp->hwrm_intf_maj < 1) {
4780                 netdev_warn(bp->dev, "HWRM interface %d.%d.%d is older than 1.0.0.\n",
4781                             resp->hwrm_intf_maj, resp->hwrm_intf_min,
4782                             resp->hwrm_intf_upd);
4783                 netdev_warn(bp->dev, "Please update firmware with HWRM interface 1.0.0 or newer.\n");
4784         }
4785         snprintf(bp->fw_ver_str, BC_HWRM_STR_LEN, "%d.%d.%d/%d.%d.%d",
4786                  resp->hwrm_fw_maj, resp->hwrm_fw_min, resp->hwrm_fw_bld,
4787                  resp->hwrm_intf_maj, resp->hwrm_intf_min, resp->hwrm_intf_upd);
4788
4789         bp->hwrm_cmd_timeout = le16_to_cpu(resp->def_req_timeout);
4790         if (!bp->hwrm_cmd_timeout)
4791                 bp->hwrm_cmd_timeout = DFLT_HWRM_CMD_TIMEOUT;
4792
4793         if (resp->hwrm_intf_maj >= 1)
4794                 bp->hwrm_max_req_len = le16_to_cpu(resp->max_req_win_len);
4795
4796         bp->chip_num = le16_to_cpu(resp->chip_num);
4797         if (bp->chip_num == CHIP_NUM_58700 && !resp->chip_rev &&
4798             !resp->chip_metal)
4799                 bp->flags |= BNXT_FLAG_CHIP_NITRO_A0;
4800
4801         dev_caps_cfg = le32_to_cpu(resp->dev_caps_cfg);
4802         if ((dev_caps_cfg & VER_GET_RESP_DEV_CAPS_CFG_SHORT_CMD_SUPPORTED) &&
4803             (dev_caps_cfg & VER_GET_RESP_DEV_CAPS_CFG_SHORT_CMD_REQUIRED))
4804                 bp->flags |= BNXT_FLAG_SHORT_CMD;
4805
4806 hwrm_ver_get_exit:
4807         mutex_unlock(&bp->hwrm_cmd_lock);
4808         return rc;
4809 }
4810
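/* Push the host wall-clock time to the firmware (HWRM spec 1.4.0+).
 * Note that struct rtc_time counts tm_year from 1900 and tm_mon from
 * 0, hence the adjustments below.
 */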
4811 int bnxt_hwrm_fw_set_time(struct bnxt *bp)
4812 {
4813 #if IS_ENABLED(CONFIG_RTC_LIB)
4814         struct hwrm_fw_set_time_input req = {0};
4815         struct rtc_time tm;
4816         struct timeval tv;
4817
4818         if (bp->hwrm_spec_code < 0x10400)
4819                 return -EOPNOTSUPP;
4820
4821         do_gettimeofday(&tv);
4822         rtc_time_to_tm(tv.tv_sec, &tm);
4823         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FW_SET_TIME, -1, -1);
4824         req.year = cpu_to_le16(1900 + tm.tm_year);
4825         req.month = 1 + tm.tm_mon;
4826         req.day = tm.tm_mday;
4827         req.hour = tm.tm_hour;
4828         req.minute = tm.tm_min;
4829         req.second = tm.tm_sec;
4830         return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4831 #else
4832         return -EOPNOTSUPP;
4833 #endif
4834 }
4835
4836 static int bnxt_hwrm_port_qstats(struct bnxt *bp)
4837 {
4838         int rc;
4839         struct bnxt_pf_info *pf = &bp->pf;
4840         struct hwrm_port_qstats_input req = {0};
4841
4842         if (!(bp->flags & BNXT_FLAG_PORT_STATS))
4843                 return 0;
4844
4845         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_QSTATS, -1, -1);
4846         req.port_id = cpu_to_le16(pf->port_id);
4847         req.tx_stat_host_addr = cpu_to_le64(bp->hw_tx_port_stats_map);
4848         req.rx_stat_host_addr = cpu_to_le64(bp->hw_rx_port_stats_map);
4849         rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4850         return rc;
4851 }
4852
4853 static void bnxt_hwrm_free_tunnel_ports(struct bnxt *bp)
4854 {
4855         if (bp->vxlan_port_cnt) {
4856                 bnxt_hwrm_tunnel_dst_port_free(
4857                         bp, TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN);
4858         }
4859         bp->vxlan_port_cnt = 0;
4860         if (bp->nge_port_cnt) {
4861                 bnxt_hwrm_tunnel_dst_port_free(
4862                         bp, TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE);
4863         }
4864         bp->nge_port_cnt = 0;
4865 }
4866
4867 static int bnxt_set_tpa(struct bnxt *bp, bool set_tpa)
4868 {
4869         int rc, i;
4870         u32 tpa_flags = 0;
4871
4872         if (set_tpa)
4873                 tpa_flags = bp->flags & BNXT_FLAG_TPA;
4874         for (i = 0; i < bp->nr_vnics; i++) {
4875                 rc = bnxt_hwrm_vnic_set_tpa(bp, i, tpa_flags);
4876                 if (rc) {
4877                         netdev_err(bp->dev, "hwrm vnic set tpa failure for vnic %d, rc: %x\n",
4878                                    i, rc);
4879                         return rc;
4880                 }
4881         }
4882         return 0;
4883 }
4884
4885 static void bnxt_hwrm_clear_vnic_rss(struct bnxt *bp)
4886 {
4887         int i;
4888
4889         for (i = 0; i < bp->nr_vnics; i++)
4890                 bnxt_hwrm_vnic_set_rss(bp, i, false);
4891 }
4892
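/* Teardown mirrors bring-up in reverse: L2 filters, RSS state, RSS
 * contexts and TPA before the vnics themselves, then rings and ring
 * groups; stat contexts and tunnel ports are only released when the
 * IRQs are being re-initialized.
 */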
4893 static void bnxt_hwrm_resource_free(struct bnxt *bp, bool close_path,
4894                                     bool irq_re_init)
4895 {
4896         if (bp->vnic_info) {
4897                 bnxt_hwrm_clear_vnic_filter(bp);
4898                 /* clear all RSS settings before freeing the vnic ctx */
4899                 bnxt_hwrm_clear_vnic_rss(bp);
4900                 bnxt_hwrm_vnic_ctx_free(bp);
4901                 /* before freeing the vnic, undo its tpa settings */
4902                 if (bp->flags & BNXT_FLAG_TPA)
4903                         bnxt_set_tpa(bp, false);
4904                 bnxt_hwrm_vnic_free(bp);
4905         }
4906         bnxt_hwrm_ring_free(bp, close_path);
4907         bnxt_hwrm_ring_grp_free(bp);
4908         if (irq_re_init) {
4909                 bnxt_hwrm_stat_ctx_free(bp);
4910                 bnxt_hwrm_free_tunnel_ports(bp);
4911         }
4912 }
4913
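/* Bring one vnic to an operational state: allocate its RSS context
 * (Nitro A0 parts need a second "cos" context), bind it to a ring
 * group, enable RSS hashing, and turn on header-data split when
 * aggregation rings are in use.
 */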
4914 static int bnxt_setup_vnic(struct bnxt *bp, u16 vnic_id)
4915 {
4916         struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
4917         int rc;
4918
4919         if (vnic->flags & BNXT_VNIC_RFS_NEW_RSS_FLAG)
4920                 goto skip_rss_ctx;
4921
4922         /* allocate context for vnic */
4923         rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic_id, 0);
4924         if (rc) {
4925                 netdev_err(bp->dev, "hwrm vnic %d alloc failure rc: %x\n",
4926                            vnic_id, rc);
4927                 goto vnic_setup_err;
4928         }
4929         bp->rsscos_nr_ctxs++;
4930
4931         if (BNXT_CHIP_TYPE_NITRO_A0(bp)) {
4932                 rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic_id, 1);
4933                 if (rc) {
4934                         netdev_err(bp->dev, "hwrm vnic %d cos ctx alloc failure rc: %x\n",
4935                                    vnic_id, rc);
4936                         goto vnic_setup_err;
4937                 }
4938                 bp->rsscos_nr_ctxs++;
4939         }
4940
4941 skip_rss_ctx:
4942         /* configure default vnic, ring grp */
4943         rc = bnxt_hwrm_vnic_cfg(bp, vnic_id);
4944         if (rc) {
4945                 netdev_err(bp->dev, "hwrm vnic %d cfg failure rc: %x\n",
4946                            vnic_id, rc);
4947                 goto vnic_setup_err;
4948         }
4949
4950         /* Enable RSS hashing on vnic */
4951         rc = bnxt_hwrm_vnic_set_rss(bp, vnic_id, true);
4952         if (rc) {
4953                 netdev_err(bp->dev, "hwrm vnic %d set rss failure rc: %x\n",
4954                            vnic_id, rc);
4955                 goto vnic_setup_err;
4956         }
4957
4958         if (bp->flags & BNXT_FLAG_AGG_RINGS) {
4959                 rc = bnxt_hwrm_vnic_set_hds(bp, vnic_id);
4960                 if (rc) {
4961                         netdev_err(bp->dev, "hwrm vnic %d set hds failure rc: %x\n",
4962                                    vnic_id, rc);
4963                 }
4964         }
4965
4966 vnic_setup_err:
4967         return rc;
4968 }
4969
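/* With aRFS, each RX ring gets a vnic of its own (vnic 0 remains the
 * default): vnic i + 1 maps to ring i, so a flow can be steered to a
 * specific ring by attaching a filter to that vnic.
 */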
4970 static int bnxt_alloc_rfs_vnics(struct bnxt *bp)
4971 {
4972 #ifdef CONFIG_RFS_ACCEL
4973         int i, rc = 0;
4974
4975         for (i = 0; i < bp->rx_nr_rings; i++) {
4976                 struct bnxt_vnic_info *vnic;
4977                 u16 vnic_id = i + 1;
4978                 u16 ring_id = i;
4979
4980                 if (vnic_id >= bp->nr_vnics)
4981                         break;
4982
4983                 vnic = &bp->vnic_info[vnic_id];
4984                 vnic->flags |= BNXT_VNIC_RFS_FLAG;
4985                 if (bp->flags & BNXT_FLAG_NEW_RSS_CAP)
4986                         vnic->flags |= BNXT_VNIC_RFS_NEW_RSS_FLAG;
4987                 rc = bnxt_hwrm_vnic_alloc(bp, vnic_id, ring_id, 1);
4988                 if (rc) {
4989                         netdev_err(bp->dev, "hwrm vnic %d alloc failure rc: %x\n",
4990                                    vnic_id, rc);
4991                         break;
4992                 }
4993                 rc = bnxt_setup_vnic(bp, vnic_id);
4994                 if (rc)
4995                         break;
4996         }
4997         return rc;
4998 #else
4999         return 0;
5000 #endif
5001 }
5002
5003 /* Allow PF and VF with default VLAN to be in promiscuous mode */
5004 static bool bnxt_promisc_ok(struct bnxt *bp)
5005 {
5006 #ifdef CONFIG_BNXT_SRIOV
5007         if (BNXT_VF(bp) && !bp->vf.vlan)
5008                 return false;
5009 #endif
5010         return true;
5011 }
5012
5013 static int bnxt_setup_nitroa0_vnic(struct bnxt *bp)
5014 {
5015         int rc = 0;
5016
5017         rc = bnxt_hwrm_vnic_alloc(bp, 1, bp->rx_nr_rings - 1, 1);
5018         if (rc) {
5019                 netdev_err(bp->dev, "Cannot allocate special vnic for NS2 A0: %x\n",
5020                            rc);
5021                 return rc;
5022         }
5023
5024         rc = bnxt_hwrm_vnic_cfg(bp, 1);
5025         if (rc) {
5026                 netdev_err(bp->dev, "Cannot configure special vnic for NS2 A0: %x\n",
5027                            rc);
5028                 return rc;
5029         }
5030         return rc;
5031 }
5032
5033 static int bnxt_cfg_rx_mode(struct bnxt *);
5034 static bool bnxt_mc_list_updated(struct bnxt *, u32 *);
5035
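/* Full datapath bring-up: stat contexts (when IRQs are re-inited),
 * rings and ring groups, the default vnic 0 with its unicast filter
 * and RX mask, then the optional RFS vnics, TPA and coalescing.  Any
 * failure unwinds through bnxt_hwrm_resource_free().
 */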
5036 static int bnxt_init_chip(struct bnxt *bp, bool irq_re_init)
5037 {
5038         struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
5039         int rc = 0;
5040         unsigned int rx_nr_rings = bp->rx_nr_rings;
5041
5042         if (irq_re_init) {
5043                 rc = bnxt_hwrm_stat_ctx_alloc(bp);
5044                 if (rc) {
5045                         netdev_err(bp->dev, "hwrm stat ctx alloc failure rc: %x\n",
5046                                    rc);
5047                         goto err_out;
5048                 }
5049         }
5050
5051         rc = bnxt_hwrm_ring_alloc(bp);
5052         if (rc) {
5053                 netdev_err(bp->dev, "hwrm ring alloc failure rc: %x\n", rc);
5054                 goto err_out;
5055         }
5056
5057         rc = bnxt_hwrm_ring_grp_alloc(bp);
5058         if (rc) {
5059                 netdev_err(bp->dev, "hwrm_ring_grp alloc failure: %x\n", rc);
5060                 goto err_out;
5061         }
5062
5063         if (BNXT_CHIP_TYPE_NITRO_A0(bp))
5064                 rx_nr_rings--;
5065
5066         /* default vnic 0 */
5067         rc = bnxt_hwrm_vnic_alloc(bp, 0, 0, rx_nr_rings);
5068         if (rc) {
5069                 netdev_err(bp->dev, "hwrm vnic alloc failure rc: %x\n", rc);
5070                 goto err_out;
5071         }
5072
5073         rc = bnxt_setup_vnic(bp, 0);
5074         if (rc)
5075                 goto err_out;
5076
5077         if (bp->flags & BNXT_FLAG_RFS) {
5078                 rc = bnxt_alloc_rfs_vnics(bp);
5079                 if (rc)
5080                         goto err_out;
5081         }
5082
5083         if (bp->flags & BNXT_FLAG_TPA) {
5084                 rc = bnxt_set_tpa(bp, true);
5085                 if (rc)
5086                         goto err_out;
5087         }
5088
5089         if (BNXT_VF(bp))
5090                 bnxt_update_vf_mac(bp);
5091
5092         /* Filter for default vnic 0 */
5093         rc = bnxt_hwrm_set_vnic_filter(bp, 0, 0, bp->dev->dev_addr);
5094         if (rc) {
5095                 netdev_err(bp->dev, "HWRM vnic filter failure rc: %x\n", rc);
5096                 goto err_out;
5097         }
5098         vnic->uc_filter_count = 1;
5099
5100         vnic->rx_mask = CFA_L2_SET_RX_MASK_REQ_MASK_BCAST;
5101
5102         if ((bp->dev->flags & IFF_PROMISC) && bnxt_promisc_ok(bp))
5103                 vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;
5104
5105         if (bp->dev->flags & IFF_ALLMULTI) {
5106                 vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
5107                 vnic->mc_list_count = 0;
5108         } else {
5109                 u32 mask = 0;
5110
5111                 bnxt_mc_list_updated(bp, &mask);
5112                 vnic->rx_mask |= mask;
5113         }
5114
5115         rc = bnxt_cfg_rx_mode(bp);
5116         if (rc)
5117                 goto err_out;
5118
5119         rc = bnxt_hwrm_set_coal(bp);
5120         if (rc)
5121                 netdev_warn(bp->dev, "HWRM set coalescing failure rc: %x\n",
5122                                 rc);
5123
5124         if (BNXT_CHIP_TYPE_NITRO_A0(bp)) {
5125                 rc = bnxt_setup_nitroa0_vnic(bp);
5126                 if (rc)
5127                         netdev_err(bp->dev, "Special vnic setup failure for NS2 A0 rc: %x\n",
5128                                    rc);
5129         }
5130
5131         if (BNXT_VF(bp)) {
5132                 bnxt_hwrm_func_qcfg(bp);
5133                 netdev_update_features(bp->dev);
5134         }
5135
5136         return 0;
5137
5138 err_out:
5139         bnxt_hwrm_resource_free(bp, 0, true);
5140
5141         return rc;
5142 }
5143
5144 static int bnxt_shutdown_nic(struct bnxt *bp, bool irq_re_init)
5145 {
5146         bnxt_hwrm_resource_free(bp, 1, irq_re_init);
5147         return 0;
5148 }
5149
5150 static int bnxt_init_nic(struct bnxt *bp, bool irq_re_init)
5151 {
5152         bnxt_init_cp_rings(bp);
5153         bnxt_init_rx_rings(bp);
5154         bnxt_init_tx_rings(bp);
5155         bnxt_init_ring_grps(bp, irq_re_init);
5156         bnxt_init_vnics(bp);
5157
5158         return bnxt_init_chip(bp, irq_re_init);
5159 }
5160
5161 static int bnxt_set_real_num_queues(struct bnxt *bp)
5162 {
5163         int rc;
5164         struct net_device *dev = bp->dev;
5165
5166         rc = netif_set_real_num_tx_queues(dev, bp->tx_nr_rings -
5167                                           bp->tx_nr_rings_xdp);
5168         if (rc)
5169                 return rc;
5170
5171         rc = netif_set_real_num_rx_queues(dev, bp->rx_nr_rings);
5172         if (rc)
5173                 return rc;
5174
5175 #ifdef CONFIG_RFS_ACCEL
5176         if (bp->flags & BNXT_FLAG_RFS)
5177                 dev->rx_cpu_rmap = alloc_irq_cpu_rmap(bp->rx_nr_rings);
5178 #endif
5179
5180         return rc;
5181 }
5182
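/* Fit the requested RX/TX ring counts into "max" vectors.  Shared
 * rings are simply clamped; otherwise rings are shaved off the larger
 * side one at a time, e.g. max = 8, rx = 6, tx = 4 trims to rx = 4,
 * tx = 4.
 */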
5183 static int bnxt_trim_rings(struct bnxt *bp, int *rx, int *tx, int max,
5184                            bool shared)
5185 {
5186         int _rx = *rx, _tx = *tx;
5187
5188         if (shared) {
5189                 *rx = min_t(int, _rx, max);
5190                 *tx = min_t(int, _tx, max);
5191         } else {
5192                 if (max < 2)
5193                         return -ENOMEM;
5194
5195                 while (_rx + _tx > max) {
5196                         if (_rx > _tx && _rx > 1)
5197                                 _rx--;
5198                         else if (_tx > 1)
5199                                 _tx--;
5200                 }
5201                 *rx = _rx;
5202                 *tx = _tx;
5203         }
5204         return 0;
5205 }
5206
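/* Name each MSI-X vector after its queue ("<dev>-rx-<n>",
 * "<dev>-tx-<n>", or "<dev>-TxRx-<n>" when rings are shared), attach
 * the bnxt_msix handler, and publish the per-TC TX queue layout to
 * the stack.
 */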
5207 static void bnxt_setup_msix(struct bnxt *bp)
5208 {
5209         const int len = sizeof(bp->irq_tbl[0].name);
5210         struct net_device *dev = bp->dev;
5211         int tcs, i;
5212
5213         tcs = netdev_get_num_tc(dev);
5214         if (tcs > 1) {
5215                 int i, off, count;
5216
5217                 for (i = 0; i < tcs; i++) {
5218                         count = bp->tx_nr_rings_per_tc;
5219                         off = i * count;
5220                         netdev_set_tc_queue(dev, i, count, off);
5221                 }
5222         }
5223
5224         for (i = 0; i < bp->cp_nr_rings; i++) {
5225                 char *attr;
5226
5227                 if (bp->flags & BNXT_FLAG_SHARED_RINGS)
5228                         attr = "TxRx";
5229                 else if (i < bp->rx_nr_rings)
5230                         attr = "rx";
5231                 else
5232                         attr = "tx";
5233
5234                 snprintf(bp->irq_tbl[i].name, len, "%s-%s-%d", dev->name, attr,
5235                          i);
5236                 bp->irq_tbl[i].handler = bnxt_msix;
5237         }
5238 }
5239
5240 static void bnxt_setup_inta(struct bnxt *bp)
5241 {
5242         const int len = sizeof(bp->irq_tbl[0].name);
5243
5244         if (netdev_get_num_tc(bp->dev))
5245                 netdev_reset_tc(bp->dev);
5246
5247         snprintf(bp->irq_tbl[0].name, len, "%s-%s-%d", bp->dev->name, "TxRx",
5248                  0);
5249         bp->irq_tbl[0].handler = bnxt_inta;
5250 }
5251
5252 static int bnxt_setup_int_mode(struct bnxt *bp)
5253 {
5254         int rc;
5255
5256         if (bp->flags & BNXT_FLAG_USING_MSIX)
5257                 bnxt_setup_msix(bp);
5258         else
5259                 bnxt_setup_inta(bp);
5260
5261         rc = bnxt_set_real_num_queues(bp);
5262         return rc;
5263 }
5264
5265 #ifdef CONFIG_RFS_ACCEL
5266 static unsigned int bnxt_get_max_func_rss_ctxs(struct bnxt *bp)
5267 {
5268 #if defined(CONFIG_BNXT_SRIOV)
5269         if (BNXT_VF(bp))
5270                 return bp->vf.max_rsscos_ctxs;
5271 #endif
5272         return bp->pf.max_rsscos_ctxs;
5273 }
5274
5275 static unsigned int bnxt_get_max_func_vnics(struct bnxt *bp)
5276 {
5277 #if defined(CONFIG_BNXT_SRIOV)
5278         if (BNXT_VF(bp))
5279                 return bp->vf.max_vnics;
5280 #endif
5281         return bp->pf.max_vnics;
5282 }
5283 #endif
5284
5285 unsigned int bnxt_get_max_func_stat_ctxs(struct bnxt *bp)
5286 {
5287 #if defined(CONFIG_BNXT_SRIOV)
5288         if (BNXT_VF(bp))
5289                 return bp->vf.max_stat_ctxs;
5290 #endif
5291         return bp->pf.max_stat_ctxs;
5292 }
5293
5294 void bnxt_set_max_func_stat_ctxs(struct bnxt *bp, unsigned int max)
5295 {
5296 #if defined(CONFIG_BNXT_SRIOV)
5297         if (BNXT_VF(bp))
5298                 bp->vf.max_stat_ctxs = max;
5299         else
5300 #endif
5301                 bp->pf.max_stat_ctxs = max;
5302 }
5303
5304 unsigned int bnxt_get_max_func_cp_rings(struct bnxt *bp)
5305 {
5306 #if defined(CONFIG_BNXT_SRIOV)
5307         if (BNXT_VF(bp))
5308                 return bp->vf.max_cp_rings;
5309 #endif
5310         return bp->pf.max_cp_rings;
5311 }
5312
5313 void bnxt_set_max_func_cp_rings(struct bnxt *bp, unsigned int max)
5314 {
5315 #if defined(CONFIG_BNXT_SRIOV)
5316         if (BNXT_VF(bp))
5317                 bp->vf.max_cp_rings = max;
5318         else
5319 #endif
5320                 bp->pf.max_cp_rings = max;
5321 }
5322
5323 static unsigned int bnxt_get_max_func_irqs(struct bnxt *bp)
5324 {
5325 #if defined(CONFIG_BNXT_SRIOV)
5326         if (BNXT_VF(bp))
5327                 return min_t(unsigned int, bp->vf.max_irqs,
5328                              bp->vf.max_cp_rings);
5329 #endif
5330         return min_t(unsigned int, bp->pf.max_irqs, bp->pf.max_cp_rings);
5331 }
5332
5333 void bnxt_set_max_func_irqs(struct bnxt *bp, unsigned int max_irqs)
5334 {
5335 #if defined(CONFIG_BNXT_SRIOV)
5336         if (BNXT_VF(bp))
5337                 bp->vf.max_irqs = max_irqs;
5338         else
5339 #endif
5340                 bp->pf.max_irqs = max_irqs;
5341 }
5342
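/* Enable MSI-X: ask for up to the per-function IRQ limit, accept as
 * few as "min" (2 unless rings are shared), then trim the ring counts
 * to what the PCI core granted.  With shared rings cp_nr_rings is
 * max(tx, rx); otherwise every ring needs its own completion ring, so
 * it is tx + rx.
 */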
5343 static int bnxt_init_msix(struct bnxt *bp)
5344 {
5345         int i, total_vecs, rc = 0, min = 1;
5346         struct msix_entry *msix_ent;
5347
5348         total_vecs = bnxt_get_max_func_irqs(bp);
5349         msix_ent = kcalloc(total_vecs, sizeof(struct msix_entry), GFP_KERNEL);
5350         if (!msix_ent)
5351                 return -ENOMEM;
5352
5353         for (i = 0; i < total_vecs; i++) {
5354                 msix_ent[i].entry = i;
5355                 msix_ent[i].vector = 0;
5356         }
5357
5358         if (!(bp->flags & BNXT_FLAG_SHARED_RINGS))
5359                 min = 2;
5360
5361         total_vecs = pci_enable_msix_range(bp->pdev, msix_ent, min, total_vecs);
5362         if (total_vecs < 0) {
5363                 rc = -ENODEV;
5364                 goto msix_setup_exit;
5365         }
5366
5367         bp->irq_tbl = kcalloc(total_vecs, sizeof(struct bnxt_irq), GFP_KERNEL);
5368         if (bp->irq_tbl) {
5369                 for (i = 0; i < total_vecs; i++)
5370                         bp->irq_tbl[i].vector = msix_ent[i].vector;
5371
5372                 bp->total_irqs = total_vecs;
5373                 /* Trim rings based on the number of vectors allocated */
5374                 rc = bnxt_trim_rings(bp, &bp->rx_nr_rings, &bp->tx_nr_rings,
5375                                      total_vecs, min == 1);
5376                 if (rc)
5377                         goto msix_setup_exit;
5378
5379                 bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
5380                 bp->cp_nr_rings = (min == 1) ?
5381                                   max_t(int, bp->tx_nr_rings, bp->rx_nr_rings) :
5382                                   bp->tx_nr_rings + bp->rx_nr_rings;
5383
5384         } else {
5385                 rc = -ENOMEM;
5386                 goto msix_setup_exit;
5387         }
5388         bp->flags |= BNXT_FLAG_USING_MSIX;
5389         kfree(msix_ent);
5390         return 0;
5391
5392 msix_setup_exit:
5393         netdev_err(bp->dev, "bnxt_init_msix err: %x\n", rc);
5394         kfree(bp->irq_tbl);
5395         bp->irq_tbl = NULL;
5396         pci_disable_msix(bp->pdev);
5397         kfree(msix_ent);
5398         return rc;
5399 }
5400
5401 static int bnxt_init_inta(struct bnxt *bp)
5402 {
5403         bp->irq_tbl = kcalloc(1, sizeof(struct bnxt_irq), GFP_KERNEL);
5404         if (!bp->irq_tbl)
5405                 return -ENOMEM;
5406
5407         bp->total_irqs = 1;
5408         bp->rx_nr_rings = 1;
5409         bp->tx_nr_rings = 1;
5410         bp->cp_nr_rings = 1;
5411         bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
5412         bp->flags |= BNXT_FLAG_SHARED_RINGS;
5413         bp->irq_tbl[0].vector = bp->pdev->irq;
5414         return 0;
5415 }
5416
5417 static int bnxt_init_int_mode(struct bnxt *bp)
5418 {
5419         int rc = 0;
5420
5421         if (bp->flags & BNXT_FLAG_MSIX_CAP)
5422                 rc = bnxt_init_msix(bp);
5423
5424         if (!(bp->flags & BNXT_FLAG_USING_MSIX) && BNXT_PF(bp)) {
5425                 /* fall back to INTA */
5426                 rc = bnxt_init_inta(bp);
5427         }
5428         return rc;
5429 }
5430
5431 static void bnxt_clear_int_mode(struct bnxt *bp)
5432 {
5433         if (bp->flags & BNXT_FLAG_USING_MSIX)
5434                 pci_disable_msix(bp->pdev);
5435
5436         kfree(bp->irq_tbl);
5437         bp->irq_tbl = NULL;
5438         bp->flags &= ~BNXT_FLAG_USING_MSIX;
5439 }
5440
5441 static void bnxt_free_irq(struct bnxt *bp)
5442 {
5443         struct bnxt_irq *irq;
5444         int i;
5445
5446 #ifdef CONFIG_RFS_ACCEL
5447         free_irq_cpu_rmap(bp->dev->rx_cpu_rmap);
5448         bp->dev->rx_cpu_rmap = NULL;
5449 #endif
5450         if (!bp->irq_tbl)
5451                 return;
5452
5453         for (i = 0; i < bp->cp_nr_rings; i++) {
5454                 irq = &bp->irq_tbl[i];
5455                 if (irq->requested)
5456                         free_irq(irq->vector, bp->bnapi[i]);
5457                 irq->requested = 0;
5458         }
5459 }
5460
5461 static int bnxt_request_irq(struct bnxt *bp)
5462 {
5463         int i, j, rc = 0;
5464         unsigned long flags = 0;
5465 #ifdef CONFIG_RFS_ACCEL
5466         struct cpu_rmap *rmap = bp->dev->rx_cpu_rmap;
5467 #endif
5468
5469         if (!(bp->flags & BNXT_FLAG_USING_MSIX))
5470                 flags = IRQF_SHARED;
5471
5472         for (i = 0, j = 0; i < bp->cp_nr_rings; i++) {
5473                 struct bnxt_irq *irq = &bp->irq_tbl[i];
5474 #ifdef CONFIG_RFS_ACCEL
5475                 if (rmap && bp->bnapi[i]->rx_ring) {
5476                         rc = irq_cpu_rmap_add(rmap, irq->vector);
5477                         if (rc)
5478                                 netdev_warn(bp->dev, "failed adding irq rmap for ring %d\n",
5479                                             j);
5480                         j++;
5481                 }
5482 #endif
5483                 rc = request_irq(irq->vector, irq->handler, flags, irq->name,
5484                                  bp->bnapi[i]);
5485                 if (rc)
5486                         break;
5487
5488                 irq->requested = 1;
5489         }
5490         return rc;
5491 }
5492
5493 static void bnxt_del_napi(struct bnxt *bp)
5494 {
5495         int i;
5496
5497         if (!bp->bnapi)
5498                 return;
5499
5500         for (i = 0; i < bp->cp_nr_rings; i++) {
5501                 struct bnxt_napi *bnapi = bp->bnapi[i];
5502
5503                 napi_hash_del(&bnapi->napi);
5504                 netif_napi_del(&bnapi->napi);
5505         }
5506         /* Since we called napi_hash_del() before netif_napi_del(), we must
5507          * respect an RCU grace period before freeing the napi structures.
5508          */
5509         synchronize_net();
5510 }
5511
5512 static void bnxt_init_napi(struct bnxt *bp)
5513 {
5514         int i;
5515         unsigned int cp_nr_rings = bp->cp_nr_rings;
5516         struct bnxt_napi *bnapi;
5517
5518         if (bp->flags & BNXT_FLAG_USING_MSIX) {
5519                 if (BNXT_CHIP_TYPE_NITRO_A0(bp))
5520                         cp_nr_rings--;
5521                 for (i = 0; i < cp_nr_rings; i++) {
5522                         bnapi = bp->bnapi[i];
5523                         netif_napi_add(bp->dev, &bnapi->napi,
5524                                        bnxt_poll, 64);
5525                 }
5526                 if (BNXT_CHIP_TYPE_NITRO_A0(bp)) {
5527                         bnapi = bp->bnapi[cp_nr_rings];
5528                         netif_napi_add(bp->dev, &bnapi->napi,
5529                                        bnxt_poll_nitroa0, 64);
5530                 }
5531         } else {
5532                 bnapi = bp->bnapi[0];
5533                 netif_napi_add(bp->dev, &bnapi->napi, bnxt_poll, 64);
5534         }
5535 }
5536
5537 static void bnxt_disable_napi(struct bnxt *bp)
5538 {
5539         int i;
5540
5541         if (!bp->bnapi)
5542                 return;
5543
5544         for (i = 0; i < bp->cp_nr_rings; i++)
5545                 napi_disable(&bp->bnapi[i]->napi);
5546 }
5547
5548 static void bnxt_enable_napi(struct bnxt *bp)
5549 {
5550         int i;
5551
5552         for (i = 0; i < bp->cp_nr_rings; i++) {
5553                 bp->bnapi[i]->in_reset = false;
5554                 napi_enable(&bp->bnapi[i]->napi);
5555         }
5556 }
5557
5558 void bnxt_tx_disable(struct bnxt *bp)
5559 {
5560         int i;
5561         struct bnxt_tx_ring_info *txr;
5562         struct netdev_queue *txq;
5563
5564         if (bp->tx_ring) {
5565                 for (i = 0; i < bp->tx_nr_rings; i++) {
5566                         txr = &bp->tx_ring[i];
5567                         txq = netdev_get_tx_queue(bp->dev, i);
5568                         txr->dev_state = BNXT_DEV_STATE_CLOSING;
5569                 }
5570         }
5571         /* Stop all TX queues */
5572         netif_tx_disable(bp->dev);
5573         netif_carrier_off(bp->dev);
5574 }
5575
5576 void bnxt_tx_enable(struct bnxt *bp)
5577 {
5578         int i;
5579         struct bnxt_tx_ring_info *txr;
5580         struct netdev_queue *txq;
5581
5582         for (i = 0; i < bp->tx_nr_rings; i++) {
5583                 txr = &bp->tx_ring[i];
5584                 txq = netdev_get_tx_queue(bp->dev, i);
5585                 txr->dev_state = 0;
5586         }
5587         netif_tx_wake_all_queues(bp->dev);
5588         if (bp->link_info.link_up)
5589                 netif_carrier_on(bp->dev);
5590 }
5591
5592 static void bnxt_report_link(struct bnxt *bp)
5593 {
5594         if (bp->link_info.link_up) {
5595                 const char *duplex;
5596                 const char *flow_ctrl;
5597                 u32 speed;
5598                 u16 fec;
5599
5600                 netif_carrier_on(bp->dev);
5601                 if (bp->link_info.duplex == BNXT_LINK_DUPLEX_FULL)
5602                         duplex = "full";
5603                 else
5604                         duplex = "half";
5605                 if (bp->link_info.pause == BNXT_LINK_PAUSE_BOTH)
5606                         flow_ctrl = "ON - receive & transmit";
5607                 else if (bp->link_info.pause == BNXT_LINK_PAUSE_TX)
5608                         flow_ctrl = "ON - transmit";
5609                 else if (bp->link_info.pause == BNXT_LINK_PAUSE_RX)
5610                         flow_ctrl = "ON - receive";
5611                 else
5612                         flow_ctrl = "none";
5613                 speed = bnxt_fw_to_ethtool_speed(bp->link_info.link_speed);
5614                 netdev_info(bp->dev, "NIC Link is Up, %u Mbps %s duplex, Flow control: %s\n",
5615                             speed, duplex, flow_ctrl);
5616                 if (bp->flags & BNXT_FLAG_EEE_CAP)
5617                         netdev_info(bp->dev, "EEE is %s\n",
5618                                     bp->eee.eee_active ? "active" :
5619                                                          "not active");
5620                 fec = bp->link_info.fec_cfg;
5621                 if (!(fec & PORT_PHY_QCFG_RESP_FEC_CFG_FEC_NONE_SUPPORTED))
5622                         netdev_info(bp->dev, "FEC autoneg %s encodings: %s\n",
5623                                     (fec & BNXT_FEC_AUTONEG) ? "on" : "off",
5624                                     (fec & BNXT_FEC_ENC_BASE_R) ? "BaseR" :
5625                                      (fec & BNXT_FEC_ENC_RS) ? "RS" : "None");
5626         } else {
5627                 netif_carrier_off(bp->dev);
5628                 netdev_err(bp->dev, "NIC Link is Down\n");
5629         }
5630 }
5631
5632 static int bnxt_hwrm_phy_qcaps(struct bnxt *bp)
5633 {
5634         int rc = 0;
5635         struct hwrm_port_phy_qcaps_input req = {0};
5636         struct hwrm_port_phy_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
5637         struct bnxt_link_info *link_info = &bp->link_info;
5638
5639         if (bp->hwrm_spec_code < 0x10201)
5640                 return 0;
5641
5642         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_QCAPS, -1, -1);
5643
5644         mutex_lock(&bp->hwrm_cmd_lock);
5645         rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5646         if (rc)
5647                 goto hwrm_phy_qcaps_exit;
5648
5649         if (resp->eee_supported & PORT_PHY_QCAPS_RESP_EEE_SUPPORTED) {
5650                 struct ethtool_eee *eee = &bp->eee;
5651                 u16 fw_speeds = le16_to_cpu(resp->supported_speeds_eee_mode);
5652
5653                 bp->flags |= BNXT_FLAG_EEE_CAP;
5654                 eee->supported = _bnxt_fw_to_ethtool_adv_spds(fw_speeds, 0);
5655                 bp->lpi_tmr_lo = le32_to_cpu(resp->tx_lpi_timer_low) &
5656                                  PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_LOW_MASK;
5657                 bp->lpi_tmr_hi = le32_to_cpu(resp->valid_tx_lpi_timer_high) &
5658                                  PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_HIGH_MASK;
5659         }
5660         if (resp->supported_speeds_auto_mode)
5661                 link_info->support_auto_speeds =
5662                         le16_to_cpu(resp->supported_speeds_auto_mode);
5663
5664 hwrm_phy_qcaps_exit:
5665         mutex_unlock(&bp->hwrm_cmd_lock);
5666         return rc;
5667 }
5668
5669 static int bnxt_update_link(struct bnxt *bp, bool chng_link_state)
5670 {
5671         int rc = 0;
5672         struct bnxt_link_info *link_info = &bp->link_info;
5673         struct hwrm_port_phy_qcfg_input req = {0};
5674         struct hwrm_port_phy_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
5675         u8 link_up = link_info->link_up;
5676         u16 diff;
5677
5678         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_QCFG, -1, -1);
5679
5680         mutex_lock(&bp->hwrm_cmd_lock);
5681         rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5682         if (rc) {
5683                 mutex_unlock(&bp->hwrm_cmd_lock);
5684                 return rc;
5685         }
5686
5687         memcpy(&link_info->phy_qcfg_resp, resp, sizeof(*resp));
5688         link_info->phy_link_status = resp->link;
5689         link_info->duplex = resp->duplex;
5690         link_info->pause = resp->pause;
5691         link_info->auto_mode = resp->auto_mode;
5692         link_info->auto_pause_setting = resp->auto_pause;
5693         link_info->lp_pause = resp->link_partner_adv_pause;
5694         link_info->force_pause_setting = resp->force_pause;
5695         link_info->duplex_setting = resp->duplex;
5696         if (link_info->phy_link_status == BNXT_LINK_LINK)
5697                 link_info->link_speed = le16_to_cpu(resp->link_speed);
5698         else
5699                 link_info->link_speed = 0;
5700         link_info->force_link_speed = le16_to_cpu(resp->force_link_speed);
5701         link_info->support_speeds = le16_to_cpu(resp->support_speeds);
5702         link_info->auto_link_speeds = le16_to_cpu(resp->auto_link_speed_mask);
5703         link_info->lp_auto_link_speeds =
5704                 le16_to_cpu(resp->link_partner_adv_speeds);
5705         link_info->preemphasis = le32_to_cpu(resp->preemphasis);
5706         link_info->phy_ver[0] = resp->phy_maj;
5707         link_info->phy_ver[1] = resp->phy_min;
5708         link_info->phy_ver[2] = resp->phy_bld;
5709         link_info->media_type = resp->media_type;
5710         link_info->phy_type = resp->phy_type;
5711         link_info->transceiver = resp->xcvr_pkg_type;
5712         link_info->phy_addr = resp->eee_config_phy_addr &
5713                               PORT_PHY_QCFG_RESP_PHY_ADDR_MASK;
5714         link_info->module_status = resp->module_status;
5715
5716         if (bp->flags & BNXT_FLAG_EEE_CAP) {
5717                 struct ethtool_eee *eee = &bp->eee;
5718                 u16 fw_speeds;
5719
5720                 eee->eee_active = 0;
5721                 if (resp->eee_config_phy_addr &
5722                     PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_ACTIVE) {
5723                         eee->eee_active = 1;
5724                         fw_speeds = le16_to_cpu(
5725                                 resp->link_partner_adv_eee_link_speed_mask);
5726                         eee->lp_advertised =
5727                                 _bnxt_fw_to_ethtool_adv_spds(fw_speeds, 0);
5728                 }
5729
5730                 /* Pull initial EEE config */
5731                 if (!chng_link_state) {
5732                         if (resp->eee_config_phy_addr &
5733                             PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_ENABLED)
5734                                 eee->eee_enabled = 1;
5735
5736                         fw_speeds = le16_to_cpu(resp->adv_eee_link_speed_mask);
5737                         eee->advertised =
5738                                 _bnxt_fw_to_ethtool_adv_spds(fw_speeds, 0);
5739
5740                         if (resp->eee_config_phy_addr &
5741                             PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_TX_LPI) {
5742                                 __le32 tmr;
5743
5744                                 eee->tx_lpi_enabled = 1;
5745                                 tmr = resp->xcvr_identifier_type_tx_lpi_timer;
5746                                 eee->tx_lpi_timer = le32_to_cpu(tmr) &
5747                                         PORT_PHY_QCFG_RESP_TX_LPI_TIMER_MASK;
5748                         }
5749                 }
5750         }
5751
5752         link_info->fec_cfg = PORT_PHY_QCFG_RESP_FEC_CFG_FEC_NONE_SUPPORTED;
5753         if (bp->hwrm_spec_code >= 0x10504)
5754                 link_info->fec_cfg = le16_to_cpu(resp->fec_cfg);
5755
5756         /* TODO: need to add more logic to report VF link */
5757         if (chng_link_state) {
5758                 if (link_info->phy_link_status == BNXT_LINK_LINK)
5759                         link_info->link_up = 1;
5760                 else
5761                         link_info->link_up = 0;
5762                 if (link_up != link_info->link_up)
5763                         bnxt_report_link(bp);
5764         } else {
5765                 /* always link down if not requested to update link state */
5766                 link_info->link_up = 0;
5767         }
5768         mutex_unlock(&bp->hwrm_cmd_lock);
5769
5770         diff = link_info->support_auto_speeds ^ link_info->advertising;
5771         if ((link_info->support_auto_speeds | diff) !=
5772             link_info->support_auto_speeds) {
5773                 /* An advertised speed is no longer supported, so we need to
5774                  * update the advertisement settings.  Caller holds RTNL
5775                  * so we can modify link settings.
5776                  */
5777                 link_info->advertising = link_info->support_auto_speeds;
5778                 if (link_info->autoneg & BNXT_AUTONEG_SPEED)
5779                         bnxt_hwrm_set_link_setting(bp, true, false);
5780         }
5781         return 0;
5782 }
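
As a worked example of the pruning check above (illustrative bit values): if support_auto_speeds is 0b0111 and advertising is 0b1010, then diff is 0b1101 and (support_auto_speeds | diff) is 0b1111, which differs from support_auto_speeds.  The test is equivalent to checking advertising & ~support_auto_speeds, i.e. some advertised speed is no longer supported, so advertising is reset to the supported mask and, if speed autoneg is enabled, pushed back to the firmware.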
5783
5784 static void bnxt_get_port_module_status(struct bnxt *bp)
5785 {
5786         struct bnxt_link_info *link_info = &bp->link_info;
5787         struct hwrm_port_phy_qcfg_output *resp = &link_info->phy_qcfg_resp;
5788         u8 module_status;
5789
5790         if (bnxt_update_link(bp, true))
5791                 return;
5792
5793         module_status = link_info->module_status;
5794         switch (module_status) {
5795         case PORT_PHY_QCFG_RESP_MODULE_STATUS_DISABLETX:
5796         case PORT_PHY_QCFG_RESP_MODULE_STATUS_PWRDOWN:
5797         case PORT_PHY_QCFG_RESP_MODULE_STATUS_WARNINGMSG:
5798                 netdev_warn(bp->dev, "Unqualified SFP+ module detected on port %d\n",
5799                             bp->pf.port_id);
5800                 if (bp->hwrm_spec_code >= 0x10201) {
5801                         netdev_warn(bp->dev, "Module part number %s\n",
5802                                     resp->phy_vendor_partnumber);
5803                 }
5804                 if (module_status == PORT_PHY_QCFG_RESP_MODULE_STATUS_DISABLETX)
5805                         netdev_warn(bp->dev, "TX is disabled\n");
5806                 if (module_status == PORT_PHY_QCFG_RESP_MODULE_STATUS_PWRDOWN)
5807                         netdev_warn(bp->dev, "SFP+ module is shutdown\n");
5808         }
5809 }
5810
5811 static void
5812 bnxt_hwrm_set_pause_common(struct bnxt *bp, struct hwrm_port_phy_cfg_input *req)
5813 {
5814         if (bp->link_info.autoneg & BNXT_AUTONEG_FLOW_CTRL) {
5815                 if (bp->hwrm_spec_code >= 0x10201)
5816                         req->auto_pause =
5817                                 PORT_PHY_CFG_REQ_AUTO_PAUSE_AUTONEG_PAUSE;
5818                 if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_RX)
5819                         req->auto_pause |= PORT_PHY_CFG_REQ_AUTO_PAUSE_RX;
5820                 if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_TX)
5821                         req->auto_pause |= PORT_PHY_CFG_REQ_AUTO_PAUSE_TX;
5822                 req->enables |=
5823                         cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_PAUSE);
5824         } else {
5825                 if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_RX)
5826                         req->force_pause |= PORT_PHY_CFG_REQ_FORCE_PAUSE_RX;
5827                 if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_TX)
5828                         req->force_pause |= PORT_PHY_CFG_REQ_FORCE_PAUSE_TX;
5829                 req->enables |=
5830                         cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_FORCE_PAUSE);
5831                 if (bp->hwrm_spec_code >= 0x10201) {
5832                         req->auto_pause = req->force_pause;
5833                         req->enables |= cpu_to_le32(
5834                                 PORT_PHY_CFG_REQ_ENABLES_AUTO_PAUSE);
5835                 }
5836         }
5837 }
5838
5839 static void bnxt_hwrm_set_link_common(struct bnxt *bp,
5840                                       struct hwrm_port_phy_cfg_input *req)
5841 {
5842         u8 autoneg = bp->link_info.autoneg;
5843         u16 fw_link_speed = bp->link_info.req_link_speed;
5844         u16 advertising = bp->link_info.advertising;
5845
5846         if (autoneg & BNXT_AUTONEG_SPEED) {
5847                 req->auto_mode |=
5848                         PORT_PHY_CFG_REQ_AUTO_MODE_SPEED_MASK;
5849
5850                 req->enables |= cpu_to_le32(
5851                         PORT_PHY_CFG_REQ_ENABLES_AUTO_LINK_SPEED_MASK);
5852                 req->auto_link_speed_mask = cpu_to_le16(advertising);
5853
5854                 req->enables |= cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_MODE);
5855                 req->flags |=
5856                         cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_RESTART_AUTONEG);
5857         } else {
5858                 req->force_link_speed = cpu_to_le16(fw_link_speed);
5859                 req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_FORCE);
5860         }
5861
5862         /* tell the firmware (ChiMP) that the setting takes effect immediately */
5863         req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_RESET_PHY);
5864 }
5865
5866 int bnxt_hwrm_set_pause(struct bnxt *bp)
5867 {
5868         struct hwrm_port_phy_cfg_input req = {0};
5869         int rc;
5870
5871         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_CFG, -1, -1);
5872         bnxt_hwrm_set_pause_common(bp, &req);
5873
5874         if ((bp->link_info.autoneg & BNXT_AUTONEG_FLOW_CTRL) ||
5875             bp->link_info.force_link_chng)
5876                 bnxt_hwrm_set_link_common(bp, &req);
5877
5878         mutex_lock(&bp->hwrm_cmd_lock);
5879         rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5880         if (!rc && !(bp->link_info.autoneg & BNXT_AUTONEG_FLOW_CTRL)) {
5881                 /* Since changing the pause setting doesn't trigger any link
5882                  * change event, the driver needs to update the current pause
5883                  * result upon successful return of the phy_cfg command.
5884                  */
5885                 bp->link_info.pause =
5886                 bp->link_info.force_pause_setting = bp->link_info.req_flow_ctrl;
5887                 bp->link_info.auto_pause_setting = 0;
5888                 if (!bp->link_info.force_link_chng)
5889                         bnxt_report_link(bp);
5890         }
5891         bp->link_info.force_link_chng = false;
5892         mutex_unlock(&bp->hwrm_cmd_lock);
5893         return rc;
5894 }
5895
5896 static void bnxt_hwrm_set_eee(struct bnxt *bp,
5897                               struct hwrm_port_phy_cfg_input *req)
5898 {
5899         struct ethtool_eee *eee = &bp->eee;
5900
5901         if (eee->eee_enabled) {
5902                 u16 eee_speeds;
5903                 u32 flags = PORT_PHY_CFG_REQ_FLAGS_EEE_ENABLE;
5904
5905                 if (eee->tx_lpi_enabled)
5906                         flags |= PORT_PHY_CFG_REQ_FLAGS_EEE_TX_LPI_ENABLE;
5907                 else
5908                         flags |= PORT_PHY_CFG_REQ_FLAGS_EEE_TX_LPI_DISABLE;
5909
5910                 req->flags |= cpu_to_le32(flags);
5911                 eee_speeds = bnxt_get_fw_auto_link_speeds(eee->advertised);
5912                 req->eee_link_speed_mask = cpu_to_le16(eee_speeds);
5913                 req->tx_lpi_timer = cpu_to_le32(eee->tx_lpi_timer);
5914         } else {
5915                 req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_EEE_DISABLE);
5916         }
5917 }
5918
5919 int bnxt_hwrm_set_link_setting(struct bnxt *bp, bool set_pause, bool set_eee)
5920 {
5921         struct hwrm_port_phy_cfg_input req = {0};
5922
5923         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_CFG, -1, -1);
5924         if (set_pause)
5925                 bnxt_hwrm_set_pause_common(bp, &req);
5926
5927         bnxt_hwrm_set_link_common(bp, &req);
5928
5929         if (set_eee)
5930                 bnxt_hwrm_set_eee(bp, &req);
5931         return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5932 }
5933
5934 static int bnxt_hwrm_shutdown_link(struct bnxt *bp)
5935 {
5936         struct hwrm_port_phy_cfg_input req = {0};
5937
5938         if (!BNXT_SINGLE_PF(bp))
5939                 return 0;
5940
5941         if (pci_num_vf(bp->pdev))
5942                 return 0;
5943
5944         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_CFG, -1, -1);
5945         req.flags = cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_FORCE_LINK_DWN);
5946         return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5947 }
5948
5949 static int bnxt_hwrm_port_led_qcaps(struct bnxt *bp)
5950 {
5951         struct hwrm_port_led_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
5952         struct hwrm_port_led_qcaps_input req = {0};
5953         struct bnxt_pf_info *pf = &bp->pf;
5954         int rc;
5955
5956         if (BNXT_VF(bp) || bp->hwrm_spec_code < 0x10601)
5957                 return 0;
5958
5959         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_LED_QCAPS, -1, -1);
5960         req.port_id = cpu_to_le16(pf->port_id);
5961         mutex_lock(&bp->hwrm_cmd_lock);
5962         rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5963         if (rc) {
5964                 mutex_unlock(&bp->hwrm_cmd_lock);
5965                 return rc;
5966         }
5967         if (resp->num_leds > 0 && resp->num_leds < BNXT_MAX_LED) {
5968                 int i;
5969
5970                 bp->num_leds = resp->num_leds;
5971                 memcpy(bp->leds, &resp->led0_id, sizeof(bp->leds[0]) *
5972                                                  bp->num_leds);
5973                 for (i = 0; i < bp->num_leds; i++) {
5974                         struct bnxt_led_info *led = &bp->leds[i];
5975                         __le16 caps = led->led_state_caps;
5976
5977                         if (!led->led_group_id ||
5978                             !BNXT_LED_ALT_BLINK_CAP(caps)) {
5979                                 bp->num_leds = 0;
5980                                 break;
5981                         }
5982                 }
5983         }
5984         mutex_unlock(&bp->hwrm_cmd_lock);
5985         return 0;
5986 }
5987
5988 int bnxt_hwrm_alloc_wol_fltr(struct bnxt *bp)
5989 {
5990         struct hwrm_wol_filter_alloc_input req = {0};
5991         struct hwrm_wol_filter_alloc_output *resp = bp->hwrm_cmd_resp_addr;
5992         int rc;
5993
5994         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_WOL_FILTER_ALLOC, -1, -1);
5995         req.port_id = cpu_to_le16(bp->pf.port_id);
5996         req.wol_type = WOL_FILTER_ALLOC_REQ_WOL_TYPE_MAGICPKT;
5997         req.enables = cpu_to_le32(WOL_FILTER_ALLOC_REQ_ENABLES_MAC_ADDRESS);
5998         memcpy(req.mac_address, bp->dev->dev_addr, ETH_ALEN);
5999         mutex_lock(&bp->hwrm_cmd_lock);
6000         rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
6001         if (!rc)
6002                 bp->wol_filter_id = resp->wol_filter_id;
6003         mutex_unlock(&bp->hwrm_cmd_lock);
6004         return rc;
6005 }
6006
6007 int bnxt_hwrm_free_wol_fltr(struct bnxt *bp)
6008 {
6009         struct hwrm_wol_filter_free_input req = {0};
6010         int rc;
6011
6012         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_WOL_FILTER_FREE, -1, -1);
6013         req.port_id = cpu_to_le16(bp->pf.port_id);
6014         req.enables = cpu_to_le32(WOL_FILTER_FREE_REQ_ENABLES_WOL_FILTER_ID);
6015         req.wol_filter_id = bp->wol_filter_id;
6016         rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
6017         return rc;
6018 }
6019
6020 static u16 bnxt_hwrm_get_wol_fltrs(struct bnxt *bp, u16 handle)
6021 {
6022         struct hwrm_wol_filter_qcfg_input req = {0};
6023         struct hwrm_wol_filter_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
6024         u16 next_handle = 0;
6025         int rc;
6026
6027         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_WOL_FILTER_QCFG, -1, -1);
6028         req.port_id = cpu_to_le16(bp->pf.port_id);
6029         req.handle = cpu_to_le16(handle);
6030         mutex_lock(&bp->hwrm_cmd_lock);
6031         rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
6032         if (!rc) {
6033                 next_handle = le16_to_cpu(resp->next_handle);
6034                 if (next_handle != 0) {
6035                         if (resp->wol_type ==
6036                             WOL_FILTER_ALLOC_REQ_WOL_TYPE_MAGICPKT) {
6037                                 bp->wol = 1;
6038                                 bp->wol_filter_id = resp->wol_filter_id;
6039                         }
6040                 }
6041         }
6042         mutex_unlock(&bp->hwrm_cmd_lock);
6043         return next_handle;
6044 }
6045
6046 static void bnxt_get_wol_settings(struct bnxt *bp)
6047 {
6048         u16 handle = 0;
6049
6050         if (!BNXT_PF(bp) || !(bp->flags & BNXT_FLAG_WOL_CAP))
6051                 return;
6052
6053         do {
6054                 handle = bnxt_hwrm_get_wol_fltrs(bp, handle);
6055         } while (handle && handle != 0xffff);
6056 }
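
The loop above enumerates the firmware's WoL filters one HWRM call at a time: handle 0 starts the walk, each response's next_handle seeds the next query, and a next_handle of 0 or 0xffff terminates it.  Only magic-packet filters are recorded along the way (see bnxt_hwrm_get_wol_fltrs() above).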
6057
6058 static bool bnxt_eee_config_ok(struct bnxt *bp)
6059 {
6060         struct ethtool_eee *eee = &bp->eee;
6061         struct bnxt_link_info *link_info = &bp->link_info;
6062
6063         if (!(bp->flags & BNXT_FLAG_EEE_CAP))
6064                 return true;
6065
6066         if (eee->eee_enabled) {
6067                 u32 advertising =
6068                         _bnxt_fw_to_ethtool_adv_spds(link_info->advertising, 0);
6069
6070                 if (!(link_info->autoneg & BNXT_AUTONEG_SPEED)) {
6071                         eee->eee_enabled = 0;
6072                         return false;
6073                 }
6074                 if (eee->advertised & ~advertising) {
6075                         eee->advertised = advertising & eee->supported;
6076                         return false;
6077                 }
6078         }
6079         return true;
6080 }
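
For example (illustrative speeds): if autoneg currently advertises only 10GBaseT while eee->advertised still contains 1000BaseT, the 1000BaseT bit falls outside advertising, so eee->advertised is clamped to advertising & eee->supported and the function returns false, prompting the caller to push an updated EEE configuration to the firmware.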
6081
6082 static int bnxt_update_phy_setting(struct bnxt *bp)
6083 {
6084         int rc;
6085         bool update_link = false;
6086         bool update_pause = false;
6087         bool update_eee = false;
6088         struct bnxt_link_info *link_info = &bp->link_info;
6089
6090         rc = bnxt_update_link(bp, true);
6091         if (rc) {
6092                 netdev_err(bp->dev, "failed to update link (rc: %x)\n",
6093                            rc);
6094                 return rc;
6095         }
6096         if (!BNXT_SINGLE_PF(bp))
6097                 return 0;
6098
6099         if ((link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL) &&
6100             (link_info->auto_pause_setting & BNXT_LINK_PAUSE_BOTH) !=
6101             link_info->req_flow_ctrl)
6102                 update_pause = true;
6103         if (!(link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL) &&
6104             link_info->force_pause_setting != link_info->req_flow_ctrl)
6105                 update_pause = true;
6106         if (!(link_info->autoneg & BNXT_AUTONEG_SPEED)) {
6107                 if (BNXT_AUTO_MODE(link_info->auto_mode))
6108                         update_link = true;
6109                 if (link_info->req_link_speed != link_info->force_link_speed)
6110                         update_link = true;
6111                 if (link_info->req_duplex != link_info->duplex_setting)
6112                         update_link = true;
6113         } else {
6114                 if (link_info->auto_mode == BNXT_LINK_AUTO_NONE)
6115                         update_link = true;
6116                 if (link_info->advertising != link_info->auto_link_speeds)
6117                         update_link = true;
6118         }
6119
6120         /* The last close may have shut down the link, so we need to call
6121          * PHY_CFG to bring it back up.
6122          */
6123         if (!netif_carrier_ok(bp->dev))
6124                 update_link = true;
6125
6126         if (!bnxt_eee_config_ok(bp))
6127                 update_eee = true;
6128
6129         if (update_link)
6130                 rc = bnxt_hwrm_set_link_setting(bp, update_pause, update_eee);
6131         else if (update_pause)
6132                 rc = bnxt_hwrm_set_pause(bp);
6133         if (rc) {
6134                 netdev_err(bp->dev, "failed to update phy setting (rc: %x)\n",
6135                            rc);
6136                 return rc;
6137         }
6138
6139         return rc;
6140 }
6141
6142 /* Common routine to pre-map certain register blocks to different GRC windows.
6143  * A PF has 16 4K windows and a VF has 4 4K windows.  However, only 15 windows
6144  * in the PF and 3 windows in the VF can be customized to map different
6145  * register blocks.
6146  */
6147 static void bnxt_preset_reg_win(struct bnxt *bp)
6148 {
6149         if (BNXT_PF(bp)) {
6150                 /* CAG registers map to GRC window #4 */
6151                 writel(BNXT_CAG_REG_BASE,
6152                        bp->bar0 + BNXT_GRCPF_REG_WINDOW_BASE_OUT + 12);
6153         }
6154 }
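
The hard-coded "+ 12" above selects GRC window #4: each window has a 4-byte base-address register, and window #1's register sits at BNXT_GRCPF_REG_WINDOW_BASE_OUT, so window N lives at offset (N - 1) * 4.  A minimal sketch of a generalized helper under that assumed layout (bnxt_set_reg_win() is not part of this driver and is shown only to make the offset arithmetic explicit):

static void bnxt_set_reg_win(struct bnxt *bp, int win, u32 reg_base)
{
        /* Window N's base register lives at WINDOW_BASE_OUT + (N - 1) * 4. */
        writel(reg_base, bp->bar0 + BNXT_GRCPF_REG_WINDOW_BASE_OUT +
               (win - 1) * 4);
}

With this helper, the call above would read bnxt_set_reg_win(bp, 4, BNXT_CAG_REG_BASE).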
6155
6156 static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
6157 {
6158         int rc = 0;
6159
6160         bnxt_preset_reg_win(bp);
6161         netif_carrier_off(bp->dev);
6162         if (irq_re_init) {
6163                 rc = bnxt_setup_int_mode(bp);
6164                 if (rc) {
6165                         netdev_err(bp->dev, "bnxt_setup_int_mode err: %x\n",
6166                                    rc);
6167                         return rc;
6168                 }
6169         }
6170         if ((bp->flags & BNXT_FLAG_RFS) &&
6171             !(bp->flags & BNXT_FLAG_USING_MSIX)) {
6172                 /* disable RFS if falling back to INTA */
6173                 bp->dev->hw_features &= ~NETIF_F_NTUPLE;
6174                 bp->flags &= ~BNXT_FLAG_RFS;
6175         }
6176
6177         rc = bnxt_alloc_mem(bp, irq_re_init);
6178         if (rc) {
6179                 netdev_err(bp->dev, "bnxt_alloc_mem err: %x\n", rc);
6180                 goto open_err_free_mem;
6181         }
6182
6183         if (irq_re_init) {
6184                 bnxt_init_napi(bp);
6185                 rc = bnxt_request_irq(bp);
6186                 if (rc) {
6187                         netdev_err(bp->dev, "bnxt_request_irq err: %x\n", rc);
6188                         goto open_err;
6189                 }
6190         }
6191
6192         bnxt_enable_napi(bp);
6193
6194         rc = bnxt_init_nic(bp, irq_re_init);
6195         if (rc) {
6196                 netdev_err(bp->dev, "bnxt_init_nic err: %x\n", rc);
6197                 goto open_err;
6198         }
6199
6200         if (link_re_init) {
6201                 rc = bnxt_update_phy_setting(bp);
6202                 if (rc)
6203                         netdev_warn(bp->dev, "failed to update phy settings\n");
6204         }
6205
6206         if (irq_re_init)
6207                 udp_tunnel_get_rx_info(bp->dev);
6208
6209         set_bit(BNXT_STATE_OPEN, &bp->state);
6210         bnxt_enable_int(bp);
6211         /* Enable TX queues */
6212         bnxt_tx_enable(bp);
6213         mod_timer(&bp->timer, jiffies + bp->current_interval);
6214         /* Poll link status and check SFP+ module status */
6215         bnxt_get_port_module_status(bp);
6216
6217         return 0;
6218
6219 open_err:
6220         bnxt_disable_napi(bp);
6221         bnxt_del_napi(bp);
6222
6223 open_err_free_mem:
6224         bnxt_free_skbs(bp);
6225         bnxt_free_irq(bp);
6226         bnxt_free_mem(bp, true);
6227         return rc;
6228 }
6229
6230 /* rtnl_lock held */
6231 int bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
6232 {
6233         int rc = 0;
6234
6235         rc = __bnxt_open_nic(bp, irq_re_init, link_re_init);
6236         if (rc) {
6237                 netdev_err(bp->dev, "nic open fail (rc: %x)\n", rc);
6238                 dev_close(bp->dev);
6239         }
6240         return rc;
6241 }
6242
6243 /* rtnl_lock held, open the NIC halfway by allocating all resources, but
6244  * NAPI, IRQ, and TX are not enabled.  This is mainly used for offline
6245  * self-tests.
6246  */
6247 int bnxt_half_open_nic(struct bnxt *bp)
6248 {
6249         int rc = 0;
6250
6251         rc = bnxt_alloc_mem(bp, false);
6252         if (rc) {
6253                 netdev_err(bp->dev, "bnxt_alloc_mem err: %x\n", rc);
6254                 goto half_open_err;
6255         }
6256         rc = bnxt_init_nic(bp, false);
6257         if (rc) {
6258                 netdev_err(bp->dev, "bnxt_init_nic err: %x\n", rc);
6259                 goto half_open_err;
6260         }
6261         return 0;
6262
6263 half_open_err:
6264         bnxt_free_skbs(bp);
6265         bnxt_free_mem(bp, false);
6266         dev_close(bp->dev);
6267         return rc;
6268 }
6269
6270 /* rtnl_lock held, this call can only be made after a previous successful
6271  * call to bnxt_half_open_nic().
6272  */
6273 void bnxt_half_close_nic(struct bnxt *bp)
6274 {
6275         bnxt_hwrm_resource_free(bp, false, false);
6276         bnxt_free_skbs(bp);
6277         bnxt_free_mem(bp, false);
6278 }
6279
6280 static int bnxt_open(struct net_device *dev)
6281 {
6282         struct bnxt *bp = netdev_priv(dev);
6283
6284         return __bnxt_open_nic(bp, true, true);
6285 }
6286
6287 static bool bnxt_drv_busy(struct bnxt *bp)
6288 {
6289         return (test_bit(BNXT_STATE_IN_SP_TASK, &bp->state) ||
6290                 test_bit(BNXT_STATE_READ_STATS, &bp->state));
6291 }
6292
6293 int bnxt_close_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
6294 {
6295         int rc = 0;
6296
6297 #ifdef CONFIG_BNXT_SRIOV
6298         if (bp->sriov_cfg) {
6299                 rc = wait_event_interruptible_timeout(bp->sriov_cfg_wait,
6300                                                       !bp->sriov_cfg,
6301                                                       BNXT_SRIOV_CFG_WAIT_TMO);
6302                 if (rc)
6303                         netdev_warn(bp->dev, "timeout waiting for SRIOV config operation to complete!\n");
6304         }
6305 #endif
6306         /* Change device state to avoid TX queue wake-ups */
6307         bnxt_tx_disable(bp);
6308
6309         clear_bit(BNXT_STATE_OPEN, &bp->state);
6310         smp_mb__after_atomic();
6311         while (bnxt_drv_busy(bp))
6312                 msleep(20);
6313
6314         /* Flush rings and disable interrupts */
6315         bnxt_shutdown_nic(bp, irq_re_init);
6316
6317         /* TODO CHIMP_FW: Link/PHY related cleanup if (link_re_init) */
6318
6319         bnxt_disable_napi(bp);
6320         del_timer_sync(&bp->timer);
6321         bnxt_free_skbs(bp);
6322
6323         if (irq_re_init) {
6324                 bnxt_free_irq(bp);
6325                 bnxt_del_napi(bp);
6326         }
6327         bnxt_free_mem(bp, irq_re_init);
6328         return rc;
6329 }
6330
6331 static int bnxt_close(struct net_device *dev)
6332 {
6333         struct bnxt *bp = netdev_priv(dev);
6334
6335         bnxt_close_nic(bp, true, true);
6336         bnxt_hwrm_shutdown_link(bp);
6337         return 0;
6338 }
6339
6340 /* rtnl_lock held */
6341 static int bnxt_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
6342 {
6343         switch (cmd) {
6344         case SIOCGMIIPHY:
6345                 /* fallthru */
6346         case SIOCGMIIREG: {
6347                 if (!netif_running(dev))
6348                         return -EAGAIN;
6349
6350                 return 0;
6351         }
6352
6353         case SIOCSMIIREG:
6354                 if (!netif_running(dev))
6355                         return -EAGAIN;
6356
6357                 return 0;
6358
6359         default:
6360                 /* do nothing */
6361                 break;
6362         }
6363         return -EOPNOTSUPP;
6364 }
6365
6366 static void
6367 bnxt_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
6368 {
6369         u32 i;
6370         struct bnxt *bp = netdev_priv(dev);
6371
6372         set_bit(BNXT_STATE_READ_STATS, &bp->state);
6373         /* Make sure bnxt_close_nic() sees that we are reading stats before
6374          * we check the BNXT_STATE_OPEN flag.
6375          */
6376         smp_mb__after_atomic();
6377         if (!test_bit(BNXT_STATE_OPEN, &bp->state)) {
6378                 clear_bit(BNXT_STATE_READ_STATS, &bp->state);
6379                 return;
6380         }
6381
6382         /* TODO check if we need to synchronize with bnxt_close path */
6383         for (i = 0; i < bp->cp_nr_rings; i++) {
6384                 struct bnxt_napi *bnapi = bp->bnapi[i];
6385                 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
6386                 struct ctx_hw_stats *hw_stats = cpr->hw_stats;
6387
6388                 stats->rx_packets += le64_to_cpu(hw_stats->rx_ucast_pkts);
6389                 stats->rx_packets += le64_to_cpu(hw_stats->rx_mcast_pkts);
6390                 stats->rx_packets += le64_to_cpu(hw_stats->rx_bcast_pkts);
6391
6392                 stats->tx_packets += le64_to_cpu(hw_stats->tx_ucast_pkts);
6393                 stats->tx_packets += le64_to_cpu(hw_stats->tx_mcast_pkts);
6394                 stats->tx_packets += le64_to_cpu(hw_stats->tx_bcast_pkts);
6395
6396                 stats->rx_bytes += le64_to_cpu(hw_stats->rx_ucast_bytes);
6397                 stats->rx_bytes += le64_to_cpu(hw_stats->rx_mcast_bytes);
6398                 stats->rx_bytes += le64_to_cpu(hw_stats->rx_bcast_bytes);
6399
6400                 stats->tx_bytes += le64_to_cpu(hw_stats->tx_ucast_bytes);
6401                 stats->tx_bytes += le64_to_cpu(hw_stats->tx_mcast_bytes);
6402                 stats->tx_bytes += le64_to_cpu(hw_stats->tx_bcast_bytes);
6403
6404                 stats->rx_missed_errors +=
6405                         le64_to_cpu(hw_stats->rx_discard_pkts);
6406
6407                 stats->multicast += le64_to_cpu(hw_stats->rx_mcast_pkts);
6408
6409                 stats->tx_dropped += le64_to_cpu(hw_stats->tx_drop_pkts);
6410         }
6411
6412         if (bp->flags & BNXT_FLAG_PORT_STATS) {
6413                 struct rx_port_stats *rx = bp->hw_rx_port_stats;
6414                 struct tx_port_stats *tx = bp->hw_tx_port_stats;
6415
6416                 stats->rx_crc_errors = le64_to_cpu(rx->rx_fcs_err_frames);
6417                 stats->rx_frame_errors = le64_to_cpu(rx->rx_align_err_frames);
6418                 stats->rx_length_errors = le64_to_cpu(rx->rx_undrsz_frames) +
6419                                           le64_to_cpu(rx->rx_ovrsz_frames) +
6420                                           le64_to_cpu(rx->rx_runt_frames);
6421                 stats->rx_errors = le64_to_cpu(rx->rx_false_carrier_frames) +
6422                                    le64_to_cpu(rx->rx_jbr_frames);
6423                 stats->collisions = le64_to_cpu(tx->tx_total_collisions);
6424                 stats->tx_fifo_errors = le64_to_cpu(tx->tx_fifo_underruns);
6425                 stats->tx_errors = le64_to_cpu(tx->tx_err);
6426         }
6427         clear_bit(BNXT_STATE_READ_STATS, &bp->state);
6428 }
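
The set_bit()/smp_mb__after_atomic()/test_bit() sequence above pairs with the clear_bit()/smp_mb__after_atomic()/bnxt_drv_busy() poll in bnxt_close_nic(): each side publishes its own flag before checking the other's, so at least one side is guaranteed to observe the race.  A minimal userspace sketch of the same handshake, assuming C11 sequentially-consistent atomics in place of the kernel's bitops and explicit barriers (all names below are illustrative):

#include <stdatomic.h>
#include <stdbool.h>

static atomic_bool dev_open = true;     /* stands in for BNXT_STATE_OPEN */
static atomic_bool reading_stats;       /* stands in for BNXT_STATE_READ_STATS */

/* Reader side: publish first, then re-check that the device is open. */
static bool stats_enter(void)
{
        atomic_store(&reading_stats, true);
        if (!atomic_load(&dev_open)) {
                atomic_store(&reading_stats, false);
                return false;           /* device is closing; bail out */
        }
        return true;    /* caller reads stats, then clears reading_stats */
}

/* Close side: unpublish first, then drain any in-flight reader. */
static void close_wait(void)
{
        atomic_store(&dev_open, false);
        while (atomic_load(&reading_stats))
                ;                       /* the driver sleeps here instead */
}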
6429
6430 static bool bnxt_mc_list_updated(struct bnxt *bp, u32 *rx_mask)
6431 {
6432         struct net_device *dev = bp->dev;
6433         struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
6434         struct netdev_hw_addr *ha;
6435         u8 *haddr;
6436         int mc_count = 0;
6437         bool update = false;
6438         int off = 0;
6439
6440         netdev_for_each_mc_addr(ha, dev) {
6441                 if (mc_count >= BNXT_MAX_MC_ADDRS) {
6442                         *rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
6443                         vnic->mc_list_count = 0;
6444                         return false;
6445                 }
6446                 haddr = ha->addr;
6447                 if (!ether_addr_equal(haddr, vnic->mc_list + off)) {
6448                         memcpy(vnic->mc_list + off, haddr, ETH_ALEN);
6449                         update = true;
6450                 }
6451                 off += ETH_ALEN;
6452                 mc_count++;
6453         }
6454         if (mc_count)
6455                 *rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_MCAST;
6456
6457         if (mc_count != vnic->mc_list_count) {
6458                 vnic->mc_list_count = mc_count;
6459                 update = true;
6460         }
6461         return update;
6462 }
6463
6464 static bool bnxt_uc_list_updated(struct bnxt *bp)
6465 {
6466         struct net_device *dev = bp->dev;
6467         struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
6468         struct netdev_hw_addr *ha;
6469         int off = 0;
6470
6471         if (netdev_uc_count(dev) != (vnic->uc_filter_count - 1))
6472                 return true;
6473
6474         netdev_for_each_uc_addr(ha, dev) {
6475                 if (!ether_addr_equal(ha->addr, vnic->uc_list + off))
6476                         return true;
6477
6478                 off += ETH_ALEN;
6479         }
6480         return false;
6481 }
6482
6483 static void bnxt_set_rx_mode(struct net_device *dev)
6484 {
6485         struct bnxt *bp = netdev_priv(dev);
6486         struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
6487         u32 mask = vnic->rx_mask;
6488         bool mc_update = false;
6489         bool uc_update;
6490
6491         if (!netif_running(dev))
6492                 return;
6493
6494         mask &= ~(CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS |
6495                   CFA_L2_SET_RX_MASK_REQ_MASK_MCAST |
6496                   CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST);
6497
6498         if ((dev->flags & IFF_PROMISC) && bnxt_promisc_ok(bp))
6499                 mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;
6500
6501         uc_update = bnxt_uc_list_updated(bp);
6502
6503         if (dev->flags & IFF_ALLMULTI) {
6504                 mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
6505                 vnic->mc_list_count = 0;
6506         } else {
6507                 mc_update = bnxt_mc_list_updated(bp, &mask);
6508         }
6509
6510         if (mask != vnic->rx_mask || uc_update || mc_update) {
6511                 vnic->rx_mask = mask;
6512
6513                 set_bit(BNXT_RX_MASK_SP_EVENT, &bp->sp_event);
6514                 schedule_work(&bp->sp_task);
6515         }
6516 }
6517
6518 static int bnxt_cfg_rx_mode(struct bnxt *bp)
6519 {
6520         struct net_device *dev = bp->dev;
6521         struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
6522         struct netdev_hw_addr *ha;
6523         int i, off = 0, rc;
6524         bool uc_update;
6525
6526         netif_addr_lock_bh(dev);
6527         uc_update = bnxt_uc_list_updated(bp);
6528         netif_addr_unlock_bh(dev);
6529
6530         if (!uc_update)
6531                 goto skip_uc;
6532
6533         mutex_lock(&bp->hwrm_cmd_lock);
6534         for (i = 1; i < vnic->uc_filter_count; i++) {
6535                 struct hwrm_cfa_l2_filter_free_input req = {0};
6536
6537                 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_L2_FILTER_FREE, -1,
6538                                        -1);
6539
6540                 req.l2_filter_id = vnic->fw_l2_filter_id[i];
6541
6542                 rc = _hwrm_send_message(bp, &req, sizeof(req),
6543                                         HWRM_CMD_TIMEOUT);
6544         }
6545         mutex_unlock(&bp->hwrm_cmd_lock);
6546
6547         vnic->uc_filter_count = 1;
6548
6549         netif_addr_lock_bh(dev);
6550         if (netdev_uc_count(dev) > (BNXT_MAX_UC_ADDRS - 1)) {
6551                 vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;
6552         } else {
6553                 netdev_for_each_uc_addr(ha, dev) {
6554                         memcpy(vnic->uc_list + off, ha->addr, ETH_ALEN);
6555                         off += ETH_ALEN;
6556                         vnic->uc_filter_count++;
6557                 }
6558         }
6559         netif_addr_unlock_bh(dev);
6560
6561         for (i = 1, off = 0; i < vnic->uc_filter_count; i++, off += ETH_ALEN) {
6562                 rc = bnxt_hwrm_set_vnic_filter(bp, 0, i, vnic->uc_list + off);
6563                 if (rc) {
6564                         netdev_err(bp->dev, "HWRM vnic filter failure rc: %x\n",
6565                                    rc);
6566                         vnic->uc_filter_count = i;
6567                         return rc;
6568                 }
6569         }
6570
6571 skip_uc:
6572         rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, 0);
6573         if (rc)
6574                 netdev_err(bp->dev, "HWRM cfa l2 rx mask failure rc: %x\n",
6575                            rc);
6576
6577         return rc;
6578 }
6579
6580 /* If the chip and firmware support RFS */
6581 static bool bnxt_rfs_supported(struct bnxt *bp)
6582 {
6583         if (BNXT_PF(bp) && !BNXT_CHIP_TYPE_NITRO_A0(bp))
6584                 return true;
6585         if (bp->flags & BNXT_FLAG_NEW_RSS_CAP)
6586                 return true;
6587         return false;
6588 }
6589
6590 /* If runtime conditions support RFS */
6591 static bool bnxt_rfs_capable(struct bnxt *bp)
6592 {
6593 #ifdef CONFIG_RFS_ACCEL
6594         int vnics, max_vnics, max_rss_ctxs;
6595
6596         if (!(bp->flags & BNXT_FLAG_MSIX_CAP))
6597                 return false;
6598
6599         vnics = 1 + bp->rx_nr_rings;
6600         max_vnics = bnxt_get_max_func_vnics(bp);
6601         max_rss_ctxs = bnxt_get_max_func_rss_ctxs(bp);
6602
6603         /* RSS contexts not a limiting factor */
6604         if (bp->flags & BNXT_FLAG_NEW_RSS_CAP)
6605                 max_rss_ctxs = max_vnics;
6606         if (vnics > max_vnics || vnics > max_rss_ctxs) {
6607                 netdev_warn(bp->dev,
6608                             "Not enough resources to support NTUPLE filters, only enough for up to %d rx rings\n",
6609                             min(max_rss_ctxs - 1, max_vnics - 1));
6610                 return false;
6611         }
6612
6613         return true;
6614 #else
6615         return false;
6616 #endif
6617 }
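
As a worked example of the check above (illustrative counts): with 8 RX rings the driver needs 1 + 8 = 9 VNICs, one default VNIC plus one per RX ring for aRFS steering, so both the function's VNIC budget and its RSS-context budget must be at least 9; otherwise NTUPLE filtering is reported as unsupported for this configuration.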
6618
6619 static netdev_features_t bnxt_fix_features(struct net_device *dev,
6620                                            netdev_features_t features)
6621 {
6622         struct bnxt *bp = netdev_priv(dev);
6623
6624         if ((features & NETIF_F_NTUPLE) && !bnxt_rfs_capable(bp))
6625                 features &= ~NETIF_F_NTUPLE;
6626
6627         /* Both CTAG and STAG VLAN acceleration on the RX side have to be
6628          * turned on or off together.
6629          */
6630         if ((features & (NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX)) !=
6631             (NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX)) {
6632                 if (dev->features & NETIF_F_HW_VLAN_CTAG_RX)
6633                         features &= ~(NETIF_F_HW_VLAN_CTAG_RX |
6634                                       NETIF_F_HW_VLAN_STAG_RX);
6635                 else
6636                         features |= NETIF_F_HW_VLAN_CTAG_RX |
6637                                     NETIF_F_HW_VLAN_STAG_RX;
6638         }
6639 #ifdef CONFIG_BNXT_SRIOV
6640         if (BNXT_VF(bp)) {
6641                 if (bp->vf.vlan) {
6642                         features &= ~(NETIF_F_HW_VLAN_CTAG_RX |
6643                                       NETIF_F_HW_VLAN_STAG_RX);
6644                 }
6645         }
6646 #endif
6647         return features;
6648 }
6649
6650 static int bnxt_set_features(struct net_device *dev, netdev_features_t features)
6651 {
6652         struct bnxt *bp = netdev_priv(dev);
6653         u32 flags = bp->flags;
6654         u32 changes;
6655         int rc = 0;
6656         bool re_init = false;
6657         bool update_tpa = false;
6658
6659         flags &= ~BNXT_FLAG_ALL_CONFIG_FEATS;
6660         if ((features & NETIF_F_GRO) && !BNXT_CHIP_TYPE_NITRO_A0(bp))
6661                 flags |= BNXT_FLAG_GRO;
6662         if (features & NETIF_F_LRO)
6663                 flags |= BNXT_FLAG_LRO;
6664
6665         if (bp->flags & BNXT_FLAG_NO_AGG_RINGS)
6666                 flags &= ~BNXT_FLAG_TPA;
6667
6668         if (features & NETIF_F_HW_VLAN_CTAG_RX)
6669                 flags |= BNXT_FLAG_STRIP_VLAN;
6670
6671         if (features & NETIF_F_NTUPLE)
6672                 flags |= BNXT_FLAG_RFS;
6673
6674         changes = flags ^ bp->flags;
6675         if (changes & BNXT_FLAG_TPA) {
6676                 update_tpa = true;
6677                 if ((bp->flags & BNXT_FLAG_TPA) == 0 ||
6678                     (flags & BNXT_FLAG_TPA) == 0)
6679                         re_init = true;
6680         }
6681
6682         if (changes & ~BNXT_FLAG_TPA)
6683                 re_init = true;
6684
6685         if (flags != bp->flags) {
6686                 u32 old_flags = bp->flags;
6687
6688                 bp->flags = flags;
6689
6690                 if (!test_bit(BNXT_STATE_OPEN, &bp->state)) {
6691                         if (update_tpa)
6692                                 bnxt_set_ring_params(bp);
6693                         return rc;
6694                 }
6695
6696                 if (re_init) {
6697                         bnxt_close_nic(bp, false, false);
6698                         if (update_tpa)
6699                                 bnxt_set_ring_params(bp);
6700
6701                         return bnxt_open_nic(bp, false, false);
6702                 }
6703                 if (update_tpa) {
6704                         rc = bnxt_set_tpa(bp,
6705                                           (flags & BNXT_FLAG_TPA) ?
6706                                           true : false);
6707                         if (rc)
6708                                 bp->flags = old_flags;
6709                 }
6710         }
6711         return rc;
6712 }
6713
6714 static void bnxt_dump_tx_sw_state(struct bnxt_napi *bnapi)
6715 {
6716         struct bnxt_tx_ring_info *txr = bnapi->tx_ring;
6717         int i = bnapi->index;
6718
6719         if (!txr)
6720                 return;
6721
6722         netdev_info(bnapi->bp->dev, "[%d]: tx{fw_ring: %d prod: %x cons: %x}\n",
6723                     i, txr->tx_ring_struct.fw_ring_id, txr->tx_prod,
6724                     txr->tx_cons);
6725 }
6726
6727 static void bnxt_dump_rx_sw_state(struct bnxt_napi *bnapi)
6728 {
6729         struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
6730         int i = bnapi->index;
6731
6732         if (!rxr)
6733                 return;
6734
6735         netdev_info(bnapi->bp->dev, "[%d]: rx{fw_ring: %d prod: %x} rx_agg{fw_ring: %d agg_prod: %x sw_agg_prod: %x}\n",
6736                     i, rxr->rx_ring_struct.fw_ring_id, rxr->rx_prod,
6737                     rxr->rx_agg_ring_struct.fw_ring_id, rxr->rx_agg_prod,
6738                     rxr->rx_sw_agg_prod);
6739 }
6740
6741 static void bnxt_dump_cp_sw_state(struct bnxt_napi *bnapi)
6742 {
6743         struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
6744         int i = bnapi->index;
6745
6746         netdev_info(bnapi->bp->dev, "[%d]: cp{fw_ring: %d raw_cons: %x}\n",
6747                     i, cpr->cp_ring_struct.fw_ring_id, cpr->cp_raw_cons);
6748 }
6749
6750 static void bnxt_dbg_dump_states(struct bnxt *bp)
6751 {
6752         int i;
6753         struct bnxt_napi *bnapi;
6754
6755         for (i = 0; i < bp->cp_nr_rings; i++) {
6756                 bnapi = bp->bnapi[i];
6757                 if (netif_msg_drv(bp)) {
6758                         bnxt_dump_tx_sw_state(bnapi);
6759                         bnxt_dump_rx_sw_state(bnapi);
6760                         bnxt_dump_cp_sw_state(bnapi);
6761                 }
6762         }
6763 }
6764
6765 static void bnxt_reset_task(struct bnxt *bp, bool silent)
6766 {
6767         if (!silent)
6768                 bnxt_dbg_dump_states(bp);
6769         if (netif_running(bp->dev)) {
6770                 int rc;
6771
6772                 if (!silent)
6773                         bnxt_ulp_stop(bp);
6774                 bnxt_close_nic(bp, false, false);
6775                 rc = bnxt_open_nic(bp, false, false);
6776                 if (!silent && !rc)
6777                         bnxt_ulp_start(bp);
6778         }
6779 }
6780
6781 static void bnxt_tx_timeout(struct net_device *dev)
6782 {
6783         struct bnxt *bp = netdev_priv(dev);
6784
6785         netdev_err(bp->dev, "TX timeout detected, starting reset task!\n");
6786         set_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event);
6787         schedule_work(&bp->sp_task);
6788 }
6789
6790 #ifdef CONFIG_NET_POLL_CONTROLLER
6791 static void bnxt_poll_controller(struct net_device *dev)
6792 {
6793         struct bnxt *bp = netdev_priv(dev);
6794         int i;
6795
6796         /* Only process tx rings/combined rings in netpoll mode. */
6797         for (i = 0; i < bp->tx_nr_rings; i++) {
6798                 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
6799
6800                 napi_schedule(&txr->bnapi->napi);
6801         }
6802 }
6803 #endif
6804
6805 static void bnxt_timer(unsigned long data)
6806 {
6807         struct bnxt *bp = (struct bnxt *)data;
6808         struct net_device *dev = bp->dev;
6809
6810         if (!netif_running(dev))
6811                 return;
6812
6813         if (atomic_read(&bp->intr_sem) != 0)
6814                 goto bnxt_restart_timer;
6815
6816         if (bp->link_info.link_up && (bp->flags & BNXT_FLAG_PORT_STATS)) {
6817                 set_bit(BNXT_PERIODIC_STATS_SP_EVENT, &bp->sp_event);
6818                 schedule_work(&bp->sp_task);
6819         }
6820 bnxt_restart_timer:
6821         mod_timer(&bp->timer, jiffies + bp->current_interval);
6822 }
6823
6824 static void bnxt_rtnl_lock_sp(struct bnxt *bp)
6825 {
6826         /* We are called from bnxt_sp_task which has BNXT_STATE_IN_SP_TASK
6827          * set.  If the device is being closed, bnxt_close() may be holding the
6828          * RTNL lock and waiting for BNXT_STATE_IN_SP_TASK to clear.  So we must
6829          * clear BNXT_STATE_IN_SP_TASK before taking the RTNL lock.
6830          */
6831         clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
6832         rtnl_lock();
6833 }
6834
6835 static void bnxt_rtnl_unlock_sp(struct bnxt *bp)
6836 {
6837         set_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
6838         rtnl_unlock();
6839 }
6840
6841 /* Only called from bnxt_sp_task() */
6842 static void bnxt_reset(struct bnxt *bp, bool silent)
6843 {
6844         bnxt_rtnl_lock_sp(bp);
6845         if (test_bit(BNXT_STATE_OPEN, &bp->state))
6846                 bnxt_reset_task(bp, silent);
6847         bnxt_rtnl_unlock_sp(bp);
6848 }
6849
6850 static void bnxt_cfg_ntp_filters(struct bnxt *);
6851
6852 static void bnxt_sp_task(struct work_struct *work)
6853 {
6854         struct bnxt *bp = container_of(work, struct bnxt, sp_task);
6855
6856         set_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
6857         smp_mb__after_atomic();
6858         if (!test_bit(BNXT_STATE_OPEN, &bp->state)) {
6859                 clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
6860                 return;
6861         }
6862
6863         if (test_and_clear_bit(BNXT_RX_MASK_SP_EVENT, &bp->sp_event))
6864                 bnxt_cfg_rx_mode(bp);
6865
6866         if (test_and_clear_bit(BNXT_RX_NTP_FLTR_SP_EVENT, &bp->sp_event))
6867                 bnxt_cfg_ntp_filters(bp);
6868         if (test_and_clear_bit(BNXT_HWRM_EXEC_FWD_REQ_SP_EVENT, &bp->sp_event))
6869                 bnxt_hwrm_exec_fwd_req(bp);
6870         if (test_and_clear_bit(BNXT_VXLAN_ADD_PORT_SP_EVENT, &bp->sp_event)) {
6871                 bnxt_hwrm_tunnel_dst_port_alloc(
6872                         bp, bp->vxlan_port,
6873                         TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN);
6874         }
6875         if (test_and_clear_bit(BNXT_VXLAN_DEL_PORT_SP_EVENT, &bp->sp_event)) {
6876                 bnxt_hwrm_tunnel_dst_port_free(
6877                         bp, TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN);
6878         }
6879         if (test_and_clear_bit(BNXT_GENEVE_ADD_PORT_SP_EVENT, &bp->sp_event)) {
6880                 bnxt_hwrm_tunnel_dst_port_alloc(
6881                         bp, bp->nge_port,
6882                         TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE);
6883         }
6884         if (test_and_clear_bit(BNXT_GENEVE_DEL_PORT_SP_EVENT, &bp->sp_event)) {
6885                 bnxt_hwrm_tunnel_dst_port_free(
6886                         bp, TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE);
6887         }
6888         if (test_and_clear_bit(BNXT_PERIODIC_STATS_SP_EVENT, &bp->sp_event))
6889                 bnxt_hwrm_port_qstats(bp);
6890
6891         /* The functions below will clear BNXT_STATE_IN_SP_TASK.  They
6892          * must be the last functions to be called before exiting.
6893          */
6894         if (test_and_clear_bit(BNXT_LINK_CHNG_SP_EVENT, &bp->sp_event)) {
6895                 int rc = 0;
6896
6897                 if (test_and_clear_bit(BNXT_LINK_SPEED_CHNG_SP_EVENT,
6898                                        &bp->sp_event))
6899                         bnxt_hwrm_phy_qcaps(bp);
6900
6901                 bnxt_rtnl_lock_sp(bp);
6902                 if (test_bit(BNXT_STATE_OPEN, &bp->state))
6903                         rc = bnxt_update_link(bp, true);
6904                 bnxt_rtnl_unlock_sp(bp);
6905                 if (rc)
6906                         netdev_err(bp->dev, "SP task can't update link (rc: %x)\n",
6907                                    rc);
6908         }
6909         if (test_and_clear_bit(BNXT_HWRM_PORT_MODULE_SP_EVENT, &bp->sp_event)) {
6910                 bnxt_rtnl_lock_sp(bp);
6911                 if (test_bit(BNXT_STATE_OPEN, &bp->state))
6912                         bnxt_get_port_module_status(bp);
6913                 bnxt_rtnl_unlock_sp(bp);
6914         }
6915         if (test_and_clear_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event))
6916                 bnxt_reset(bp, false);
6917
6918         if (test_and_clear_bit(BNXT_RESET_TASK_SILENT_SP_EVENT, &bp->sp_event))
6919                 bnxt_reset(bp, true);
6920
6921         smp_mb__before_atomic();
6922         clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
6923 }
6924
6925 /* Under rtnl_lock */
6926 int bnxt_reserve_rings(struct bnxt *bp, int tx, int rx, bool sh, int tcs,
6927                        int tx_xdp)
6928 {
6929         int max_rx, max_tx, tx_sets = 1;
6930         int tx_rings_needed;
6931         int rc;
6932
6933         if (tcs)
6934                 tx_sets = tcs;
6935
6936         rc = bnxt_get_max_rings(bp, &max_rx, &max_tx, sh);
6937         if (rc)
6938                 return rc;
6939
6940         if (max_rx < rx)
6941                 return -ENOMEM;
6942
6943         tx_rings_needed = tx * tx_sets + tx_xdp;
6944         if (max_tx < tx_rings_needed)
6945                 return -ENOMEM;
6946
6947         if (bnxt_hwrm_reserve_tx_rings(bp, &tx_rings_needed) ||
6948             tx_rings_needed < (tx * tx_sets + tx_xdp))
6949                 return -ENOMEM;
6950         return 0;
6951 }
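
As a worked example of the ring math above (illustrative values): requesting tx = 8 rings per set with tcs = 3 traffic classes and tx_xdp = 8 XDP rings needs 8 * 3 + 8 = 32 hardware TX rings, and the call fails with -ENOMEM unless both max_tx and the firmware's TX-ring reservation can cover all 32.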
6952
6953 static void bnxt_unmap_bars(struct bnxt *bp, struct pci_dev *pdev)
6954 {
6955         if (bp->bar2) {
6956                 pci_iounmap(pdev, bp->bar2);
6957                 bp->bar2 = NULL;
6958         }
6959
6960         if (bp->bar1) {
6961                 pci_iounmap(pdev, bp->bar1);
6962                 bp->bar1 = NULL;
6963         }
6964
6965         if (bp->bar0) {
6966                 pci_iounmap(pdev, bp->bar0);
6967                 bp->bar0 = NULL;
6968         }
6969 }
6970
6971 static void bnxt_cleanup_pci(struct bnxt *bp)
6972 {
6973         bnxt_unmap_bars(bp, bp->pdev);
6974         pci_release_regions(bp->pdev);
6975         pci_disable_device(bp->pdev);
6976 }
6977
6978 static int bnxt_init_board(struct pci_dev *pdev, struct net_device *dev)
6979 {
6980         int rc;
6981         struct bnxt *bp = netdev_priv(dev);
6982
6983         SET_NETDEV_DEV(dev, &pdev->dev);
6984
6985         /* enable device (incl. PCI PM wakeup), and bus-mastering */
6986         rc = pci_enable_device(pdev);
6987         if (rc) {
6988                 dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
6989                 goto init_err;
6990         }
6991
6992         if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
6993                 dev_err(&pdev->dev,
6994                         "Cannot find PCI device base address, aborting\n");
6995                 rc = -ENODEV;
6996                 goto init_err_disable;
6997         }
6998
6999         rc = pci_request_regions(pdev, DRV_MODULE_NAME);
7000         if (rc) {
7001                 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
7002                 goto init_err_disable;
7003         }
7004
7005         if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)) != 0 &&
7006             dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)) != 0) {
7007                 dev_err(&pdev->dev, "System does not support DMA, aborting\n");
7008                 rc = -EIO;
                     goto init_err_disable;
7009         }
7010
7011         pci_set_master(pdev);
7012
7013         bp->dev = dev;
7014         bp->pdev = pdev;
7015
7016         bp->bar0 = pci_ioremap_bar(pdev, 0);
7017         if (!bp->bar0) {
7018                 dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
7019                 rc = -ENOMEM;
7020                 goto init_err_release;
7021         }
7022
7023         bp->bar1 = pci_ioremap_bar(pdev, 2);
7024         if (!bp->bar1) {
7025                 dev_err(&pdev->dev, "Cannot map doorbell registers, aborting\n");
7026                 rc = -ENOMEM;
7027                 goto init_err_release;
7028         }
7029
7030         bp->bar2 = pci_ioremap_bar(pdev, 4);
7031         if (!bp->bar2) {
7032                 dev_err(&pdev->dev, "Cannot map bar4 registers, aborting\n");
7033                 rc = -ENOMEM;
7034                 goto init_err_release;
7035         }
7036
7037         pci_enable_pcie_error_reporting(pdev);
7038
7039         INIT_WORK(&bp->sp_task, bnxt_sp_task);
7040
7041         spin_lock_init(&bp->ntp_fltr_lock);
7042
7043         bp->rx_ring_size = BNXT_DEFAULT_RX_RING_SIZE;
7044         bp->tx_ring_size = BNXT_DEFAULT_TX_RING_SIZE;
7045
7046         /* tick values in microseconds */
7047         bp->rx_coal_ticks = 12;
7048         bp->rx_coal_bufs = 30;
7049         bp->rx_coal_ticks_irq = 1;
7050         bp->rx_coal_bufs_irq = 2;
7051
7052         bp->tx_coal_ticks = 25;
7053         bp->tx_coal_bufs = 30;
7054         bp->tx_coal_ticks_irq = 2;
7055         bp->tx_coal_bufs_irq = 2;
7056
7057         bp->stats_coal_ticks = BNXT_DEF_STATS_COAL_TICKS;
7058
7059         init_timer(&bp->timer);
7060         bp->timer.data = (unsigned long)bp;
7061         bp->timer.function = bnxt_timer;
7062         bp->current_interval = BNXT_TIMER_INTERVAL;
7063
7064         clear_bit(BNXT_STATE_OPEN, &bp->state);
7065         return 0;
7066
7067 init_err_release:
7068         bnxt_unmap_bars(bp, pdev);
7069         pci_release_regions(pdev);
7070
7071 init_err_disable:
7072         pci_disable_device(pdev);
7073
7074 init_err:
7075         return rc;
7076 }
7077
7078 /* rtnl_lock held */
7079 static int bnxt_change_mac_addr(struct net_device *dev, void *p)
7080 {
7081         struct sockaddr *addr = p;
7082         struct bnxt *bp = netdev_priv(dev);
7083         int rc = 0;
7084
7085         if (!is_valid_ether_addr(addr->sa_data))
7086                 return -EADDRNOTAVAIL;
7087
7088         rc = bnxt_approve_mac(bp, addr->sa_data);
7089         if (rc)
7090                 return rc;
7091
7092         if (ether_addr_equal(addr->sa_data, dev->dev_addr))
7093                 return 0;
7094
7095         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
7096         if (netif_running(dev)) {
7097                 bnxt_close_nic(bp, false, false);
7098                 rc = bnxt_open_nic(bp, false, false);
7099         }
7100
7101         return rc;
7102 }
7103
7104 /* rtnl_lock held */
7105 static int bnxt_change_mtu(struct net_device *dev, int new_mtu)
7106 {
7107         struct bnxt *bp = netdev_priv(dev);
7108
7109         if (netif_running(dev))
7110                 bnxt_close_nic(bp, false, false);
7111
7112         dev->mtu = new_mtu;
7113         bnxt_set_ring_params(bp);
7114
7115         if (netif_running(dev))
7116                 return bnxt_open_nic(bp, false, false);
7117
7118         return 0;
7119 }
7120
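/* Set up @tc hardware traffic classes; caller must hold rtnl_lock.
 * Reserves rings with firmware first, then closes and reopens the NIC
 * if it is running so that TX rings and stat contexts can be resized.
 */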
7121 int bnxt_setup_mq_tc(struct net_device *dev, u8 tc)
7122 {
7123         struct bnxt *bp = netdev_priv(dev);
7124         bool sh = false;
7125         int rc;
7126
7127         if (tc > bp->max_tc) {
7128                 netdev_err(dev, "Too many traffic classes requested: %d. Max supported is %d.\n",
7129                            tc, bp->max_tc);
7130                 return -EINVAL;
7131         }
7132
7133         if (netdev_get_num_tc(dev) == tc)
7134                 return 0;
7135
7136         if (bp->flags & BNXT_FLAG_SHARED_RINGS)
7137                 sh = true;
7138
7139         rc = bnxt_reserve_rings(bp, bp->tx_nr_rings_per_tc, bp->rx_nr_rings,
7140                                 sh, tc, bp->tx_nr_rings_xdp);
7141         if (rc)
7142                 return rc;
7143
7144         /* Need to close the device and redo hw resource allocations */
7145         if (netif_running(bp->dev))
7146                 bnxt_close_nic(bp, true, false);
7147
7148         if (tc) {
7149                 bp->tx_nr_rings = bp->tx_nr_rings_per_tc * tc;
7150                 netdev_set_num_tc(dev, tc);
7151         } else {
7152                 bp->tx_nr_rings = bp->tx_nr_rings_per_tc;
7153                 netdev_reset_tc(dev);
7154         }
7155         bp->cp_nr_rings = sh ? max_t(int, bp->tx_nr_rings, bp->rx_nr_rings) :
7156                                bp->tx_nr_rings + bp->rx_nr_rings;
7157         bp->num_stat_ctxs = bp->cp_nr_rings;
7158
7159         if (netif_running(bp->dev))
7160                 return bnxt_open_nic(bp, true, false);
7161
7162         return 0;
7163 }
7164
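/* ndo_setup_tc handler.  Only TC_SETUP_MQPRIO is supported, and the
 * queue mapping is always offloaded to hardware TC queues.  An
 * illustrative way to reach this path (interface name hypothetical):
 *
 *     tc qdisc add dev eth0 root mqprio num_tc 4 map 0 1 2 3 hw 1
 */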
7165 static int bnxt_setup_tc(struct net_device *dev, u32 handle, u32 chain_index,
7166                          __be16 proto, struct tc_to_netdev *ntc)
7167 {
7168         if (ntc->type != TC_SETUP_MQPRIO)
7169                 return -EINVAL;
7170
7171         ntc->mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;
7172
7173         return bnxt_setup_mq_tc(dev, ntc->mqprio->num_tc);
7174 }
7175
7176 #ifdef CONFIG_RFS_ACCEL
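/* Return true if the two ntuple filters match on every dissected flow
 * field and on both source and destination MAC addresses.
 */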
7177 static bool bnxt_fltr_match(struct bnxt_ntuple_filter *f1,
7178                             struct bnxt_ntuple_filter *f2)
7179 {
7180         struct flow_keys *keys1 = &f1->fkeys;
7181         struct flow_keys *keys2 = &f2->fkeys;
7182
7183         if (keys1->addrs.v4addrs.src == keys2->addrs.v4addrs.src &&
7184             keys1->addrs.v4addrs.dst == keys2->addrs.v4addrs.dst &&
7185             keys1->ports.ports == keys2->ports.ports &&
7186             keys1->basic.ip_proto == keys2->basic.ip_proto &&
7187             keys1->basic.n_proto == keys2->basic.n_proto &&
7188             keys1->control.flags == keys2->control.flags &&
7189             ether_addr_equal(f1->src_mac_addr, f2->src_mac_addr) &&
7190             ether_addr_equal(f1->dst_mac_addr, f2->dst_mac_addr))
7191                 return true;
7192
7193         return false;
7194 }
7195
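/* aRFS ndo_rx_flow_steer handler.  Dissects the flow, rejects protocols
 * that cannot be steered, then allocates an ntuple filter entry and lets
 * bnxt_sp_task program it via HWRM.  Returns the filter's sw_id on
 * success or a negative errno.
 */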
7196 static int bnxt_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
7197                               u16 rxq_index, u32 flow_id)
7198 {
7199         struct bnxt *bp = netdev_priv(dev);
7200         struct bnxt_ntuple_filter *fltr, *new_fltr;
7201         struct flow_keys *fkeys;
7202         struct ethhdr *eth = (struct ethhdr *)skb_mac_header(skb);
7203         int rc = 0, idx, bit_id, l2_idx = 0;
7204         struct hlist_head *head;
7205
7206         if (!ether_addr_equal(dev->dev_addr, eth->h_dest)) {
7207                 struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
7208                 int off = 0, j;
7209
7210                 netif_addr_lock_bh(dev);
7211                 for (j = 0; j < vnic->uc_filter_count; j++, off += ETH_ALEN) {
7212                         if (ether_addr_equal(eth->h_dest,
7213                                              vnic->uc_list + off)) {
7214                                 l2_idx = j + 1;
7215                                 break;
7216                         }
7217                 }
7218                 netif_addr_unlock_bh(dev);
7219                 if (!l2_idx)
7220                         return -EINVAL;
7221         }
7222         new_fltr = kzalloc(sizeof(*new_fltr), GFP_ATOMIC);
7223         if (!new_fltr)
7224                 return -ENOMEM;
7225
7226         fkeys = &new_fltr->fkeys;
7227         if (!skb_flow_dissect_flow_keys(skb, fkeys, 0)) {
7228                 rc = -EPROTONOSUPPORT;
7229                 goto err_free;
7230         }
7231
7232         if ((fkeys->basic.n_proto != htons(ETH_P_IP) &&
7233              fkeys->basic.n_proto != htons(ETH_P_IPV6)) ||
7234             ((fkeys->basic.ip_proto != IPPROTO_TCP) &&
7235              (fkeys->basic.ip_proto != IPPROTO_UDP))) {
7236                 rc = -EPROTONOSUPPORT;
7237                 goto err_free;
7238         }
7239         if (fkeys->basic.n_proto == htons(ETH_P_IPV6) &&
7240             bp->hwrm_spec_code < 0x10601) {
7241                 rc = -EPROTONOSUPPORT;
7242                 goto err_free;
7243         }
7244         if ((fkeys->control.flags & FLOW_DIS_ENCAPSULATION) &&
7245             bp->hwrm_spec_code < 0x10601) {
7246                 rc = -EPROTONOSUPPORT;
7247                 goto err_free;
7248         }
7249
7250         memcpy(new_fltr->dst_mac_addr, eth->h_dest, ETH_ALEN);
7251         memcpy(new_fltr->src_mac_addr, eth->h_source, ETH_ALEN);
7252
7253         idx = skb_get_hash_raw(skb) & BNXT_NTP_FLTR_HASH_MASK;
7254         head = &bp->ntp_fltr_hash_tbl[idx];
7255         rcu_read_lock();
7256         hlist_for_each_entry_rcu(fltr, head, hash) {
7257                 if (bnxt_fltr_match(fltr, new_fltr)) {
7258                         rcu_read_unlock();
7259                         rc = 0;
7260                         goto err_free;
7261                 }
7262         }
7263         rcu_read_unlock();
7264
7265         spin_lock_bh(&bp->ntp_fltr_lock);
7266         bit_id = bitmap_find_free_region(bp->ntp_fltr_bmap,
7267                                          BNXT_NTP_FLTR_MAX_FLTR, 0);
7268         if (bit_id < 0) {
7269                 spin_unlock_bh(&bp->ntp_fltr_lock);
7270                 rc = -ENOMEM;
7271                 goto err_free;
7272         }
7273
7274         new_fltr->sw_id = (u16)bit_id;
7275         new_fltr->flow_id = flow_id;
7276         new_fltr->l2_fltr_idx = l2_idx;
7277         new_fltr->rxq = rxq_index;
7278         hlist_add_head_rcu(&new_fltr->hash, head);
7279         bp->ntp_fltr_count++;
7280         spin_unlock_bh(&bp->ntp_fltr_lock);
7281
7282         set_bit(BNXT_RX_NTP_FLTR_SP_EVENT, &bp->sp_event);
7283         schedule_work(&bp->sp_task);
7284
7285         return new_fltr->sw_id;
7286
7287 err_free:
7288         kfree(new_fltr);
7289         return rc;
7290 }
7291
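/* Called from bnxt_sp_task to walk the ntuple filter hash table:
 * filters not yet valid are programmed into hardware, and filters that
 * the RPS stack has expired are freed.
 */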
7292 static void bnxt_cfg_ntp_filters(struct bnxt *bp)
7293 {
7294         int i;
7295
7296         for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++) {
7297                 struct hlist_head *head;
7298                 struct hlist_node *tmp;
7299                 struct bnxt_ntuple_filter *fltr;
7300                 int rc;
7301
7302                 head = &bp->ntp_fltr_hash_tbl[i];
7303                 hlist_for_each_entry_safe(fltr, tmp, head, hash) {
7304                         bool del = false;
7305
7306                         if (test_bit(BNXT_FLTR_VALID, &fltr->state)) {
7307                                 if (rps_may_expire_flow(bp->dev, fltr->rxq,
7308                                                         fltr->flow_id,
7309                                                         fltr->sw_id)) {
7310                                         bnxt_hwrm_cfa_ntuple_filter_free(bp,
7311                                                                          fltr);
7312                                         del = true;
7313                                 }
7314                         } else {
7315                                 rc = bnxt_hwrm_cfa_ntuple_filter_alloc(bp,
7316                                                                        fltr);
7317                                 if (rc)
7318                                         del = true;
7319                                 else
7320                                         set_bit(BNXT_FLTR_VALID, &fltr->state);
7321                         }
7322
7323                         if (del) {
7324                                 spin_lock_bh(&bp->ntp_fltr_lock);
7325                                 hlist_del_rcu(&fltr->hash);
7326                                 bp->ntp_fltr_count--;
7327                                 spin_unlock_bh(&bp->ntp_fltr_lock);
7328                                 synchronize_rcu();
7329                                 clear_bit(fltr->sw_id, bp->ntp_fltr_bmap);
7330                                 kfree(fltr);
7331                         }
7332                 }
7333         }
7334         if (test_and_clear_bit(BNXT_HWRM_PF_UNLOAD_SP_EVENT, &bp->sp_event))
7335                 netdev_info(bp->dev, "Received PF driver unload event!\n");
7336 }
7337
7338 #else
7339
7340 static void bnxt_cfg_ntp_filters(struct bnxt *bp)
7341 {
7342 }
7343
7344 #endif /* CONFIG_RFS_ACCEL */
7345
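/* ndo_udp_tunnel_add handler: reference-count the VXLAN/GENEVE ports
 * and schedule bnxt_sp_task to program the first port of each type
 * into the hardware.
 */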
7346 static void bnxt_udp_tunnel_add(struct net_device *dev,
7347                                 struct udp_tunnel_info *ti)
7348 {
7349         struct bnxt *bp = netdev_priv(dev);
7350
7351         if (ti->sa_family != AF_INET6 && ti->sa_family != AF_INET)
7352                 return;
7353
7354         if (!netif_running(dev))
7355                 return;
7356
7357         switch (ti->type) {
7358         case UDP_TUNNEL_TYPE_VXLAN:
7359                 if (bp->vxlan_port_cnt && bp->vxlan_port != ti->port)
7360                         return;
7361
7362                 bp->vxlan_port_cnt++;
7363                 if (bp->vxlan_port_cnt == 1) {
7364                         bp->vxlan_port = ti->port;
7365                         set_bit(BNXT_VXLAN_ADD_PORT_SP_EVENT, &bp->sp_event);
7367                 }
7368                 break;
7369         case UDP_TUNNEL_TYPE_GENEVE:
7370                 if (bp->nge_port_cnt && bp->nge_port != ti->port)
7371                         return;
7372
7373                 bp->nge_port_cnt++;
7374                 if (bp->nge_port_cnt == 1) {
7375                         bp->nge_port = ti->port;
7376                         set_bit(BNXT_GENEVE_ADD_PORT_SP_EVENT, &bp->sp_event);
7377                 }
7378                 break;
7379         default:
7380                 return;
7381         }
7382
7383         schedule_work(&bp->sp_task);
7384 }
7385
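/* ndo_udp_tunnel_del handler: drop the port reference count and, when
 * it reaches zero, schedule bnxt_sp_task to free the port in hardware.
 */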
7386 static void bnxt_udp_tunnel_del(struct net_device *dev,
7387                                 struct udp_tunnel_info *ti)
7388 {
7389         struct bnxt *bp = netdev_priv(dev);
7390
7391         if (ti->sa_family != AF_INET6 && ti->sa_family != AF_INET)
7392                 return;
7393
7394         if (!netif_running(dev))
7395                 return;
7396
7397         switch (ti->type) {
7398         case UDP_TUNNEL_TYPE_VXLAN:
7399                 if (!bp->vxlan_port_cnt || bp->vxlan_port != ti->port)
7400                         return;
7401                 bp->vxlan_port_cnt--;
7402
7403                 if (bp->vxlan_port_cnt != 0)
7404                         return;
7405
7406                 set_bit(BNXT_VXLAN_DEL_PORT_SP_EVENT, &bp->sp_event);
7407                 break;
7408         case UDP_TUNNEL_TYPE_GENEVE:
7409                 if (!bp->nge_port_cnt || bp->nge_port != ti->port)
7410                         return;
7411                 bp->nge_port_cnt--;
7412
7413                 if (bp->nge_port_cnt != 0)
7414                         return;
7415
7416                 set_bit(BNXT_GENEVE_DEL_PORT_SP_EVENT, &bp->sp_event);
7417                 break;
7418         default:
7419                 return;
7420         }
7421
7422         schedule_work(&bp->sp_task);
7423 }
7424
7425 static const struct net_device_ops bnxt_netdev_ops = {
7426         .ndo_open               = bnxt_open,
7427         .ndo_start_xmit         = bnxt_start_xmit,
7428         .ndo_stop               = bnxt_close,
7429         .ndo_get_stats64        = bnxt_get_stats64,
7430         .ndo_set_rx_mode        = bnxt_set_rx_mode,
7431         .ndo_do_ioctl           = bnxt_ioctl,
7432         .ndo_validate_addr      = eth_validate_addr,
7433         .ndo_set_mac_address    = bnxt_change_mac_addr,
7434         .ndo_change_mtu         = bnxt_change_mtu,
7435         .ndo_fix_features       = bnxt_fix_features,
7436         .ndo_set_features       = bnxt_set_features,
7437         .ndo_tx_timeout         = bnxt_tx_timeout,
7438 #ifdef CONFIG_BNXT_SRIOV
7439         .ndo_get_vf_config      = bnxt_get_vf_config,
7440         .ndo_set_vf_mac         = bnxt_set_vf_mac,
7441         .ndo_set_vf_vlan        = bnxt_set_vf_vlan,
7442         .ndo_set_vf_rate        = bnxt_set_vf_bw,
7443         .ndo_set_vf_link_state  = bnxt_set_vf_link_state,
7444         .ndo_set_vf_spoofchk    = bnxt_set_vf_spoofchk,
7445 #endif
7446 #ifdef CONFIG_NET_POLL_CONTROLLER
7447         .ndo_poll_controller    = bnxt_poll_controller,
7448 #endif
7449         .ndo_setup_tc           = bnxt_setup_tc,
7450 #ifdef CONFIG_RFS_ACCEL
7451         .ndo_rx_flow_steer      = bnxt_rx_flow_steer,
7452 #endif
7453         .ndo_udp_tunnel_add     = bnxt_udp_tunnel_add,
7454         .ndo_udp_tunnel_del     = bnxt_udp_tunnel_del,
7455         .ndo_xdp                = bnxt_xdp,
7456 };
7457
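/* PCI remove callback: disable SR-IOV, unregister the netdev, flush
 * deferred work and release HWRM resources and BAR mappings, roughly in
 * the reverse order of bnxt_init_one().
 */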
7458 static void bnxt_remove_one(struct pci_dev *pdev)
7459 {
7460         struct net_device *dev = pci_get_drvdata(pdev);
7461         struct bnxt *bp = netdev_priv(dev);
7462
7463         if (BNXT_PF(bp))
7464                 bnxt_sriov_disable(bp);
7465
7466         pci_disable_pcie_error_reporting(pdev);
7467         unregister_netdev(dev);
7468         cancel_work_sync(&bp->sp_task);
7469         bp->sp_event = 0;
7470
7471         bnxt_clear_int_mode(bp);
7472         bnxt_hwrm_func_drv_unrgtr(bp);
7473         bnxt_free_hwrm_resources(bp);
7474         bnxt_free_hwrm_short_cmd_req(bp);
7475         bnxt_ethtool_free(bp);
7476         bnxt_dcb_free(bp);
7477         kfree(bp->edev);
7478         bp->edev = NULL;
7479         if (bp->xdp_prog)
7480                 bpf_prog_put(bp->xdp_prog);
7481         bnxt_cleanup_pci(bp);
7482         free_netdev(dev);
7483 }
7484
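/* Query PHY capabilities and the current link state from firmware, then
 * seed the ethtool link settings from the NVM defaults.
 */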
7485 static int bnxt_probe_phy(struct bnxt *bp)
7486 {
7487         int rc = 0;
7488         struct bnxt_link_info *link_info = &bp->link_info;
7489
7490         rc = bnxt_hwrm_phy_qcaps(bp);
7491         if (rc) {
7492                 netdev_err(bp->dev, "Probe phy can't get phy capabilities (rc: %x)\n",
7493                            rc);
7494                 return rc;
7495         }
7496
7497         rc = bnxt_update_link(bp, false);
7498         if (rc) {
7499                 netdev_err(bp->dev, "Probe phy can't update link (rc: %x)\n",
7500                            rc);
7501                 return rc;
7502         }
7503
7504         /* Older firmware does not have supported_auto_speeds, so assume
7505          * that all supported speeds can be autonegotiated.
7506          */
7507         if (link_info->auto_link_speeds && !link_info->support_auto_speeds)
7508                 link_info->support_auto_speeds = link_info->support_speeds;
7509
7510         /* Initialize the ethtool settings copy with NVM settings */
7511         if (BNXT_AUTO_MODE(link_info->auto_mode)) {
7512                 link_info->autoneg = BNXT_AUTONEG_SPEED;
7513                 if (bp->hwrm_spec_code >= 0x10201) {
7514                         if (link_info->auto_pause_setting &
7515                             PORT_PHY_CFG_REQ_AUTO_PAUSE_AUTONEG_PAUSE)
7516                                 link_info->autoneg |= BNXT_AUTONEG_FLOW_CTRL;
7517                 } else {
7518                         link_info->autoneg |= BNXT_AUTONEG_FLOW_CTRL;
7519                 }
7520                 link_info->advertising = link_info->auto_link_speeds;
7521         } else {
7522                 link_info->req_link_speed = link_info->force_link_speed;
7523                 link_info->req_duplex = link_info->duplex_setting;
7524         }
7525         if (link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL)
7526                 link_info->req_flow_ctrl =
7527                         link_info->auto_pause_setting & BNXT_LINK_PAUSE_BOTH;
7528         else
7529                 link_info->req_flow_ctrl = link_info->force_pause_setting;
7530         return rc;
7531 }
7532
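/* Return the MSI-X table size from PCI config space, or 1 if the
 * device has no MSI-X capability.
 */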
7533 static int bnxt_get_max_irq(struct pci_dev *pdev)
7534 {
7535         u16 ctrl;
7536
7537         if (!pdev->msix_cap)
7538                 return 1;
7539
7540         pci_read_config_word(pdev, pdev->msix_cap + PCI_MSIX_FLAGS, &ctrl);
7541         return (ctrl & PCI_MSIX_FLAGS_QSIZE) + 1;
7542 }
7543
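/* Compute the maximum usable RX/TX/completion rings for this function
 * from the PF or VF resource limits reported by firmware.
 */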
7544 static void _bnxt_get_max_rings(struct bnxt *bp, int *max_rx, int *max_tx,
7545                                 int *max_cp)
7546 {
7547         int max_ring_grps = 0;
7548
7549 #ifdef CONFIG_BNXT_SRIOV
7550         if (!BNXT_PF(bp)) {
7551                 *max_tx = bp->vf.max_tx_rings;
7552                 *max_rx = bp->vf.max_rx_rings;
7553                 *max_cp = min_t(int, bp->vf.max_irqs, bp->vf.max_cp_rings);
7554                 *max_cp = min_t(int, *max_cp, bp->vf.max_stat_ctxs);
7555                 max_ring_grps = bp->vf.max_hw_ring_grps;
7556         } else
7557 #endif
7558         {
7559                 *max_tx = bp->pf.max_tx_rings;
7560                 *max_rx = bp->pf.max_rx_rings;
7561                 *max_cp = min_t(int, bp->pf.max_irqs, bp->pf.max_cp_rings);
7562                 *max_cp = min_t(int, *max_cp, bp->pf.max_stat_ctxs);
7563                 max_ring_grps = bp->pf.max_hw_ring_grps;
7564         }
7565         if (BNXT_CHIP_TYPE_NITRO_A0(bp) && BNXT_PF(bp)) {
7566                 *max_cp -= 1;
7567                 *max_rx -= 2;
7568         }
7569         if (bp->flags & BNXT_FLAG_AGG_RINGS)
7570                 *max_rx >>= 1;
7571         *max_rx = min_t(int, *max_rx, max_ring_grps);
7572 }
7573
7574 int bnxt_get_max_rings(struct bnxt *bp, int *max_rx, int *max_tx, bool shared)
7575 {
7576         int rx, tx, cp;
7577
7578         _bnxt_get_max_rings(bp, &rx, &tx, &cp);
7579         if (!rx || !tx || !cp)
7580                 return -ENOMEM;
7581
7582         *max_rx = rx;
7583         *max_tx = tx;
7584         return bnxt_trim_rings(bp, max_rx, max_tx, cp, shared);
7585 }
7586
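/* Like bnxt_get_max_rings(), but falls back to non-aggregation rings
 * (no LRO) when resources are short, and carves out the minimum
 * completion rings and stat contexts needed for RoCE.
 */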
7587 static int bnxt_get_dflt_rings(struct bnxt *bp, int *max_rx, int *max_tx,
7588                                bool shared)
7589 {
7590         int rc;
7591
7592         rc = bnxt_get_max_rings(bp, max_rx, max_tx, shared);
7593         if (rc && (bp->flags & BNXT_FLAG_AGG_RINGS)) {
7594                 /* Not enough rings, try disabling agg rings. */
7595                 bp->flags &= ~BNXT_FLAG_AGG_RINGS;
7596                 rc = bnxt_get_max_rings(bp, max_rx, max_tx, shared);
7597                 if (rc)
7598                         return rc;
7599                 bp->flags |= BNXT_FLAG_NO_AGG_RINGS;
7600                 bp->dev->hw_features &= ~NETIF_F_LRO;
7601                 bp->dev->features &= ~NETIF_F_LRO;
7602                 bnxt_set_ring_params(bp);
7603         }
7604
7605         if (bp->flags & BNXT_FLAG_ROCE_CAP) {
7606                 int max_cp, max_stat, max_irq;
7607
7608                 /* Reserve minimum resources for RoCE */
7609                 max_cp = bnxt_get_max_func_cp_rings(bp);
7610                 max_stat = bnxt_get_max_func_stat_ctxs(bp);
7611                 max_irq = bnxt_get_max_func_irqs(bp);
7612                 if (max_cp <= BNXT_MIN_ROCE_CP_RINGS ||
7613                     max_irq <= BNXT_MIN_ROCE_CP_RINGS ||
7614                     max_stat <= BNXT_MIN_ROCE_STAT_CTXS)
7615                         return 0;
7616
7617                 max_cp -= BNXT_MIN_ROCE_CP_RINGS;
7618                 max_irq -= BNXT_MIN_ROCE_CP_RINGS;
7619                 max_stat -= BNXT_MIN_ROCE_STAT_CTXS;
7620                 max_cp = min_t(int, max_cp, max_irq);
7621                 max_cp = min_t(int, max_cp, max_stat);
7622                 rc = bnxt_trim_rings(bp, max_rx, max_tx, max_cp, shared);
7623                 if (rc)
7624                         rc = 0;
7625         }
7626         return rc;
7627 }
7628
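/* Choose default ring counts from the RSS queue heuristic, bounded by
 * what the hardware provides, and reserve the TX rings with firmware.
 */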
7629 static int bnxt_set_dflt_rings(struct bnxt *bp, bool sh)
7630 {
7631         int dflt_rings, max_rx_rings, max_tx_rings, rc;
7632
7633         if (sh)
7634                 bp->flags |= BNXT_FLAG_SHARED_RINGS;
7635         dflt_rings = netif_get_num_default_rss_queues();
7636         rc = bnxt_get_dflt_rings(bp, &max_rx_rings, &max_tx_rings, sh);
7637         if (rc)
7638                 return rc;
7639         bp->rx_nr_rings = min_t(int, dflt_rings, max_rx_rings);
7640         bp->tx_nr_rings_per_tc = min_t(int, dflt_rings, max_tx_rings);
7641
7642         rc = bnxt_hwrm_reserve_tx_rings(bp, &bp->tx_nr_rings_per_tc);
7643         if (rc)
7644                 netdev_warn(bp->dev, "Unable to reserve tx rings\n");
7645
7646         bp->tx_nr_rings = bp->tx_nr_rings_per_tc;
7647         bp->cp_nr_rings = sh ? max_t(int, bp->tx_nr_rings, bp->rx_nr_rings) :
7648                                bp->tx_nr_rings + bp->rx_nr_rings;
7649         bp->num_stat_ctxs = bp->cp_nr_rings;
7650         if (BNXT_CHIP_TYPE_NITRO_A0(bp)) {
7651                 bp->rx_nr_rings++;
7652                 bp->cp_nr_rings++;
7653         }
7654         return rc;
7655 }
7656
7657 void bnxt_restore_pf_fw_resources(struct bnxt *bp)
7658 {
7659         ASSERT_RTNL();
7660         bnxt_hwrm_func_qcaps(bp);
7661         bnxt_subtract_ulp_resources(bp, BNXT_ROCE_ULP);
7662 }
7663
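/* Log the minimum PCIe speed and width available to the device, or a
 * notice if they cannot be determined.
 */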
7664 static void bnxt_parse_log_pcie_link(struct bnxt *bp)
7665 {
7666         enum pcie_link_width width = PCIE_LNK_WIDTH_UNKNOWN;
7667         enum pci_bus_speed speed = PCI_SPEED_UNKNOWN;
7668
7669         if (pcie_get_minimum_link(bp->pdev, &speed, &width) ||
7670             speed == PCI_SPEED_UNKNOWN || width == PCIE_LNK_WIDTH_UNKNOWN)
7671                 netdev_info(bp->dev, "Failed to determine PCIe Link Info\n");
7672         else
7673                 netdev_info(bp->dev, "PCIe: Speed %s Width x%d\n",
7674                             speed == PCIE_SPEED_2_5GT ? "2.5GT/s" :
7675                             speed == PCIE_SPEED_5_0GT ? "5.0GT/s" :
7676                             speed == PCIE_SPEED_8_0GT ? "8.0GT/s" :
7677                             "Unknown", width);
7678 }
7679
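/* PCI probe callback: allocate the netdev, map the BARs, establish HWRM
 * communication with firmware, discover device capabilities, size the
 * default rings and register the net device.
 */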
7680 static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
7681 {
7682         static int version_printed;
7683         struct net_device *dev;
7684         struct bnxt *bp;
7685         int rc, max_irqs;
7686
7687         if (pci_is_bridge(pdev))
7688                 return -ENODEV;
7689
7690         if (version_printed++ == 0)
7691                 pr_info("%s", version);
7692
7693         max_irqs = bnxt_get_max_irq(pdev);
7694         dev = alloc_etherdev_mq(sizeof(*bp), max_irqs);
7695         if (!dev)
7696                 return -ENOMEM;
7697
7698         bp = netdev_priv(dev);
7699
7700         if (bnxt_vf_pciid(ent->driver_data))
7701                 bp->flags |= BNXT_FLAG_VF;
7702
7703         if (pdev->msix_cap)
7704                 bp->flags |= BNXT_FLAG_MSIX_CAP;
7705
7706         rc = bnxt_init_board(pdev, dev);
7707         if (rc < 0)
7708                 goto init_err_free;
7709
7710         dev->netdev_ops = &bnxt_netdev_ops;
7711         dev->watchdog_timeo = BNXT_TX_TIMEOUT;
7712         dev->ethtool_ops = &bnxt_ethtool_ops;
7713         pci_set_drvdata(pdev, dev);
7714
7715         rc = bnxt_alloc_hwrm_resources(bp);
7716         if (rc)
7717                 goto init_err_pci_clean;
7718
7719         mutex_init(&bp->hwrm_cmd_lock);
7720         rc = bnxt_hwrm_ver_get(bp);
7721         if (rc)
7722                 goto init_err_pci_clean;
7723
7724         if (bp->flags & BNXT_FLAG_SHORT_CMD) {
7725                 rc = bnxt_alloc_hwrm_short_cmd_req(bp);
7726                 if (rc)
7727                         goto init_err_pci_clean;
7728         }
7729
7730         rc = bnxt_hwrm_func_reset(bp);
7731         if (rc)
7732                 goto init_err_pci_clean;
7733
7734         bnxt_hwrm_fw_set_time(bp);
7735
7736         dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_SG |
7737                            NETIF_F_TSO | NETIF_F_TSO6 |
7738                            NETIF_F_GSO_UDP_TUNNEL | NETIF_F_GSO_GRE |
7739                            NETIF_F_GSO_IPXIP4 |
7740                            NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_GSO_GRE_CSUM |
7741                            NETIF_F_GSO_PARTIAL | NETIF_F_RXHASH |
7742                            NETIF_F_RXCSUM | NETIF_F_GRO;
7743
7744         if (!BNXT_CHIP_TYPE_NITRO_A0(bp))
7745                 dev->hw_features |= NETIF_F_LRO;
7746
7747         dev->hw_enc_features =
7748                         NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_SG |
7749                         NETIF_F_TSO | NETIF_F_TSO6 |
7750                         NETIF_F_GSO_UDP_TUNNEL | NETIF_F_GSO_GRE |
7751                         NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_GSO_GRE_CSUM |
7752                         NETIF_F_GSO_IPXIP4 | NETIF_F_GSO_PARTIAL;
7753         dev->gso_partial_features = NETIF_F_GSO_UDP_TUNNEL_CSUM |
7754                                     NETIF_F_GSO_GRE_CSUM;
7755         dev->vlan_features = dev->hw_features | NETIF_F_HIGHDMA;
7756         dev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_TX |
7757                             NETIF_F_HW_VLAN_STAG_RX | NETIF_F_HW_VLAN_STAG_TX;
7758         dev->features |= dev->hw_features | NETIF_F_HIGHDMA;
7759         dev->priv_flags |= IFF_UNICAST_FLT;
7760
7761         /* MTU range: 60 - 9500 */
7762         dev->min_mtu = ETH_ZLEN;
7763         dev->max_mtu = BNXT_MAX_MTU;
7764
7765 #ifdef CONFIG_BNXT_SRIOV
7766         init_waitqueue_head(&bp->sriov_cfg_wait);
7767 #endif
7768         bp->gro_func = bnxt_gro_func_5730x;
7769         if (BNXT_CHIP_P4_PLUS(bp))
7770                 bp->gro_func = bnxt_gro_func_5731x;
7771         else
7772                 bp->flags |= BNXT_FLAG_DOUBLE_DB;
7773
7774         rc = bnxt_hwrm_func_drv_rgtr(bp);
7775         if (rc)
7776                 goto init_err_pci_clean;
7777
7778         rc = bnxt_hwrm_func_rgtr_async_events(bp, NULL, 0);
7779         if (rc)
7780                 goto init_err_pci_clean;
7781
7782         bp->ulp_probe = bnxt_ulp_probe;
7783
7784         /* Get the maximum capabilities for this function */
7785         rc = bnxt_hwrm_func_qcaps(bp);
7786         if (rc) {
7787                 netdev_err(bp->dev, "hwrm query capability failure rc: %x\n",
7788                            rc);
7789                 rc = -1;
7790                 goto init_err_pci_clean;
7791         }
7792
7793         rc = bnxt_hwrm_queue_qportcfg(bp);
7794         if (rc) {
7795                 netdev_err(bp->dev, "hwrm query qportcfg failure rc: %x\n",
7796                            rc);
7797                 rc = -1;
7798                 goto init_err_pci_clean;
7799         }
7800
7801         bnxt_hwrm_func_qcfg(bp);
7802         bnxt_hwrm_port_led_qcaps(bp);
7803         bnxt_ethtool_init(bp);
7804         bnxt_dcb_init(bp);
7805
7806         bnxt_set_rx_skb_mode(bp, false);
7807         bnxt_set_tpa_flags(bp);
7808         bnxt_set_ring_params(bp);
7809         bnxt_set_max_func_irqs(bp, max_irqs);
7810         rc = bnxt_set_dflt_rings(bp, true);
7811         if (rc) {
7812                 netdev_err(bp->dev, "Not enough rings available.\n");
7813                 rc = -ENOMEM;
7814                 goto init_err_pci_clean;
7815         }
7816
7817         /* Default RSS hash cfg. */
7818         bp->rss_hash_cfg = VNIC_RSS_CFG_REQ_HASH_TYPE_IPV4 |
7819                            VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV4 |
7820                            VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6 |
7821                            VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV6;
7822         if (BNXT_CHIP_P4_PLUS(bp) && bp->hwrm_spec_code >= 0x10501) {
7823                 bp->flags |= BNXT_FLAG_UDP_RSS_CAP;
7824                 bp->rss_hash_cfg |= VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV4 |
7825                                     VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV6;
7826         }
7827
7828         bnxt_hwrm_vnic_qcaps(bp);
7829         if (bnxt_rfs_supported(bp)) {
7830                 dev->hw_features |= NETIF_F_NTUPLE;
7831                 if (bnxt_rfs_capable(bp)) {
7832                         bp->flags |= BNXT_FLAG_RFS;
7833                         dev->features |= NETIF_F_NTUPLE;
7834                 }
7835         }
7836
7837         if (dev->hw_features & NETIF_F_HW_VLAN_CTAG_RX)
7838                 bp->flags |= BNXT_FLAG_STRIP_VLAN;
7839
7840         rc = bnxt_probe_phy(bp);
7841         if (rc)
7842                 goto init_err_pci_clean;
7843
7844         rc = bnxt_init_int_mode(bp);
7845         if (rc)
7846                 goto init_err_pci_clean;
7847
7848         bnxt_get_wol_settings(bp);
7849         if (bp->flags & BNXT_FLAG_WOL_CAP)
7850                 device_set_wakeup_enable(&pdev->dev, bp->wol);
7851         else
7852                 device_set_wakeup_capable(&pdev->dev, false);
7853
7854         rc = register_netdev(dev);
7855         if (rc)
7856                 goto init_err_clr_int;
7857
7858         netdev_info(dev, "%s found at mem %lx, node addr %pM\n",
7859                     board_info[ent->driver_data].name,
7860                     (long)pci_resource_start(pdev, 0), dev->dev_addr);
7861
7862         bnxt_parse_log_pcie_link(bp);
7863
7864         return 0;
7865
7866 init_err_clr_int:
7867         bnxt_clear_int_mode(bp);
7868
7869 init_err_pci_clean:
7870         bnxt_cleanup_pci(bp);
7871
7872 init_err_free:
7873         free_netdev(dev);
7874         return rc;
7875 }
7876
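/* PCI shutdown callback: close the netdev and, if the system is
 * powering off, quiesce the ULPs and arm wake-on-LAN before putting the
 * device into D3hot.
 */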
7877 static void bnxt_shutdown(struct pci_dev *pdev)
7878 {
7879         struct net_device *dev = pci_get_drvdata(pdev);
7880         struct bnxt *bp;
7881
7882         if (!dev)
7883                 return;
7884
7885         rtnl_lock();
7886         bp = netdev_priv(dev);
7887         if (!bp)
7888                 goto shutdown_exit;
7889
7890         if (netif_running(dev))
7891                 dev_close(dev);
7892
7893         if (system_state == SYSTEM_POWER_OFF) {
7894                 bnxt_ulp_shutdown(bp);
7895                 bnxt_clear_int_mode(bp);
7896                 pci_wake_from_d3(pdev, bp->wol);
7897                 pci_set_power_state(pdev, PCI_D3hot);
7898         }
7899
7900 shutdown_exit:
7901         rtnl_unlock();
7902 }
7903
7904 #ifdef CONFIG_PM_SLEEP
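/* System suspend: detach and close the netdev, then unregister this
 * driver instance from the firmware.
 */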
7905 static int bnxt_suspend(struct device *device)
7906 {
7907         struct pci_dev *pdev = to_pci_dev(device);
7908         struct net_device *dev = pci_get_drvdata(pdev);
7909         struct bnxt *bp = netdev_priv(dev);
7910         int rc = 0;
7911
7912         rtnl_lock();
7913         if (netif_running(dev)) {
7914                 netif_device_detach(dev);
7915                 rc = bnxt_close(dev);
7916         }
7917         bnxt_hwrm_func_drv_unrgtr(bp);
7918         rtnl_unlock();
7919         return rc;
7920 }
7921
7922 static int bnxt_resume(struct device *device)
7923 {
7924         struct pci_dev *pdev = to_pci_dev(device);
7925         struct net_device *dev = pci_get_drvdata(pdev);
7926         struct bnxt *bp = netdev_priv(dev);
7927         int rc = 0;
7928
7929         rtnl_lock();
7930         if (bnxt_hwrm_ver_get(bp) || bnxt_hwrm_func_drv_rgtr(bp)) {
7931                 rc = -ENODEV;
7932                 goto resume_exit;
7933         }
7934         rc = bnxt_hwrm_func_reset(bp);
7935         if (rc) {
7936                 rc = -EBUSY;
7937                 goto resume_exit;
7938         }
7939         bnxt_get_wol_settings(bp);
7940         if (netif_running(dev)) {
7941                 rc = bnxt_open(dev);
7942                 if (!rc)
7943                         netif_device_attach(dev);
7944         }
7945
7946 resume_exit:
7947         rtnl_unlock();
7948         return rc;
7949 }
7950
7951 static SIMPLE_DEV_PM_OPS(bnxt_pm_ops, bnxt_suspend, bnxt_resume);
7952 #define BNXT_PM_OPS (&bnxt_pm_ops)
7953
7954 #else
7955
7956 #define BNXT_PM_OPS NULL
7957
7958 #endif /* CONFIG_PM_SLEEP */
7959
7960 /**
7961  * bnxt_io_error_detected - called when PCI error is detected
7962  * @pdev: Pointer to PCI device
7963  * @state: The current pci connection state
7964  *
7965  * This function is called after a PCI bus error affecting
7966  * this device has been detected.
7967  */
7968 static pci_ers_result_t bnxt_io_error_detected(struct pci_dev *pdev,
7969                                                pci_channel_state_t state)
7970 {
7971         struct net_device *netdev = pci_get_drvdata(pdev);
7972         struct bnxt *bp = netdev_priv(netdev);
7973
7974         netdev_info(netdev, "PCI I/O error detected\n");
7975
7976         rtnl_lock();
7977         netif_device_detach(netdev);
7978
7979         bnxt_ulp_stop(bp);
7980
7981         if (state == pci_channel_io_perm_failure) {
7982                 rtnl_unlock();
7983                 return PCI_ERS_RESULT_DISCONNECT;
7984         }
7985
7986         if (netif_running(netdev))
7987                 bnxt_close(netdev);
7988
7989         pci_disable_device(pdev);
7990         rtnl_unlock();
7991
7992         /* Request a slot reset. */
7993         return PCI_ERS_RESULT_NEED_RESET;
7994 }
7995
7996 /**
7997  * bnxt_io_slot_reset - called after the pci bus has been reset.
7998  * @pdev: Pointer to PCI device
7999  *
8000  * Restart the card from scratch, as if from a cold-boot.
8001  * At this point, the card has experienced a hard reset,
8002  * followed by fixups by BIOS, and has its config space
8003  * set up identically to what it was at cold boot.
8004  */
8005 static pci_ers_result_t bnxt_io_slot_reset(struct pci_dev *pdev)
8006 {
8007         struct net_device *netdev = pci_get_drvdata(pdev);
8008         struct bnxt *bp = netdev_priv(netdev);
8009         int err = 0;
8010         pci_ers_result_t result = PCI_ERS_RESULT_DISCONNECT;
8011
8012         netdev_info(bp->dev, "PCI Slot Reset\n");
8013
8014         rtnl_lock();
8015
8016         if (pci_enable_device(pdev)) {
8017                 dev_err(&pdev->dev,
8018                         "Cannot re-enable PCI device after reset.\n");
8019         } else {
8020                 pci_set_master(pdev);
8021
8022                 err = bnxt_hwrm_func_reset(bp);
8023                 if (!err && netif_running(netdev))
8024                         err = bnxt_open(netdev);
8025
8026                 if (!err) {
8027                         result = PCI_ERS_RESULT_RECOVERED;
8028                         bnxt_ulp_start(bp);
8029                 }
8030         }
8031
8032         if (result != PCI_ERS_RESULT_RECOVERED && netif_running(netdev))
8033                 dev_close(netdev);
8034
8035         rtnl_unlock();
8036
8037         err = pci_cleanup_aer_uncorrect_error_status(pdev);
8038         if (err) {
8039                 dev_err(&pdev->dev,
8040                         "pci_cleanup_aer_uncorrect_error_status failed 0x%x\n",
8041                          err); /* non-fatal, continue */
8042         }
8043
8044         return result;
8045 }
8046
8047 /**
8048  * bnxt_io_resume - called when traffic can start flowing again.
8049  * @pdev: Pointer to PCI device
8050  *
8051  * This callback is called when the error recovery driver tells
8052  * us that it's OK to resume normal operation.
8053  */
8054 static void bnxt_io_resume(struct pci_dev *pdev)
8055 {
8056         struct net_device *netdev = pci_get_drvdata(pdev);
8057
8058         rtnl_lock();
8059
8060         netif_device_attach(netdev);
8061
8062         rtnl_unlock();
8063 }
8064
8065 static const struct pci_error_handlers bnxt_err_handler = {
8066         .error_detected = bnxt_io_error_detected,
8067         .slot_reset     = bnxt_io_slot_reset,
8068         .resume         = bnxt_io_resume
8069 };
8070
8071 static struct pci_driver bnxt_pci_driver = {
8072         .name           = DRV_MODULE_NAME,
8073         .id_table       = bnxt_pci_tbl,
8074         .probe          = bnxt_init_one,
8075         .remove         = bnxt_remove_one,
8076         .shutdown       = bnxt_shutdown,
8077         .driver.pm      = BNXT_PM_OPS,
8078         .err_handler    = &bnxt_err_handler,
8079 #if defined(CONFIG_BNXT_SRIOV)
8080         .sriov_configure = bnxt_sriov_configure,
8081 #endif
8082 };
8083
8084 module_pci_driver(bnxt_pci_driver);