Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/linville/wirel...
[sfrench/cifs-2.6.git] / drivers / net / ethernet / emulex / benet / be_main.c
1 /*
2  * Copyright (C) 2005 - 2011 Emulex
3  * All rights reserved.
4  *
5  * This program is free software; you can redistribute it and/or
6  * modify it under the terms of the GNU General Public License version 2
7  * as published by the Free Software Foundation.  The full GNU General
8  * Public License is included in this distribution in the file called COPYING.
9  *
10  * Contact Information:
11  * linux-drivers@emulex.com
12  *
13  * Emulex
14  * 3333 Susan Street
15  * Costa Mesa, CA 92626
16  */
17
18 #include <linux/prefetch.h>
19 #include <linux/module.h>
20 #include "be.h"
21 #include "be_cmds.h"
22 #include <asm/div64.h>
23 #include <linux/aer.h>
24
25 MODULE_VERSION(DRV_VER);
26 MODULE_DEVICE_TABLE(pci, be_dev_ids);
27 MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
28 MODULE_AUTHOR("ServerEngines Corporation");
29 MODULE_LICENSE("GPL");
30
31 static unsigned int num_vfs;
32 module_param(num_vfs, uint, S_IRUGO);
33 MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");
34
35 static ushort rx_frag_size = 2048;
36 module_param(rx_frag_size, ushort, S_IRUGO);
37 MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");
38
/* PCI IDs claimed by this driver: BladeEngine 2/3 devices under the
 * ServerEngines vendor ID and OneConnect variants under the Emulex
 * vendor ID.
 */
static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3)},
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID4)},
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID5)},
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID6)},
	{ 0 }	/* terminator */
};
MODULE_DEVICE_TABLE(pci, be_dev_ids);
/* UE Status Low CSR: one name per bit of the Unrecoverable Error status
 * low register, indexed by bit position.  Strings are printed verbatim
 * in error reports, so the (inconsistent) trailing spaces are preserved
 * as-is.
 */
static const char * const ue_status_low_desc[] = {
	"CEV",
	"CTX",
	"DBUF",
	"ERX",
	"Host",
	"MPU",
	"NDMA",
	"PTC ",
	"RDMA ",
	"RXF ",
	"RXIPS ",
	"RXULP0 ",
	"RXULP1 ",
	"RXULP2 ",
	"TIM ",
	"TPOST ",
	"TPRE ",
	"TXIPS ",
	"TXULP0 ",
	"TXULP1 ",
	"UC ",
	"WDMA ",
	"TXULP2 ",
	"HOST1 ",
	"P0_OB_LINK ",
	"P1_OB_LINK ",
	"HOST_GPIO ",
	"MBOX ",
	"AXGMAC0",
	"AXGMAC1",
	"JTAG",
	"MPU_INTPEND"
};
/* UE Status High CSR: one name per bit of the Unrecoverable Error status
 * high register, indexed by bit position.  Bits 24-31 are reserved and
 * reported as "Unknown".
 */
static const char * const ue_status_hi_desc[] = {
	"LPCMEMHOST",
	"MGMT_MAC",
	"PCS0ONLINE",
	"MPU_IRAM",
	"PCS1ONLINE",
	"PCTL0",
	"PCTL1",
	"PMEM",
	"RR",
	"TXPB",
	"RXPP",
	"XAUI",
	"TXP",
	"ARM",
	"IPC",
	"HOST2",
	"HOST3",
	"HOST4",
	"HOST5",
	"HOST6",
	"HOST7",
	"HOST8",
	"HOST9",
	"NETC",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown"
};
121
122 /* Is BE in a multi-channel mode */
123 static inline bool be_is_mc(struct be_adapter *adapter) {
124         return (adapter->function_mode & FLEX10_MODE ||
125                 adapter->function_mode & VNIC_MODE ||
126                 adapter->function_mode & UMC_ENABLED);
127 }
128
129 static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
130 {
131         struct be_dma_mem *mem = &q->dma_mem;
132         if (mem->va) {
133                 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
134                                   mem->dma);
135                 mem->va = NULL;
136         }
137 }
138
139 static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
140                 u16 len, u16 entry_size)
141 {
142         struct be_dma_mem *mem = &q->dma_mem;
143
144         memset(q, 0, sizeof(*q));
145         q->len = len;
146         q->entry_size = entry_size;
147         mem->size = len * entry_size;
148         mem->va = dma_alloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
149                                      GFP_KERNEL);
150         if (!mem->va)
151                 return -ENOMEM;
152         memset(mem->va, 0, mem->size);
153         return 0;
154 }
155
156 static void be_intr_set(struct be_adapter *adapter, bool enable)
157 {
158         u32 reg, enabled;
159
160         if (adapter->eeh_error)
161                 return;
162
163         pci_read_config_dword(adapter->pdev, PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET,
164                                 &reg);
165         enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
166
167         if (!enabled && enable)
168                 reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
169         else if (enabled && !enable)
170                 reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
171         else
172                 return;
173
174         pci_write_config_dword(adapter->pdev,
175                         PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET, reg);
176 }
177
178 static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
179 {
180         u32 val = 0;
181         val |= qid & DB_RQ_RING_ID_MASK;
182         val |= posted << DB_RQ_NUM_POSTED_SHIFT;
183
184         wmb();
185         iowrite32(val, adapter->db + DB_RQ_OFFSET);
186 }
187
188 static void be_txq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
189 {
190         u32 val = 0;
191         val |= qid & DB_TXULP_RING_ID_MASK;
192         val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;
193
194         wmb();
195         iowrite32(val, adapter->db + DB_TXULP1_OFFSET);
196 }
197
198 static void be_eq_notify(struct be_adapter *adapter, u16 qid,
199                 bool arm, bool clear_int, u16 num_popped)
200 {
201         u32 val = 0;
202         val |= qid & DB_EQ_RING_ID_MASK;
203         val |= ((qid & DB_EQ_RING_ID_EXT_MASK) <<
204                         DB_EQ_RING_ID_EXT_MASK_SHIFT);
205
206         if (adapter->eeh_error)
207                 return;
208
209         if (arm)
210                 val |= 1 << DB_EQ_REARM_SHIFT;
211         if (clear_int)
212                 val |= 1 << DB_EQ_CLR_SHIFT;
213         val |= 1 << DB_EQ_EVNT_SHIFT;
214         val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
215         iowrite32(val, adapter->db + DB_EQ_OFFSET);
216 }
217
218 void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
219 {
220         u32 val = 0;
221         val |= qid & DB_CQ_RING_ID_MASK;
222         val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
223                         DB_CQ_RING_ID_EXT_MASK_SHIFT);
224
225         if (adapter->eeh_error)
226                 return;
227
228         if (arm)
229                 val |= 1 << DB_CQ_REARM_SHIFT;
230         val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
231         iowrite32(val, adapter->db + DB_CQ_OFFSET);
232 }
233
234 static int be_mac_addr_set(struct net_device *netdev, void *p)
235 {
236         struct be_adapter *adapter = netdev_priv(netdev);
237         struct sockaddr *addr = p;
238         int status = 0;
239         u8 current_mac[ETH_ALEN];
240         u32 pmac_id = adapter->pmac_id[0];
241         bool active_mac = true;
242
243         if (!is_valid_ether_addr(addr->sa_data))
244                 return -EADDRNOTAVAIL;
245
246         /* For BE VF, MAC address is already activated by PF.
247          * Hence only operation left is updating netdev->devaddr.
248          * Update it if user is passing the same MAC which was used
249          * during configuring VF MAC from PF(Hypervisor).
250          */
251         if (!lancer_chip(adapter) && !be_physfn(adapter)) {
252                 status = be_cmd_mac_addr_query(adapter, current_mac,
253                                                false, adapter->if_handle, 0);
254                 if (!status && !memcmp(current_mac, addr->sa_data, ETH_ALEN))
255                         goto done;
256                 else
257                         goto err;
258         }
259
260         if (!memcmp(addr->sa_data, netdev->dev_addr, ETH_ALEN))
261                 goto done;
262
263         /* For Lancer check if any MAC is active.
264          * If active, get its mac id.
265          */
266         if (lancer_chip(adapter) && !be_physfn(adapter))
267                 be_cmd_get_mac_from_list(adapter, current_mac, &active_mac,
268                                          &pmac_id, 0);
269
270         status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
271                                  adapter->if_handle,
272                                  &adapter->pmac_id[0], 0);
273
274         if (status)
275                 goto err;
276
277         if (active_mac)
278                 be_cmd_pmac_del(adapter, adapter->if_handle,
279                                 pmac_id, 0);
280 done:
281         memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
282         return 0;
283 err:
284         dev_err(&adapter->pdev->dev, "MAC %pM set Failed\n", addr->sa_data);
285         return status;
286 }
287
288 /* BE2 supports only v0 cmd */
289 static void *hw_stats_from_cmd(struct be_adapter *adapter)
290 {
291         if (BE2_chip(adapter)) {
292                 struct be_cmd_resp_get_stats_v0 *cmd = adapter->stats_cmd.va;
293
294                 return &cmd->hw_stats;
295         } else  {
296                 struct be_cmd_resp_get_stats_v1 *cmd = adapter->stats_cmd.va;
297
298                 return &cmd->hw_stats;
299         }
300 }
301
302 /* BE2 supports only v0 cmd */
303 static void *be_erx_stats_from_cmd(struct be_adapter *adapter)
304 {
305         if (BE2_chip(adapter)) {
306                 struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
307
308                 return &hw_stats->erx;
309         } else {
310                 struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
311
312                 return &hw_stats->erx;
313         }
314 }
315
/* Decode the v0 (BE2) GET_STATS response into the driver's chip-agnostic
 * drv_stats.  Port-level counters come from the rxf port entry for this
 * function's port; chip-level counters come from the rxf and pmem blocks.
 */
static void populate_be_v0_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v0 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v0 *port_stats =
					&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	/* FW returns the response little-endian; convert in place first */
	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rxpp_fifo_overflow_drop = port_stats->rx_fifo_overflow;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_input_fifo_overflow_drop = port_stats->rx_input_fifo_overflow;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	/* v0 keeps address- and vlan-mismatch drops in separate counters;
	 * the driver reports their sum
	 */
	drvs->rx_address_mismatch_drops =
					port_stats->rx_address_mismatch_drops +
					port_stats->rx_vlan_mismatch_drops;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;

	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;

	/* jabber events are tracked per physical port in the rxf block */
	if (adapter->port_num)
		drvs->jabber_events = rxf_stats->port1_jabber_events;
	else
		drvs->jabber_events = rxf_stats->port0_jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}
364
/* Decode the v1 (BE3/Skyhawk) GET_STATS response into the driver's
 * chip-agnostic drv_stats.  Unlike v0, jabber events and the mismatch
 * drops are already per-port counters in this layout.
 */
static void populate_be_v1_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v1 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v1 *port_stats =
					&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	/* FW returns the response little-endian; convert in place first */
	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
	drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop =
		port_stats->rx_input_fifo_overflow_drop;
	drvs->rx_address_mismatch_drops = port_stats->rx_address_mismatch_drops;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;
	drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;
	drvs->jabber_events = port_stats->jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}
409
/* Decode Lancer per-port (pport) stats into the driver's chip-agnostic
 * drv_stats.  Lancer counters are 64-bit; only the low 32 bits (the
 * "_lo" fields) are folded into the 32-bit drv_stats counters.
 */
static void populate_lancer_stats(struct be_adapter *adapter)
{

	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct lancer_pport_stats *pport_stats =
					pport_stats_from_cmd(adapter);

	/* FW returns the response little-endian; convert in place first */
	be_dws_le_to_cpu(pport_stats, sizeof(*pport_stats));
	drvs->rx_pause_frames = pport_stats->rx_pause_frames_lo;
	drvs->rx_crc_errors = pport_stats->rx_crc_errors_lo;
	drvs->rx_control_frames = pport_stats->rx_control_frames_lo;
	drvs->rx_in_range_errors = pport_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = pport_stats->rx_frames_too_long_lo;
	drvs->rx_dropped_runt = pport_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = pport_stats->rx_ip_checksum_errors;
	drvs->rx_tcp_checksum_errs = pport_stats->rx_tcp_checksum_errors;
	drvs->rx_udp_checksum_errs = pport_stats->rx_udp_checksum_errors;
	drvs->rx_dropped_tcp_length =
				pport_stats->rx_dropped_invalid_tcp_length;
	drvs->rx_dropped_too_small = pport_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = pport_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = pport_stats->rx_out_of_range_errors;
	drvs->rx_dropped_header_too_small =
				pport_stats->rx_dropped_header_too_small;
	/* Lancer exposes a single fifo-overflow register; both driver
	 * fifo-drop counters are derived from it
	 */
	drvs->rx_input_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	drvs->rx_address_mismatch_drops =
					pport_stats->rx_address_mismatch_drops +
					pport_stats->rx_vlan_mismatch_drops;
	drvs->rx_alignment_symbol_errors = pport_stats->rx_symbol_errors_lo;
	drvs->rxpp_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	drvs->tx_pauseframes = pport_stats->tx_pause_frames_lo;
	drvs->tx_controlframes = pport_stats->tx_control_frames_lo;
	drvs->jabber_events = pport_stats->rx_jabbers;
	drvs->forwarded_packets = pport_stats->num_forwards_lo;
	drvs->rx_drops_mtu = pport_stats->rx_drops_mtu_lo;
	drvs->rx_drops_too_many_frags =
				pport_stats->rx_drops_too_many_frags_lo;
}
448
449 static void accumulate_16bit_val(u32 *acc, u16 val)
450 {
451 #define lo(x)                   (x & 0xFFFF)
452 #define hi(x)                   (x & 0xFFFF0000)
453         bool wrapped = val < lo(*acc);
454         u32 newacc = hi(*acc) + val;
455
456         if (wrapped)
457                 newacc += 65536;
458         ACCESS_ONCE(*acc) = newacc;
459 }
460
461 void be_parse_stats(struct be_adapter *adapter)
462 {
463         struct be_erx_stats_v1 *erx = be_erx_stats_from_cmd(adapter);
464         struct be_rx_obj *rxo;
465         int i;
466
467         if (lancer_chip(adapter)) {
468                 populate_lancer_stats(adapter);
469         } else {
470                 if (BE2_chip(adapter))
471                         populate_be_v0_stats(adapter);
472                 else
473                         /* for BE3 and Skyhawk */
474                         populate_be_v1_stats(adapter);
475
476                 /* as erx_v1 is longer than v0, ok to use v1 for v0 access */
477                 for_all_rx_queues(adapter, rxo, i) {
478                         /* below erx HW counter can actually wrap around after
479                          * 65535. Driver accumulates a 32-bit value
480                          */
481                         accumulate_16bit_val(&rx_stats(rxo)->rx_drops_no_frags,
482                                              (u16)erx->rx_drops_no_fragments \
483                                              [rxo->q.id]);
484                 }
485         }
486 }
487
/* ndo_get_stats64 handler: aggregate per-queue SW counters and the
 * FW-derived drv_stats into @stats.  Returns @stats.
 */
static struct rtnl_link_stats64 *be_get_stats64(struct net_device *netdev,
					struct rtnl_link_stats64 *stats)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u64 pkts, bytes;
	unsigned int start;
	int i;

	for_all_rx_queues(adapter, rxo, i) {
		const struct be_rx_stats *rx_stats = rx_stats(rxo);
		/* seqcount retry loop: re-read if the writer updated the
		 * 64-bit counters while we were sampling them
		 */
		do {
			start = u64_stats_fetch_begin_bh(&rx_stats->sync);
			pkts = rx_stats(rxo)->rx_pkts;
			bytes = rx_stats(rxo)->rx_bytes;
		} while (u64_stats_fetch_retry_bh(&rx_stats->sync, start));
		stats->rx_packets += pkts;
		stats->rx_bytes += bytes;
		stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
		stats->rx_dropped += rx_stats(rxo)->rx_drops_no_skbs +
					rx_stats(rxo)->rx_drops_no_frags;
	}

	for_all_tx_queues(adapter, txo, i) {
		const struct be_tx_stats *tx_stats = tx_stats(txo);
		/* same seqcount retry discipline as the RX loop above */
		do {
			start = u64_stats_fetch_begin_bh(&tx_stats->sync);
			pkts = tx_stats(txo)->tx_pkts;
			bytes = tx_stats(txo)->tx_bytes;
		} while (u64_stats_fetch_retry_bh(&tx_stats->sync, start));
		stats->tx_packets += pkts;
		stats->tx_bytes += bytes;
	}

	/* bad pkts received */
	stats->rx_errors = drvs->rx_crc_errors +
		drvs->rx_alignment_symbol_errors +
		drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long +
		drvs->rx_dropped_too_small +
		drvs->rx_dropped_too_short +
		drvs->rx_dropped_header_too_small +
		drvs->rx_dropped_tcp_length +
		drvs->rx_dropped_runt;

	/* detailed rx errors */
	stats->rx_length_errors = drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long;

	stats->rx_crc_errors = drvs->rx_crc_errors;

	/* frame alignment errors */
	stats->rx_frame_errors = drvs->rx_alignment_symbol_errors;

	/* receiver fifo overrun */
	/* drops_no_pbuf is no per i/f, it's per BE card */
	stats->rx_fifo_errors = drvs->rxpp_fifo_overflow_drop +
				drvs->rx_input_fifo_overflow_drop +
				drvs->rx_drops_no_pbuf;
	return stats;
}
553
554 void be_link_status_update(struct be_adapter *adapter, u8 link_status)
555 {
556         struct net_device *netdev = adapter->netdev;
557
558         if (!(adapter->flags & BE_FLAGS_LINK_STATUS_INIT)) {
559                 netif_carrier_off(netdev);
560                 adapter->flags |= BE_FLAGS_LINK_STATUS_INIT;
561         }
562
563         if ((link_status & LINK_STATUS_MASK) == LINK_UP)
564                 netif_carrier_on(netdev);
565         else
566                 netif_carrier_off(netdev);
567 }
568
569 static void be_tx_stats_update(struct be_tx_obj *txo,
570                         u32 wrb_cnt, u32 copied, u32 gso_segs, bool stopped)
571 {
572         struct be_tx_stats *stats = tx_stats(txo);
573
574         u64_stats_update_begin(&stats->sync);
575         stats->tx_reqs++;
576         stats->tx_wrbs += wrb_cnt;
577         stats->tx_bytes += copied;
578         stats->tx_pkts += (gso_segs ? gso_segs : 1);
579         if (stopped)
580                 stats->tx_stops++;
581         u64_stats_update_end(&stats->sync);
582 }
583
584 /* Determine number of WRB entries needed to xmit data in an skb */
585 static u32 wrb_cnt_for_skb(struct be_adapter *adapter, struct sk_buff *skb,
586                                                                 bool *dummy)
587 {
588         int cnt = (skb->len > skb->data_len);
589
590         cnt += skb_shinfo(skb)->nr_frags;
591
592         /* to account for hdr wrb */
593         cnt++;
594         if (lancer_chip(adapter) || !(cnt & 1)) {
595                 *dummy = false;
596         } else {
597                 /* add a dummy to make it an even num */
598                 cnt++;
599                 *dummy = true;
600         }
601         BUG_ON(cnt > BE_MAX_TX_FRAG_COUNT);
602         return cnt;
603 }
604
605 static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
606 {
607         wrb->frag_pa_hi = upper_32_bits(addr);
608         wrb->frag_pa_lo = addr & 0xFFFFFFFF;
609         wrb->frag_len = len & ETH_WRB_FRAG_LEN_MASK;
610         wrb->rsvd0 = 0;
611 }
612
613 static inline u16 be_get_tx_vlan_tag(struct be_adapter *adapter,
614                                         struct sk_buff *skb)
615 {
616         u8 vlan_prio;
617         u16 vlan_tag;
618
619         vlan_tag = vlan_tx_tag_get(skb);
620         vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
621         /* If vlan priority provided by OS is NOT in available bmap */
622         if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
623                 vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
624                                 adapter->recommended_prio;
625
626         return vlan_tag;
627 }
628
629 static int be_vlan_tag_chk(struct be_adapter *adapter, struct sk_buff *skb)
630 {
631         return vlan_tx_tag_present(skb) || adapter->pvid;
632 }
633
/* Fill the header WRB that leads every TX request: CRC/LSO/csum offload
 * flags, optional VLAN tag, the total WRB count and the payload length.
 */
static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
		struct sk_buff *skb, u32 wrb_cnt, u32 len)
{
	u16 vlan_tag;

	memset(hdr, 0, sizeof(*hdr));

	AMAP_SET_BITS(struct amap_eth_hdr_wrb, crc, hdr, 1);

	if (skb_is_gso(skb)) {
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso, hdr, 1);
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso_mss,
			hdr, skb_shinfo(skb)->gso_size);
		/* Lancer does not use the lso6 flag for IPv6 GSO */
		if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso6, hdr, 1);
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		if (is_tcp_pkt(skb))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, tcpcs, hdr, 1);
		else if (is_udp_pkt(skb))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, udpcs, hdr, 1);
	}

	if (vlan_tx_tag_present(skb)) {
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan, hdr, 1);
		vlan_tag = be_get_tx_vlan_tag(adapter, skb);
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan_tag, hdr, vlan_tag);
	}

	AMAP_SET_BITS(struct amap_eth_hdr_wrb, event, hdr, 1);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, complete, hdr, 1);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, num_wrb, hdr, wrb_cnt);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, len, hdr, len);
}
667
668 static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
669                 bool unmap_single)
670 {
671         dma_addr_t dma;
672
673         be_dws_le_to_cpu(wrb, sizeof(*wrb));
674
675         dma = (u64)wrb->frag_pa_hi << 32 | (u64)wrb->frag_pa_lo;
676         if (wrb->frag_len) {
677                 if (unmap_single)
678                         dma_unmap_single(dev, dma, wrb->frag_len,
679                                          DMA_TO_DEVICE);
680                 else
681                         dma_unmap_page(dev, dma, wrb->frag_len, DMA_TO_DEVICE);
682         }
683 }
684
/* DMA-map @skb and write its WRBs (header, linear part, page frags and
 * optional dummy pad) onto @txq.  Returns the number of payload bytes
 * mapped, or 0 on a DMA mapping failure, in which case all mappings made
 * so far are undone and the queue head is rolled back.
 */
static int make_tx_wrbs(struct be_adapter *adapter, struct be_queue_info *txq,
		struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb)
{
	dma_addr_t busaddr;
	int i, copied = 0;
	struct device *dev = &adapter->pdev->dev;
	struct sk_buff *first_skb = skb;
	struct be_eth_wrb *wrb;
	struct be_eth_hdr_wrb *hdr;
	bool map_single = false;
	u16 map_head;

	/* Reserve the header WRB now; it is filled in last, once the total
	 * mapped length is known
	 */
	hdr = queue_head_node(txq);
	queue_head_inc(txq);
	/* Remember where the data WRBs start for rollback on error */
	map_head = txq->head;

	if (skb->len > skb->data_len) {
		int len = skb_headlen(skb);
		busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		map_single = true;
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, len);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += len;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		const struct skb_frag_struct *frag =
			&skb_shinfo(skb)->frags[i];
		busaddr = skb_frag_dma_map(dev, frag, 0,
					   skb_frag_size(frag), DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, skb_frag_size(frag));
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += skb_frag_size(frag);
	}

	/* Pad with a zero-length WRB so the request has an even WRB count */
	if (dummy_wrb) {
		wrb = queue_head_node(txq);
		wrb_fill(wrb, 0, 0);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
	}

	wrb_fill_hdr(adapter, hdr, first_skb, wrb_cnt, copied);
	be_dws_cpu_to_le(hdr, sizeof(*hdr));

	return copied;
dma_err:
	/* Rewind to the first data WRB and unmap everything mapped so far.
	 * Only the first WRB can be a single mapping; the rest are pages.
	 */
	txq->head = map_head;
	while (copied) {
		wrb = queue_head_node(txq);
		unmap_tx_frag(dev, wrb, map_single);
		map_single = false;
		copied -= wrb->frag_len;
		queue_head_inc(txq);
	}
	return 0;
}
750
751 static struct sk_buff *be_insert_vlan_in_pkt(struct be_adapter *adapter,
752                                              struct sk_buff *skb)
753 {
754         u16 vlan_tag = 0;
755
756         skb = skb_share_check(skb, GFP_ATOMIC);
757         if (unlikely(!skb))
758                 return skb;
759
760         if (vlan_tx_tag_present(skb)) {
761                 vlan_tag = be_get_tx_vlan_tag(adapter, skb);
762                 __vlan_put_tag(skb, vlan_tag);
763                 skb->vlan_tci = 0;
764         }
765
766         return skb;
767 }
768
/* ndo_start_xmit handler: apply HW work-arounds, map the skb onto the
 * selected TX queue's WRBs and ring the doorbell.  Always returns
 * NETDEV_TX_OK; on failure the skb is dropped (freed), never requeued.
 */
static netdev_tx_t be_xmit(struct sk_buff *skb,
			struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_tx_obj *txo = &adapter->tx_obj[skb_get_queue_mapping(skb)];
	struct be_queue_info *txq = &txo->q;
	struct iphdr *ip = NULL;
	u32 wrb_cnt = 0, copied = 0;
	/* remember the queue head so it can be rolled back if mapping fails */
	u32 start = txq->head, eth_hdr_len;
	bool dummy_wrb, stopped = false;

	eth_hdr_len = ntohs(skb->protocol) == ETH_P_8021Q ?
		VLAN_ETH_HLEN : ETH_HLEN;

	/* HW has a bug which considers padding bytes as legal
	 * and modifies the IPv4 hdr's 'tot_len' field
	 */
	/* NOTE(review): assumes the full IP header is in the linear data
	 * for these short (<= 60 byte) frames — TODO confirm
	 */
	if (skb->len <= 60 && be_vlan_tag_chk(adapter, skb) &&
			is_ipv4_pkt(skb)) {
		ip = (struct iphdr *)ip_hdr(skb);
		pskb_trim(skb, eth_hdr_len + ntohs(ip->tot_len));
	}

	/* HW has a bug wherein it will calculate CSUM for VLAN
	 * pkts even though it is disabled.
	 * Manually insert VLAN in pkt.
	 */
	if (skb->ip_summed != CHECKSUM_PARTIAL &&
			be_vlan_tag_chk(adapter, skb)) {
		skb = be_insert_vlan_in_pkt(adapter, skb);
		if (unlikely(!skb))
			goto tx_drop;
	}

	wrb_cnt = wrb_cnt_for_skb(adapter, skb, &dummy_wrb);

	copied = make_tx_wrbs(adapter, txq, skb, wrb_cnt, dummy_wrb);
	if (copied) {
		int gso_segs = skb_shinfo(skb)->gso_segs;

		/* record the sent skb in the sent_skb table */
		BUG_ON(txo->sent_skb_list[start]);
		txo->sent_skb_list[start] = skb;

		/* Ensure txq has space for the next skb; Else stop the queue
		 * *BEFORE* ringing the tx doorbell, so that we serialze the
		 * tx compls of the current transmit which'll wake up the queue
		 */
		atomic_add(wrb_cnt, &txq->used);
		if ((BE_MAX_TX_FRAG_COUNT + atomic_read(&txq->used)) >=
								txq->len) {
			netif_stop_subqueue(netdev, skb_get_queue_mapping(skb));
			stopped = true;
		}

		be_txq_notify(adapter, txq->id, wrb_cnt);

		be_tx_stats_update(txo, wrb_cnt, copied, gso_segs, stopped);
	} else {
		/* DMA mapping failed: make_tx_wrbs() already unmapped;
		 * roll back the queue head and drop the packet
		 */
		txq->head = start;
		dev_kfree_skb_any(skb);
	}
tx_drop:
	return NETDEV_TX_OK;
}
834
835 static int be_change_mtu(struct net_device *netdev, int new_mtu)
836 {
837         struct be_adapter *adapter = netdev_priv(netdev);
838         if (new_mtu < BE_MIN_MTU ||
839                         new_mtu > (BE_MAX_JUMBO_FRAME_SIZE -
840                                         (ETH_HLEN + ETH_FCS_LEN))) {
841                 dev_info(&adapter->pdev->dev,
842                         "MTU must be between %d and %d bytes\n",
843                         BE_MIN_MTU,
844                         (BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN)));
845                 return -EINVAL;
846         }
847         dev_info(&adapter->pdev->dev, "MTU changed from %d to %d bytes\n",
848                         netdev->mtu, new_mtu);
849         netdev->mtu = new_mtu;
850         return 0;
851 }
852
853 /*
854  * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
855  * If the user configures more, place BE in vlan promiscuous mode.
856  */
857 static int be_vid_config(struct be_adapter *adapter)
858 {
859         u16 vids[BE_NUM_VLANS_SUPPORTED];
860         u16 num = 0, i;
861         int status = 0;
862
863         /* No need to further configure vids if in promiscuous mode */
864         if (adapter->promiscuous)
865                 return 0;
866
867         if (adapter->vlans_added > adapter->max_vlans)
868                 goto set_vlan_promisc;
869
870         /* Construct VLAN Table to give to HW */
871         for (i = 0; i < VLAN_N_VID; i++)
872                 if (adapter->vlan_tag[i])
873                         vids[num++] = cpu_to_le16(i);
874
875         status = be_cmd_vlan_config(adapter, adapter->if_handle,
876                                     vids, num, 1, 0);
877
878         /* Set to VLAN promisc mode as setting VLAN filter failed */
879         if (status) {
880                 dev_info(&adapter->pdev->dev, "Exhausted VLAN HW filters.\n");
881                 dev_info(&adapter->pdev->dev, "Disabling HW VLAN filtering.\n");
882                 goto set_vlan_promisc;
883         }
884
885         return status;
886
887 set_vlan_promisc:
888         status = be_cmd_vlan_config(adapter, adapter->if_handle,
889                                     NULL, 0, 1, 1);
890         return status;
891 }
892
893 static int be_vlan_add_vid(struct net_device *netdev, u16 vid)
894 {
895         struct be_adapter *adapter = netdev_priv(netdev);
896         int status = 0;
897
898         if (!lancer_chip(adapter) && !be_physfn(adapter)) {
899                 status = -EINVAL;
900                 goto ret;
901         }
902
903         /* Packets with VID 0 are always received by Lancer by default */
904         if (lancer_chip(adapter) && vid == 0)
905                 goto ret;
906
907         adapter->vlan_tag[vid] = 1;
908         if (adapter->vlans_added <= (adapter->max_vlans + 1))
909                 status = be_vid_config(adapter);
910
911         if (!status)
912                 adapter->vlans_added++;
913         else
914                 adapter->vlan_tag[vid] = 0;
915 ret:
916         return status;
917 }
918
919 static int be_vlan_rem_vid(struct net_device *netdev, u16 vid)
920 {
921         struct be_adapter *adapter = netdev_priv(netdev);
922         int status = 0;
923
924         if (!lancer_chip(adapter) && !be_physfn(adapter)) {
925                 status = -EINVAL;
926                 goto ret;
927         }
928
929         /* Packets with VID 0 are always received by Lancer by default */
930         if (lancer_chip(adapter) && vid == 0)
931                 goto ret;
932
933         adapter->vlan_tag[vid] = 0;
934         if (adapter->vlans_added <= adapter->max_vlans)
935                 status = be_vid_config(adapter);
936
937         if (!status)
938                 adapter->vlans_added--;
939         else
940                 adapter->vlan_tag[vid] = 1;
941 ret:
942         return status;
943 }
944
/* ndo_set_rx_mode handler: program HW RX filters (promisc, multicast,
 * unicast MAC list) to match the netdev's current flags and addr lists.
 */
static void be_set_rx_mode(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status;

	if (netdev->flags & IFF_PROMISC) {
		be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
		adapter->promiscuous = true;
		goto done;
	}

	/* BE was previously in promiscuous mode; disable it */
	if (adapter->promiscuous) {
		adapter->promiscuous = false;
		be_cmd_rx_filter(adapter, IFF_PROMISC, OFF);

		/* Re-program VLAN filters that promisc mode bypassed */
		if (adapter->vlans_added)
			be_vid_config(adapter);
	}

	/* Enable multicast promisc if num configured exceeds what we support */
	if (netdev->flags & IFF_ALLMULTI ||
	    netdev_mc_count(netdev) > adapter->max_mcast_mac) {
		be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
		goto done;
	}

	/* Re-program the UC MAC filters only when the list has changed */
	if (netdev_uc_count(netdev) != adapter->uc_macs) {
		struct netdev_hw_addr *ha;
		int i = 1; /* First slot is claimed by the Primary MAC */

		/* Flush the previously programmed UC MACs */
		for (; adapter->uc_macs > 0; adapter->uc_macs--, i++) {
			be_cmd_pmac_del(adapter, adapter->if_handle,
					adapter->pmac_id[i], 0);
		}

		/* More UC addrs than HW slots: fall back to promisc */
		if (netdev_uc_count(netdev) > adapter->max_pmac_cnt) {
			be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
			adapter->promiscuous = true;
			goto done;
		}

		netdev_for_each_uc_addr(ha, adapter->netdev) {
			adapter->uc_macs++; /* First slot is for Primary MAC */
			be_cmd_pmac_add(adapter, (u8 *)ha->addr,
					adapter->if_handle,
					&adapter->pmac_id[adapter->uc_macs], 0);
		}
	}

	status = be_cmd_rx_filter(adapter, IFF_MULTICAST, ON);

	/* Set to MCAST promisc mode if setting MULTICAST address fails */
	if (status) {
		dev_info(&adapter->pdev->dev, "Exhausted multicast HW filters.\n");
		dev_info(&adapter->pdev->dev, "Disabling HW multicast filtering.\n");
		be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
	}
done:
	return;
}
1006
/* ndo_set_vf_mac handler: program @mac as the MAC address of VF @vf.
 * Deletes the VF's currently programmed MAC before adding the new one.
 */
static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
	int status;
	bool active_mac = false;
	u32 pmac_id;
	u8 old_mac[ETH_ALEN];

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (!is_valid_ether_addr(mac) || vf >= adapter->num_vfs)
		return -EINVAL;

	if (lancer_chip(adapter)) {
		/* Lancer: look up the currently active MAC, delete it if
		 * present, then program the new one via the MAC list cmd
		 */
		status = be_cmd_get_mac_from_list(adapter, old_mac, &active_mac,
						  &pmac_id, vf + 1);
		if (!status && active_mac)
			be_cmd_pmac_del(adapter, vf_cfg->if_handle,
					pmac_id, vf + 1);

		status = be_cmd_set_mac_list(adapter,  mac, 1, vf + 1);
	} else {
		/* BE2/BE3: delete the old pmac, then add the new one.
		 * NOTE(review): the pmac_del status is overwritten by
		 * pmac_add — presumably best-effort on delete; confirm.
		 */
		status = be_cmd_pmac_del(adapter, vf_cfg->if_handle,
					 vf_cfg->pmac_id, vf + 1);

		status = be_cmd_pmac_add(adapter, mac, vf_cfg->if_handle,
					 &vf_cfg->pmac_id, vf + 1);
	}

	if (status)
		dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed\n",
				mac, vf);
	else
		/* Mirror the programmed MAC into the sw VF config */
		memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);

	return status;
}
1046
1047 static int be_get_vf_config(struct net_device *netdev, int vf,
1048                         struct ifla_vf_info *vi)
1049 {
1050         struct be_adapter *adapter = netdev_priv(netdev);
1051         struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
1052
1053         if (!sriov_enabled(adapter))
1054                 return -EPERM;
1055
1056         if (vf >= adapter->num_vfs)
1057                 return -EINVAL;
1058
1059         vi->vf = vf;
1060         vi->tx_rate = vf_cfg->tx_rate;
1061         vi->vlan = vf_cfg->vlan_tag;
1062         vi->qos = 0;
1063         memcpy(&vi->mac, vf_cfg->mac_addr, ETH_ALEN);
1064
1065         return 0;
1066 }
1067
1068 static int be_set_vf_vlan(struct net_device *netdev,
1069                         int vf, u16 vlan, u8 qos)
1070 {
1071         struct be_adapter *adapter = netdev_priv(netdev);
1072         int status = 0;
1073
1074         if (!sriov_enabled(adapter))
1075                 return -EPERM;
1076
1077         if (vf >= adapter->num_vfs || vlan > 4095)
1078                 return -EINVAL;
1079
1080         if (vlan) {
1081                 if (adapter->vf_cfg[vf].vlan_tag != vlan) {
1082                         /* If this is new value, program it. Else skip. */
1083                         adapter->vf_cfg[vf].vlan_tag = vlan;
1084
1085                         status = be_cmd_set_hsw_config(adapter, vlan,
1086                                 vf + 1, adapter->vf_cfg[vf].if_handle);
1087                 }
1088         } else {
1089                 /* Reset Transparent Vlan Tagging. */
1090                 adapter->vf_cfg[vf].vlan_tag = 0;
1091                 vlan = adapter->vf_cfg[vf].def_vid;
1092                 status = be_cmd_set_hsw_config(adapter, vlan, vf + 1,
1093                         adapter->vf_cfg[vf].if_handle);
1094         }
1095
1096
1097         if (status)
1098                 dev_info(&adapter->pdev->dev,
1099                                 "VLAN %d config on VF %d failed\n", vlan, vf);
1100         return status;
1101 }
1102
1103 static int be_set_vf_tx_rate(struct net_device *netdev,
1104                         int vf, int rate)
1105 {
1106         struct be_adapter *adapter = netdev_priv(netdev);
1107         int status = 0;
1108
1109         if (!sriov_enabled(adapter))
1110                 return -EPERM;
1111
1112         if (vf >= adapter->num_vfs)
1113                 return -EINVAL;
1114
1115         if (rate < 100 || rate > 10000) {
1116                 dev_err(&adapter->pdev->dev,
1117                         "tx rate must be between 100 and 10000 Mbps\n");
1118                 return -EINVAL;
1119         }
1120
1121         if (lancer_chip(adapter))
1122                 status = be_cmd_set_profile_config(adapter, rate / 10, vf + 1);
1123         else
1124                 status = be_cmd_set_qos(adapter, rate / 10, vf + 1);
1125
1126         if (status)
1127                 dev_err(&adapter->pdev->dev,
1128                                 "tx rate %d on VF %d failed\n", rate, vf);
1129         else
1130                 adapter->vf_cfg[vf].tx_rate = rate;
1131         return status;
1132 }
1133
1134 static int be_find_vfs(struct be_adapter *adapter, int vf_state)
1135 {
1136         struct pci_dev *dev, *pdev = adapter->pdev;
1137         int vfs = 0, assigned_vfs = 0, pos;
1138         u16 offset, stride;
1139
1140         pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV);
1141         if (!pos)
1142                 return 0;
1143         pci_read_config_word(pdev, pos + PCI_SRIOV_VF_OFFSET, &offset);
1144         pci_read_config_word(pdev, pos + PCI_SRIOV_VF_STRIDE, &stride);
1145
1146         dev = pci_get_device(pdev->vendor, PCI_ANY_ID, NULL);
1147         while (dev) {
1148                 if (dev->is_virtfn && pci_physfn(dev) == pdev) {
1149                         vfs++;
1150                         if (dev->dev_flags & PCI_DEV_FLAGS_ASSIGNED)
1151                                 assigned_vfs++;
1152                 }
1153                 dev = pci_get_device(pdev->vendor, PCI_ANY_ID, dev);
1154         }
1155         return (vf_state == ASSIGNED) ? assigned_vfs : vfs;
1156 }
1157
1158 static void be_eqd_update(struct be_adapter *adapter, struct be_eq_obj *eqo)
1159 {
1160         struct be_rx_stats *stats = rx_stats(&adapter->rx_obj[eqo->idx]);
1161         ulong now = jiffies;
1162         ulong delta = now - stats->rx_jiffies;
1163         u64 pkts;
1164         unsigned int start, eqd;
1165
1166         if (!eqo->enable_aic) {
1167                 eqd = eqo->eqd;
1168                 goto modify_eqd;
1169         }
1170
1171         if (eqo->idx >= adapter->num_rx_qs)
1172                 return;
1173
1174         stats = rx_stats(&adapter->rx_obj[eqo->idx]);
1175
1176         /* Wrapped around */
1177         if (time_before(now, stats->rx_jiffies)) {
1178                 stats->rx_jiffies = now;
1179                 return;
1180         }
1181
1182         /* Update once a second */
1183         if (delta < HZ)
1184                 return;
1185
1186         do {
1187                 start = u64_stats_fetch_begin_bh(&stats->sync);
1188                 pkts = stats->rx_pkts;
1189         } while (u64_stats_fetch_retry_bh(&stats->sync, start));
1190
1191         stats->rx_pps = (unsigned long)(pkts - stats->rx_pkts_prev) / (delta / HZ);
1192         stats->rx_pkts_prev = pkts;
1193         stats->rx_jiffies = now;
1194         eqd = (stats->rx_pps / 110000) << 3;
1195         eqd = min(eqd, eqo->max_eqd);
1196         eqd = max(eqd, eqo->min_eqd);
1197         if (eqd < 10)
1198                 eqd = 0;
1199
1200 modify_eqd:
1201         if (eqd != eqo->cur_eqd) {
1202                 be_cmd_modify_eqd(adapter, eqo->q.id, eqd);
1203                 eqo->cur_eqd = eqd;
1204         }
1205 }
1206
1207 static void be_rx_stats_update(struct be_rx_obj *rxo,
1208                 struct be_rx_compl_info *rxcp)
1209 {
1210         struct be_rx_stats *stats = rx_stats(rxo);
1211
1212         u64_stats_update_begin(&stats->sync);
1213         stats->rx_compl++;
1214         stats->rx_bytes += rxcp->pkt_size;
1215         stats->rx_pkts++;
1216         if (rxcp->pkt_type == BE_MULTICAST_PACKET)
1217                 stats->rx_mcast_pkts++;
1218         if (rxcp->err)
1219                 stats->rx_compl_err++;
1220         u64_stats_update_end(&stats->sync);
1221 }
1222
1223 static inline bool csum_passed(struct be_rx_compl_info *rxcp)
1224 {
1225         /* L4 checksum is not reliable for non TCP/UDP packets.
1226          * Also ignore ipcksm for ipv6 pkts */
1227         return (rxcp->tcpf || rxcp->udpf) && rxcp->l4_csum &&
1228                                 (rxcp->ip_csum || rxcp->ipv6);
1229 }
1230
/* Consume the RXQ entry at @frag_idx and return its page-info.
 * Unmaps the backing page from the device when this frag is the last
 * user of that (big) page; decrements the RXQ's used count.
 */
static struct be_rx_page_info *get_rx_page_info(struct be_rx_obj *rxo,
						u16 frag_idx)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *rx_page_info;
	struct be_queue_info *rxq = &rxo->q;

	rx_page_info = &rxo->page_info_tbl[frag_idx];
	BUG_ON(!rx_page_info->page);

	/* Only the frag marked last_page_user unmaps the whole big page;
	 * earlier frags of the same page must leave the mapping alive.
	 */
	if (rx_page_info->last_page_user) {
		dma_unmap_page(&adapter->pdev->dev,
			       dma_unmap_addr(rx_page_info, bus),
			       adapter->big_page_size, DMA_FROM_DEVICE);
		rx_page_info->last_page_user = false;
	}

	atomic_dec(&rxq->used);
	return rx_page_info;
}
1251
1252 /* Throwaway the data in the Rx completion */
1253 static void be_rx_compl_discard(struct be_rx_obj *rxo,
1254                                 struct be_rx_compl_info *rxcp)
1255 {
1256         struct be_queue_info *rxq = &rxo->q;
1257         struct be_rx_page_info *page_info;
1258         u16 i, num_rcvd = rxcp->num_rcvd;
1259
1260         for (i = 0; i < num_rcvd; i++) {
1261                 page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
1262                 put_page(page_info->page);
1263                 memset(page_info, 0, sizeof(*page_info));
1264                 index_inc(&rxcp->rxq_idx, rxq->len);
1265         }
1266 }
1267
1268 /*
1269  * skb_fill_rx_data forms a complete skb for an ether frame
1270  * indicated by rxcp.
1271  */
1272 static void skb_fill_rx_data(struct be_rx_obj *rxo, struct sk_buff *skb,
1273                              struct be_rx_compl_info *rxcp)
1274 {
1275         struct be_queue_info *rxq = &rxo->q;
1276         struct be_rx_page_info *page_info;
1277         u16 i, j;
1278         u16 hdr_len, curr_frag_len, remaining;
1279         u8 *start;
1280
1281         page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
1282         start = page_address(page_info->page) + page_info->page_offset;
1283         prefetch(start);
1284
1285         /* Copy data in the first descriptor of this completion */
1286         curr_frag_len = min(rxcp->pkt_size, rx_frag_size);
1287
1288         skb->len = curr_frag_len;
1289         if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
1290                 memcpy(skb->data, start, curr_frag_len);
1291                 /* Complete packet has now been moved to data */
1292                 put_page(page_info->page);
1293                 skb->data_len = 0;
1294                 skb->tail += curr_frag_len;
1295         } else {
1296                 hdr_len = ETH_HLEN;
1297                 memcpy(skb->data, start, hdr_len);
1298                 skb_shinfo(skb)->nr_frags = 1;
1299                 skb_frag_set_page(skb, 0, page_info->page);
1300                 skb_shinfo(skb)->frags[0].page_offset =
1301                                         page_info->page_offset + hdr_len;
1302                 skb_frag_size_set(&skb_shinfo(skb)->frags[0], curr_frag_len - hdr_len);
1303                 skb->data_len = curr_frag_len - hdr_len;
1304                 skb->truesize += rx_frag_size;
1305                 skb->tail += hdr_len;
1306         }
1307         page_info->page = NULL;
1308
1309         if (rxcp->pkt_size <= rx_frag_size) {
1310                 BUG_ON(rxcp->num_rcvd != 1);
1311                 return;
1312         }
1313
1314         /* More frags present for this completion */
1315         index_inc(&rxcp->rxq_idx, rxq->len);
1316         remaining = rxcp->pkt_size - curr_frag_len;
1317         for (i = 1, j = 0; i < rxcp->num_rcvd; i++) {
1318                 page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
1319                 curr_frag_len = min(remaining, rx_frag_size);
1320
1321                 /* Coalesce all frags from the same physical page in one slot */
1322                 if (page_info->page_offset == 0) {
1323                         /* Fresh page */
1324                         j++;
1325                         skb_frag_set_page(skb, j, page_info->page);
1326                         skb_shinfo(skb)->frags[j].page_offset =
1327                                                         page_info->page_offset;
1328                         skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
1329                         skb_shinfo(skb)->nr_frags++;
1330                 } else {
1331                         put_page(page_info->page);
1332                 }
1333
1334                 skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
1335                 skb->len += curr_frag_len;
1336                 skb->data_len += curr_frag_len;
1337                 skb->truesize += rx_frag_size;
1338                 remaining -= curr_frag_len;
1339                 index_inc(&rxcp->rxq_idx, rxq->len);
1340                 page_info->page = NULL;
1341         }
1342         BUG_ON(j > MAX_SKB_FRAGS);
1343 }
1344
/* Process the RX completion indicated by rxcp when GRO is disabled */
static void be_rx_compl_process(struct be_rx_obj *rxo,
				struct be_rx_compl_info *rxcp)
{
	struct be_adapter *adapter = rxo->adapter;
	struct net_device *netdev = adapter->netdev;
	struct sk_buff *skb;

	skb = netdev_alloc_skb_ip_align(netdev, BE_RX_SKB_ALLOC_SIZE);
	if (unlikely(!skb)) {
		/* No memory for an skb: count the drop and free the frags */
		rx_stats(rxo)->rx_drops_no_skbs++;
		be_rx_compl_discard(rxo, rxcp);
		return;
	}

	skb_fill_rx_data(rxo, skb, rxcp);

	/* Trust the HW csum only if the stack enabled RX csum offload */
	if (likely((netdev->features & NETIF_F_RXCSUM) && csum_passed(rxcp)))
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	else
		skb_checksum_none_assert(skb);

	skb->protocol = eth_type_trans(skb, netdev);
	skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
	if (netdev->features & NETIF_F_RXHASH)
		skb->rxhash = rxcp->rss_hash;


	if (rxcp->vlanf)
		__vlan_hwaccel_put_tag(skb, rxcp->vlan_tag);

	netif_receive_skb(skb);
}
1378
/* Process the RX completion indicated by rxcp when GRO is enabled */
void be_rx_compl_process_gro(struct be_rx_obj *rxo, struct napi_struct *napi,
			     struct be_rx_compl_info *rxcp)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *page_info;
	struct sk_buff *skb = NULL;
	struct be_queue_info *rxq = &rxo->q;
	u16 remaining, curr_frag_len;
	u16 i, j;

	skb = napi_get_frags(napi);
	if (!skb) {
		be_rx_compl_discard(rxo, rxcp);
		return;
	}

	remaining = rxcp->pkt_size;
	/* j is u16; -1 wraps to 0xffff so the first j++ yields frag 0 */
	for (i = 0, j = -1; i < rxcp->num_rcvd; i++) {
		page_info = get_rx_page_info(rxo, rxcp->rxq_idx);

		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (i == 0 || page_info->page_offset == 0) {
			/* First frag or Fresh page */
			j++;
			skb_frag_set_page(skb, j, page_info->page);
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
		} else {
			/* Same page as previous frag: drop the extra ref */
			put_page(page_info->page);
		}
		skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
		skb->truesize += rx_frag_size;
		remaining -= curr_frag_len;
		index_inc(&rxcp->rxq_idx, rxq->len);
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(j > MAX_SKB_FRAGS);

	skb_shinfo(skb)->nr_frags = j + 1;
	skb->len = rxcp->pkt_size;
	skb->data_len = rxcp->pkt_size;
	/* GRO path is only taken when HW has validated the csums */
	skb->ip_summed = CHECKSUM_UNNECESSARY;
	skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
	if (adapter->netdev->features & NETIF_F_RXHASH)
		skb->rxhash = rxcp->rss_hash;

	if (rxcp->vlanf)
		__vlan_hwaccel_put_tag(skb, rxcp->vlan_tag);

	napi_gro_frags(napi);
}
1434
/* Extract the fields of a v1 (BE3-native) RX completion entry into the
 * sw rxcp structure. Each statement is a fixed HW-field extraction.
 */
static void be_parse_rx_compl_v1(struct be_eth_rx_compl *compl,
				 struct be_rx_compl_info *rxcp)
{
	rxcp->pkt_size =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, pktsize, compl);
	rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtp, compl);
	rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, err, compl);
	rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, tcpf, compl);
	rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, udpf, compl);
	rxcp->ip_csum =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ipcksm, compl);
	rxcp->l4_csum =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, l4_cksm, compl);
	rxcp->ipv6 =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ip_version, compl);
	rxcp->rxq_idx =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, fragndx, compl);
	rxcp->num_rcvd =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, numfrags, compl);
	rxcp->pkt_type =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, cast_enc, compl);
	rxcp->rss_hash =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, rsshash, compl);
	/* VLAN fields are only valid when the vlanf bit is set */
	if (rxcp->vlanf) {
		rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtm,
					  compl);
		rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vlan_tag,
					       compl);
	}
	rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, port, compl);
}
1466
/* Extract the fields of a v0 (legacy) RX completion entry into the
 * sw rxcp structure. Each statement is a fixed HW-field extraction.
 */
static void be_parse_rx_compl_v0(struct be_eth_rx_compl *compl,
				 struct be_rx_compl_info *rxcp)
{
	rxcp->pkt_size =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, pktsize, compl);
	rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtp, compl);
	rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, err, compl);
	rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, tcpf, compl);
	rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, udpf, compl);
	rxcp->ip_csum =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ipcksm, compl);
	rxcp->l4_csum =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, l4_cksm, compl);
	rxcp->ipv6 =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ip_version, compl);
	rxcp->rxq_idx =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, fragndx, compl);
	rxcp->num_rcvd =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, numfrags, compl);
	rxcp->pkt_type =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, cast_enc, compl);
	rxcp->rss_hash =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, rsshash, compl);
	/* VLAN fields are only valid when the vlanf bit is set */
	if (rxcp->vlanf) {
		rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtm,
					  compl);
		rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vlan_tag,
					       compl);
	}
	rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, port, compl);
}
1498
/* Fetch and parse the next RX completion from the CQ.
 * Returns NULL when no valid completion is pending; otherwise returns
 * the per-rxo rxcp scratch structure filled from the entry.
 */
static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
{
	struct be_eth_rx_compl *compl = queue_tail_node(&rxo->cq);
	struct be_rx_compl_info *rxcp = &rxo->rxcp;
	struct be_adapter *adapter = rxo->adapter;

	/* For checking the valid bit it is Ok to use either definition as the
	 * valid bit is at the same position in both v0 and v1 Rx compl */
	if (compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] == 0)
		return NULL;

	/* Read barrier: the valid bit must be observed before the rest of
	 * the DMA'd completion entry is read */
	rmb();
	be_dws_le_to_cpu(compl, sizeof(*compl));

	if (adapter->be3_native)
		be_parse_rx_compl_v1(compl, rxcp);
	else
		be_parse_rx_compl_v0(compl, rxcp);

	if (rxcp->vlanf) {
		/* vlanf could be wrongly set in some cards.
		 * ignore if vtm is not set */
		if ((adapter->function_mode & FLEX10_MODE) && !rxcp->vtm)
			rxcp->vlanf = 0;

		/* Non-Lancer HW reports the tag byte-swapped */
		if (!lancer_chip(adapter))
			rxcp->vlan_tag = swab16(rxcp->vlan_tag);

		/* Strip the pvid tag only when the user hasn't added it */
		if (adapter->pvid == (rxcp->vlan_tag & VLAN_VID_MASK) &&
		    !adapter->vlan_tag[rxcp->vlan_tag])
			rxcp->vlanf = 0;
	}

	/* As the compl has been parsed, reset it; we wont touch it again */
	compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] = 0;

	queue_tail_inc(&rxo->cq);
	return rxcp;
}
1538
1539 static inline struct page *be_alloc_pages(u32 size, gfp_t gfp)
1540 {
1541         u32 order = get_order(size);
1542
1543         if (order > 0)
1544                 gfp |= __GFP_COMP;
1545         return  alloc_pages(gfp, order);
1546 }
1547
1548 /*
1549  * Allocate a page, split it to fragments of size rx_frag_size and post as
1550  * receive buffers to BE
1551  */
1552 static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp)
1553 {
1554         struct be_adapter *adapter = rxo->adapter;
1555         struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
1556         struct be_queue_info *rxq = &rxo->q;
1557         struct page *pagep = NULL;
1558         struct be_eth_rx_d *rxd;
1559         u64 page_dmaaddr = 0, frag_dmaaddr;
1560         u32 posted, page_offset = 0;
1561
1562         page_info = &rxo->page_info_tbl[rxq->head];
1563         for (posted = 0; posted < MAX_RX_POST && !page_info->page; posted++) {
1564                 if (!pagep) {
1565                         pagep = be_alloc_pages(adapter->big_page_size, gfp);
1566                         if (unlikely(!pagep)) {
1567                                 rx_stats(rxo)->rx_post_fail++;
1568                                 break;
1569                         }
1570                         page_dmaaddr = dma_map_page(&adapter->pdev->dev, pagep,
1571                                                     0, adapter->big_page_size,
1572                                                     DMA_FROM_DEVICE);
1573                         page_info->page_offset = 0;
1574                 } else {
1575                         get_page(pagep);
1576                         page_info->page_offset = page_offset + rx_frag_size;
1577                 }
1578                 page_offset = page_info->page_offset;
1579                 page_info->page = pagep;
1580                 dma_unmap_addr_set(page_info, bus, page_dmaaddr);
1581                 frag_dmaaddr = page_dmaaddr + page_info->page_offset;
1582
1583                 rxd = queue_head_node(rxq);
1584                 rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
1585                 rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));
1586
1587                 /* Any space left in the current big page for another frag? */
1588                 if ((page_offset + rx_frag_size + rx_frag_size) >
1589                                         adapter->big_page_size) {
1590                         pagep = NULL;
1591                         page_info->last_page_user = true;
1592                 }
1593
1594                 prev_page_info = page_info;
1595                 queue_head_inc(rxq);
1596                 page_info = &rxo->page_info_tbl[rxq->head];
1597         }
1598         if (pagep)
1599                 prev_page_info->last_page_user = true;
1600
1601         if (posted) {
1602                 atomic_add(posted, &rxq->used);
1603                 be_rxq_notify(adapter, rxq->id, posted);
1604         } else if (atomic_read(&rxq->used) == 0) {
1605                 /* Let be_worker replenish when memory is available */
1606                 rxo->rx_post_starved = true;
1607         }
1608 }
1609
/* Return the next valid TX completion from @tx_cq, or NULL if none is
 * pending.  The entry's valid dword is cleared and the CQ tail advanced,
 * so each completion is delivered exactly once.
 */
static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
{
	struct be_eth_tx_compl *txcp = queue_tail_node(tx_cq);

	/* A zero valid-dword means hw has not written this entry yet */
	if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
		return NULL;

	/* Read barrier: don't read the completion body before the valid bit */
	rmb();
	be_dws_le_to_cpu(txcp, sizeof(*txcp));

	/* Reset the valid dword; this entry won't be touched again */
	txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;

	queue_tail_inc(tx_cq);
	return txcp;
}
1625
/* Unmap and free the skb whose WRBs run from the current txq tail up to
 * @last_index (inclusive), advancing the tail past them.  Returns the
 * number of WRBs consumed (including the header WRB) so the caller can
 * adjust txq->used.
 */
static u16 be_tx_compl_process(struct be_adapter *adapter,
		struct be_tx_obj *txo, u16 last_index)
{
	struct be_queue_info *txq = &txo->q;
	struct be_eth_wrb *wrb;
	struct sk_buff **sent_skbs = txo->sent_skb_list;
	struct sk_buff *sent_skb;
	u16 cur_index, num_wrbs = 1; /* account for hdr wrb */
	bool unmap_skb_hdr = true;

	/* The skb is recorded at the slot of its header WRB */
	sent_skb = sent_skbs[txq->tail];
	BUG_ON(!sent_skb);
	sent_skbs[txq->tail] = NULL;

	/* skip header wrb */
	queue_tail_inc(txq);

	do {
		cur_index = txq->tail;
		wrb = queue_tail_node(txq);
		/* Only the first data WRB may carry the (mapped) skb header */
		unmap_tx_frag(&adapter->pdev->dev, wrb,
			      (unmap_skb_hdr && skb_headlen(sent_skb)));
		unmap_skb_hdr = false;

		num_wrbs++;
		queue_tail_inc(txq);
	} while (cur_index != last_index);

	kfree_skb(sent_skb);
	return num_wrbs;
}
1657
/* Return the number of events in the event queue */
static inline int events_get(struct be_eq_obj *eqo)
{
	struct be_eq_entry *eqe;
	int num = 0;

	/* Consume entries until the first unwritten (evt == 0) one */
	do {
		eqe = queue_tail_node(&eqo->q);
		if (eqe->evt == 0)
			break;

		/* Read barrier before clearing, so the entry is fully seen */
		rmb();
		eqe->evt = 0;
		num++;
		queue_tail_inc(&eqo->q);
	} while (true);

	return num;
}
1677
1678 static int event_handle(struct be_eq_obj *eqo)
1679 {
1680         bool rearm = false;
1681         int num = events_get(eqo);
1682
1683         /* Deal with any spurious interrupts that come without events */
1684         if (!num)
1685                 rearm = true;
1686
1687         if (num || msix_enabled(eqo->adapter))
1688                 be_eq_notify(eqo->adapter, eqo->q.id, rearm, true, num);
1689
1690         if (num)
1691                 napi_schedule(&eqo->napi);
1692
1693         return num;
1694 }
1695
1696 /* Leaves the EQ is disarmed state */
1697 static void be_eq_clean(struct be_eq_obj *eqo)
1698 {
1699         int num = events_get(eqo);
1700
1701         be_eq_notify(eqo->adapter, eqo->q.id, false, true, num);
1702 }
1703
/* Drain all pending RX completions and release every posted-but-unused
 * RX buffer, leaving the RXQ empty with head == tail == 0.  Callers are
 * expected to have invalidated the RXQ in hw first (see be_rx_qs_destroy).
 */
static void be_rx_cq_clean(struct be_rx_obj *rxo)
{
	struct be_rx_page_info *page_info;
	struct be_queue_info *rxq = &rxo->q;
	struct be_queue_info *rx_cq = &rxo->cq;
	struct be_rx_compl_info *rxcp;
	u16 tail;

	/* First cleanup pending rx completions */
	while ((rxcp = be_rx_compl_get(rxo)) != NULL) {
		be_rx_compl_discard(rxo, rxcp);
		be_cq_notify(rxo->adapter, rx_cq->id, false, 1);
	}

	/* Then free posted rx buffer that were not used */
	/* The oldest posted buffer sits 'used' slots behind the head */
	tail = (rxq->head + rxq->len - atomic_read(&rxq->used)) % rxq->len;
	for (; atomic_read(&rxq->used) > 0; index_inc(&tail, rxq->len)) {
		page_info = get_rx_page_info(rxo, tail);
		put_page(page_info->page);
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(atomic_read(&rxq->used));
	rxq->tail = rxq->head = 0;
}
1728
/* Drain all TX queues before teardown: first give the hw up to 200ms to
 * deliver outstanding tx-completions and process them; then forcibly free
 * any posted skbs whose completions never arrived.
 */
static void be_tx_compl_clean(struct be_adapter *adapter)
{
	struct be_tx_obj *txo;
	struct be_queue_info *txq;
	struct be_eth_tx_compl *txcp;
	u16 end_idx, cmpl = 0, timeo = 0, num_wrbs = 0;
	struct sk_buff *sent_skb;
	bool dummy_wrb;
	int i, pending_txqs;

	/* Wait for a max of 200ms for all the tx-completions to arrive. */
	do {
		pending_txqs = adapter->num_tx_qs;

		for_all_tx_queues(adapter, txo, i) {
			txq = &txo->q;
			while ((txcp = be_tx_compl_get(&txo->cq))) {
				end_idx =
					AMAP_GET_BITS(struct amap_eth_tx_compl,
						      wrb_index, txcp);
				num_wrbs += be_tx_compl_process(adapter, txo,
								end_idx);
				cmpl++;
			}
			/* Ack the batch and release the consumed WRB slots */
			if (cmpl) {
				be_cq_notify(adapter, txo->cq.id, false, cmpl);
				atomic_sub(num_wrbs, &txq->used);
				cmpl = 0;
				num_wrbs = 0;
			}
			if (atomic_read(&txq->used) == 0)
				pending_txqs--;
		}

		if (pending_txqs == 0 || ++timeo > 200)
			break;

		mdelay(1);
	} while (true);

	for_all_tx_queues(adapter, txo, i) {
		txq = &txo->q;
		if (atomic_read(&txq->used))
			dev_err(&adapter->pdev->dev, "%d pending tx-compls\n",
				atomic_read(&txq->used));

		/* free posted tx for which compls will never arrive */
		while (atomic_read(&txq->used)) {
			sent_skb = txo->sent_skb_list[txq->tail];
			end_idx = txq->tail;
			/* Recompute the skb's WRB span to find its last index */
			num_wrbs = wrb_cnt_for_skb(adapter, sent_skb,
						   &dummy_wrb);
			index_adv(&end_idx, num_wrbs - 1, txq->len);
			num_wrbs = be_tx_compl_process(adapter, txo, end_idx);
			atomic_sub(num_wrbs, &txq->used);
		}
	}
}
1787
1788 static void be_evt_queues_destroy(struct be_adapter *adapter)
1789 {
1790         struct be_eq_obj *eqo;
1791         int i;
1792
1793         for_all_evt_queues(adapter, eqo, i) {
1794                 if (eqo->q.created) {
1795                         be_eq_clean(eqo);
1796                         be_cmd_q_destroy(adapter, &eqo->q, QTYPE_EQ);
1797                 }
1798                 be_queue_free(adapter, &eqo->q);
1799         }
1800 }
1801
1802 static int be_evt_queues_create(struct be_adapter *adapter)
1803 {
1804         struct be_queue_info *eq;
1805         struct be_eq_obj *eqo;
1806         int i, rc;
1807
1808         adapter->num_evt_qs = num_irqs(adapter);
1809
1810         for_all_evt_queues(adapter, eqo, i) {
1811                 eqo->adapter = adapter;
1812                 eqo->tx_budget = BE_TX_BUDGET;
1813                 eqo->idx = i;
1814                 eqo->max_eqd = BE_MAX_EQD;
1815                 eqo->enable_aic = true;
1816
1817                 eq = &eqo->q;
1818                 rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
1819                                         sizeof(struct be_eq_entry));
1820                 if (rc)
1821                         return rc;
1822
1823                 rc = be_cmd_eq_create(adapter, eq, eqo->cur_eqd);
1824                 if (rc)
1825                         return rc;
1826         }
1827         return 0;
1828 }
1829
1830 static void be_mcc_queues_destroy(struct be_adapter *adapter)
1831 {
1832         struct be_queue_info *q;
1833
1834         q = &adapter->mcc_obj.q;
1835         if (q->created)
1836                 be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
1837         be_queue_free(adapter, q);
1838
1839         q = &adapter->mcc_obj.cq;
1840         if (q->created)
1841                 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
1842         be_queue_free(adapter, q);
1843 }
1844
1845 /* Must be called only after TX qs are created as MCC shares TX EQ */
1846 static int be_mcc_queues_create(struct be_adapter *adapter)
1847 {
1848         struct be_queue_info *q, *cq;
1849
1850         cq = &adapter->mcc_obj.cq;
1851         if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
1852                         sizeof(struct be_mcc_compl)))
1853                 goto err;
1854
1855         /* Use the default EQ for MCC completions */
1856         if (be_cmd_cq_create(adapter, cq, &mcc_eqo(adapter)->q, true, 0))
1857                 goto mcc_cq_free;
1858
1859         q = &adapter->mcc_obj.q;
1860         if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
1861                 goto mcc_cq_destroy;
1862
1863         if (be_cmd_mccq_create(adapter, q, cq))
1864                 goto mcc_q_free;
1865
1866         return 0;
1867
1868 mcc_q_free:
1869         be_queue_free(adapter, q);
1870 mcc_cq_destroy:
1871         be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
1872 mcc_cq_free:
1873         be_queue_free(adapter, cq);
1874 err:
1875         return -1;
1876 }
1877
1878 static void be_tx_queues_destroy(struct be_adapter *adapter)
1879 {
1880         struct be_queue_info *q;
1881         struct be_tx_obj *txo;
1882         u8 i;
1883
1884         for_all_tx_queues(adapter, txo, i) {
1885                 q = &txo->q;
1886                 if (q->created)
1887                         be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
1888                 be_queue_free(adapter, q);
1889
1890                 q = &txo->cq;
1891                 if (q->created)
1892                         be_cmd_q_destroy(adapter, q, QTYPE_CQ);
1893                 be_queue_free(adapter, q);
1894         }
1895 }
1896
1897 static int be_num_txqs_want(struct be_adapter *adapter)
1898 {
1899         if ((!lancer_chip(adapter) && sriov_want(adapter)) ||
1900             be_is_mc(adapter) ||
1901             (!lancer_chip(adapter) && !be_physfn(adapter)) ||
1902             BE2_chip(adapter))
1903                 return 1;
1904         else
1905                 return adapter->max_tx_queues;
1906 }
1907
1908 static int be_tx_cqs_create(struct be_adapter *adapter)
1909 {
1910         struct be_queue_info *cq, *eq;
1911         int status;
1912         struct be_tx_obj *txo;
1913         u8 i;
1914
1915         adapter->num_tx_qs = be_num_txqs_want(adapter);
1916         if (adapter->num_tx_qs != MAX_TX_QS) {
1917                 rtnl_lock();
1918                 netif_set_real_num_tx_queues(adapter->netdev,
1919                         adapter->num_tx_qs);
1920                 rtnl_unlock();
1921         }
1922
1923         for_all_tx_queues(adapter, txo, i) {
1924                 cq = &txo->cq;
1925                 status = be_queue_alloc(adapter, cq, TX_CQ_LEN,
1926                                         sizeof(struct be_eth_tx_compl));
1927                 if (status)
1928                         return status;
1929
1930                 /* If num_evt_qs is less than num_tx_qs, then more than
1931                  * one txq share an eq
1932                  */
1933                 eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
1934                 status = be_cmd_cq_create(adapter, cq, eq, false, 3);
1935                 if (status)
1936                         return status;
1937         }
1938         return 0;
1939 }
1940
1941 static int be_tx_qs_create(struct be_adapter *adapter)
1942 {
1943         struct be_tx_obj *txo;
1944         int i, status;
1945
1946         for_all_tx_queues(adapter, txo, i) {
1947                 status = be_queue_alloc(adapter, &txo->q, TX_Q_LEN,
1948                                         sizeof(struct be_eth_wrb));
1949                 if (status)
1950                         return status;
1951
1952                 status = be_cmd_txq_create(adapter, &txo->q, &txo->cq);
1953                 if (status)
1954                         return status;
1955         }
1956
1957         dev_info(&adapter->pdev->dev, "created %d TX queue(s)\n",
1958                  adapter->num_tx_qs);
1959         return 0;
1960 }
1961
1962 static void be_rx_cqs_destroy(struct be_adapter *adapter)
1963 {
1964         struct be_queue_info *q;
1965         struct be_rx_obj *rxo;
1966         int i;
1967
1968         for_all_rx_queues(adapter, rxo, i) {
1969                 q = &rxo->cq;
1970                 if (q->created)
1971                         be_cmd_q_destroy(adapter, q, QTYPE_CQ);
1972                 be_queue_free(adapter, q);
1973         }
1974 }
1975
1976 static int be_rx_cqs_create(struct be_adapter *adapter)
1977 {
1978         struct be_queue_info *eq, *cq;
1979         struct be_rx_obj *rxo;
1980         int rc, i;
1981
1982         /* We'll create as many RSS rings as there are irqs.
1983          * But when there's only one irq there's no use creating RSS rings
1984          */
1985         adapter->num_rx_qs = (num_irqs(adapter) > 1) ?
1986                                 num_irqs(adapter) + 1 : 1;
1987         if (adapter->num_rx_qs != MAX_RX_QS) {
1988                 rtnl_lock();
1989                 netif_set_real_num_rx_queues(adapter->netdev,
1990                                              adapter->num_rx_qs);
1991                 rtnl_unlock();
1992         }
1993
1994         adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
1995         for_all_rx_queues(adapter, rxo, i) {
1996                 rxo->adapter = adapter;
1997                 cq = &rxo->cq;
1998                 rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
1999                                 sizeof(struct be_eth_rx_compl));
2000                 if (rc)
2001                         return rc;
2002
2003                 eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
2004                 rc = be_cmd_cq_create(adapter, cq, eq, false, 3);
2005                 if (rc)
2006                         return rc;
2007         }
2008
2009         dev_info(&adapter->pdev->dev,
2010                  "created %d RSS queue(s) and 1 default RX queue\n",
2011                  adapter->num_rx_qs - 1);
2012         return 0;
2013 }
2014
2015 static irqreturn_t be_intx(int irq, void *dev)
2016 {
2017         struct be_adapter *adapter = dev;
2018         int num_evts;
2019
2020         /* With INTx only one EQ is used */
2021         num_evts = event_handle(&adapter->eq_obj[0]);
2022         if (num_evts)
2023                 return IRQ_HANDLED;
2024         else
2025                 return IRQ_NONE;
2026 }
2027
/* MSI-x interrupt handler: ack the EQ without re-arming it and defer all
 * event processing to NAPI context.
 */
static irqreturn_t be_msix(int irq, void *dev)
{
	struct be_eq_obj *eqo = dev;

	be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0);
	napi_schedule(&eqo->napi);
	return IRQ_HANDLED;
}
2036
2037 static inline bool do_gro(struct be_rx_compl_info *rxcp)
2038 {
2039         return (rxcp->tcpf && !rxcp->err) ? true : false;
2040 }
2041
/* NAPI RX processing for one RX object: consume up to @budget completions,
 * discarding flush/partial/mis-filtered ones, and hand the rest to GRO or
 * the regular receive path.  Returns the number of completions consumed.
 */
static int be_process_rx(struct be_rx_obj *rxo, struct napi_struct *napi,
			int budget)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_queue_info *rx_cq = &rxo->cq;
	struct be_rx_compl_info *rxcp;
	u32 work_done;

	for (work_done = 0; work_done < budget; work_done++) {
		rxcp = be_rx_compl_get(rxo);
		if (!rxcp)
			break;

		/* Is it a flush compl that has no data */
		if (unlikely(rxcp->num_rcvd == 0))
			goto loop_continue;

		/* Discard compl with partial DMA Lancer B0 */
		if (unlikely(!rxcp->pkt_size)) {
			be_rx_compl_discard(rxo, rxcp);
			goto loop_continue;
		}

		/* On BE drop pkts that arrive due to imperfect filtering in
		 * promiscuous mode on some skews
		 */
		if (unlikely(rxcp->port != adapter->port_num &&
				!lancer_chip(adapter))) {
			be_rx_compl_discard(rxo, rxcp);
			goto loop_continue;
		}

		if (do_gro(rxcp))
			be_rx_compl_process_gro(rxo, napi, rxcp);
		else
			be_rx_compl_process(rxo, rxcp);
loop_continue:
		be_rx_stats_update(rxo, rxcp);
	}

	if (work_done) {
		be_cq_notify(adapter, rx_cq->id, true, work_done);

		/* Replenish RX buffers when the queue runs low */
		if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM)
			be_post_rx_frags(rxo, GFP_ATOMIC);
	}

	return work_done;
}
2091
/* NAPI TX processing for one TX object: consume up to @budget completions,
 * release the corresponding WRBs/skbs and wake the netdev subqueue if it
 * was flow-stopped.  Returns true when all pending work fit in @budget.
 */
static bool be_process_tx(struct be_adapter *adapter, struct be_tx_obj *txo,
			  int budget, int idx)
{
	struct be_eth_tx_compl *txcp;
	int num_wrbs = 0, work_done;

	for (work_done = 0; work_done < budget; work_done++) {
		txcp = be_tx_compl_get(&txo->cq);
		if (!txcp)
			break;
		num_wrbs += be_tx_compl_process(adapter, txo,
				AMAP_GET_BITS(struct amap_eth_tx_compl,
					wrb_index, txcp));
	}

	if (work_done) {
		be_cq_notify(adapter, txo->cq.id, true, work_done);
		atomic_sub(num_wrbs, &txo->q.used);

		/* As Tx wrbs have been freed up, wake up netdev queue
		 * if it was stopped due to lack of tx wrbs.  */
		if (__netif_subqueue_stopped(adapter->netdev, idx) &&
			atomic_read(&txo->q.used) < txo->q.len / 2) {
			netif_wake_subqueue(adapter->netdev, idx);
		}

		u64_stats_update_begin(&tx_stats(txo)->sync_compl);
		tx_stats(txo)->tx_compl += work_done;
		u64_stats_update_end(&tx_stats(txo)->sync_compl);
	}
	return (work_done < budget); /* Done */
}
2124
/* NAPI poll handler for one EQ: services all TX and RX queues mapped to
 * this EQ, plus MCC on the EQ that owns it.  Completes NAPI and re-arms
 * the EQ only when all work fit within @budget.
 */
int be_poll(struct napi_struct *napi, int budget)
{
	struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
	struct be_adapter *adapter = eqo->adapter;
	int max_work = 0, work, i, num_evts;
	bool tx_done;

	/* Count (and clear) events now; they are acked below */
	num_evts = events_get(eqo);

	/* Process all TXQs serviced by this EQ */
	for (i = eqo->idx; i < adapter->num_tx_qs; i += adapter->num_evt_qs) {
		tx_done = be_process_tx(adapter, &adapter->tx_obj[i],
					eqo->tx_budget, i);
		/* Unfinished TX forces another poll round */
		if (!tx_done)
			max_work = budget;
	}

	/* This loop will iterate twice for EQ0 in which
	 * completions of the last RXQ (default one) are also processed
	 * For other EQs the loop iterates only once
	 */
	for (i = eqo->idx; i < adapter->num_rx_qs; i += adapter->num_evt_qs) {
		work = be_process_rx(&adapter->rx_obj[i], napi, budget);
		max_work = max(work, max_work);
	}

	if (is_mcc_eqo(eqo))
		be_process_mcc(adapter);

	if (max_work < budget) {
		napi_complete(napi);
		/* All done: ack events and re-arm the EQ */
		be_eq_notify(adapter, eqo->q.id, true, false, num_evts);
	} else {
		/* As we'll continue in polling mode, count and clear events */
		be_eq_notify(adapter, eqo->q.id, false, false, num_evts);
	}
	return max_work;
}
2163
2164 void be_detect_error(struct be_adapter *adapter)
2165 {
2166         u32 ue_lo = 0, ue_hi = 0, ue_lo_mask = 0, ue_hi_mask = 0;
2167         u32 sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0;
2168         u32 i;
2169
2170         if (be_crit_error(adapter))
2171                 return;
2172
2173         if (lancer_chip(adapter)) {
2174                 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
2175                 if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
2176                         sliport_err1 = ioread32(adapter->db +
2177                                         SLIPORT_ERROR1_OFFSET);
2178                         sliport_err2 = ioread32(adapter->db +
2179                                         SLIPORT_ERROR2_OFFSET);
2180                 }
2181         } else {
2182                 pci_read_config_dword(adapter->pdev,
2183                                 PCICFG_UE_STATUS_LOW, &ue_lo);
2184                 pci_read_config_dword(adapter->pdev,
2185                                 PCICFG_UE_STATUS_HIGH, &ue_hi);
2186                 pci_read_config_dword(adapter->pdev,
2187                                 PCICFG_UE_STATUS_LOW_MASK, &ue_lo_mask);
2188                 pci_read_config_dword(adapter->pdev,
2189                                 PCICFG_UE_STATUS_HI_MASK, &ue_hi_mask);
2190
2191                 ue_lo = (ue_lo & ~ue_lo_mask);
2192                 ue_hi = (ue_hi & ~ue_hi_mask);
2193         }
2194
2195         /* On certain platforms BE hardware can indicate spurious UEs.
2196          * Allow the h/w to stop working completely in case of a real UE.
2197          * Hence not setting the hw_error for UE detection.
2198          */
2199         if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
2200                 adapter->hw_error = true;
2201                 dev_err(&adapter->pdev->dev,
2202                         "Error detected in the card\n");
2203         }
2204
2205         if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
2206                 dev_err(&adapter->pdev->dev,
2207                         "ERR: sliport status 0x%x\n", sliport_status);
2208                 dev_err(&adapter->pdev->dev,
2209                         "ERR: sliport error1 0x%x\n", sliport_err1);
2210                 dev_err(&adapter->pdev->dev,
2211                         "ERR: sliport error2 0x%x\n", sliport_err2);
2212         }
2213
2214         if (ue_lo) {
2215                 for (i = 0; ue_lo; ue_lo >>= 1, i++) {
2216                         if (ue_lo & 1)
2217                                 dev_err(&adapter->pdev->dev,
2218                                 "UE: %s bit set\n", ue_status_low_desc[i]);
2219                 }
2220         }
2221
2222         if (ue_hi) {
2223                 for (i = 0; ue_hi; ue_hi >>= 1, i++) {
2224                         if (ue_hi & 1)
2225                                 dev_err(&adapter->pdev->dev,
2226                                 "UE: %s bit set\n", ue_status_hi_desc[i]);
2227                 }
2228         }
2229
2230 }
2231
2232 static void be_msix_disable(struct be_adapter *adapter)
2233 {
2234         if (msix_enabled(adapter)) {
2235                 pci_disable_msix(adapter->pdev);
2236                 adapter->num_msix_vec = 0;
2237         }
2238 }
2239
2240 static uint be_num_rss_want(struct be_adapter *adapter)
2241 {
2242         u32 num = 0;
2243
2244         if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
2245             (lancer_chip(adapter) ||
2246              (!sriov_want(adapter) && be_physfn(adapter)))) {
2247                 num = adapter->max_rss_queues;
2248                 num = min_t(u32, num, (u32)netif_get_num_default_rss_queues());
2249         }
2250         return num;
2251 }
2252
/* Enable MSI-x with as many vectors as RSS (and optionally RoCE) want;
 * if the full request fails but the hw offers a smaller count, retry with
 * that count.  On success, split the vectors between NIC and RoCE.
 * Failure is non-fatal: num_msix_vec stays 0 and the caller falls back
 * to INTx.
 */
static void be_msix_enable(struct be_adapter *adapter)
{
#define BE_MIN_MSIX_VECTORS		1
	int i, status, num_vec, num_roce_vec = 0;
	struct device *dev = &adapter->pdev->dev;

	/* If RSS queues are not used, need a vec for default RX Q */
	num_vec = min(be_num_rss_want(adapter), num_online_cpus());
	if (be_roce_supported(adapter)) {
		num_roce_vec = min_t(u32, MAX_ROCE_MSIX_VECTORS,
					(num_online_cpus() + 1));
		num_roce_vec = min(num_roce_vec, MAX_ROCE_EQS);
		num_vec += num_roce_vec;
		num_vec = min(num_vec, MAX_MSIX_VECTORS);
	}
	num_vec = max(num_vec, BE_MIN_MSIX_VECTORS);

	for (i = 0; i < num_vec; i++)
		adapter->msix_entries[i].entry = i;

	status = pci_enable_msix(adapter->pdev, adapter->msix_entries, num_vec);
	if (status == 0) {
		goto done;
	} else if (status >= BE_MIN_MSIX_VECTORS) {
		/* A positive return is the number of vectors the hw can
		 * actually provide; retry with that count
		 */
		num_vec = status;
		if (pci_enable_msix(adapter->pdev, adapter->msix_entries,
				num_vec) == 0)
			goto done;
	}

	dev_warn(dev, "MSIx enable failed\n");
	return;
done:
	/* Give RoCE its share of the vectors; NIC keeps the rest */
	if (be_roce_supported(adapter)) {
		if (num_vec > num_roce_vec) {
			adapter->num_msix_vec = num_vec - num_roce_vec;
			adapter->num_msix_roce_vec =
				num_vec - adapter->num_msix_vec;
		} else {
			adapter->num_msix_vec = num_vec;
			adapter->num_msix_roce_vec = 0;
		}
	} else
		adapter->num_msix_vec = num_vec;
	dev_info(dev, "enabled %d MSI-x vector(s)\n", adapter->num_msix_vec);
	return;
}
2300
/* IRQ vector assigned to the given EQ; valid only after MSI-x has been
 * enabled and msix_entries[] populated.
 */
static inline int be_msix_vec_get(struct be_adapter *adapter,
				struct be_eq_obj *eqo)
{
	return adapter->msix_entries[eqo->idx].vector;
}
2306
/* Request one MSI-x IRQ per event queue.  On failure, free the IRQs
 * already requested (walking backwards from the failed index), disable
 * MSI-x and return the error so the caller can fall back to INTx.
 */
static int be_msix_register(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct be_eq_obj *eqo;
	int status, i, vec;

	for_all_evt_queues(adapter, eqo, i) {
		sprintf(eqo->desc, "%s-q%d", netdev->name, i);
		vec = be_msix_vec_get(adapter, eqo);
		status = request_irq(vec, be_msix, 0, eqo->desc, eqo);
		if (status)
			goto err_msix;
	}

	return 0;
err_msix:
	/* Unwind: release the IRQs of EQs 0..i-1 */
	for (i--, eqo = &adapter->eq_obj[i]; i >= 0; i--, eqo--)
		free_irq(be_msix_vec_get(adapter, eqo), eqo);
	dev_warn(&adapter->pdev->dev, "MSIX Request IRQ failed - err %d\n",
		status);
	be_msix_disable(adapter);
	return status;
}
2330
2331 static int be_irq_register(struct be_adapter *adapter)
2332 {
2333         struct net_device *netdev = adapter->netdev;
2334         int status;
2335
2336         if (msix_enabled(adapter)) {
2337                 status = be_msix_register(adapter);
2338                 if (status == 0)
2339                         goto done;
2340                 /* INTx is not supported for VF */
2341                 if (!be_physfn(adapter))
2342                         return status;
2343         }
2344
2345         /* INTx */
2346         netdev->irq = adapter->pdev->irq;
2347         status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
2348                         adapter);
2349         if (status) {
2350                 dev_err(&adapter->pdev->dev,
2351                         "INTx request IRQ failed - err %d\n", status);
2352                 return status;
2353         }
2354 done:
2355         adapter->isr_registered = true;
2356         return 0;
2357 }
2358
2359 static void be_irq_unregister(struct be_adapter *adapter)
2360 {
2361         struct net_device *netdev = adapter->netdev;
2362         struct be_eq_obj *eqo;
2363         int i;
2364
2365         if (!adapter->isr_registered)
2366                 return;
2367
2368         /* INTx */
2369         if (!msix_enabled(adapter)) {
2370                 free_irq(netdev->irq, adapter);
2371                 goto done;
2372         }
2373
2374         /* MSIx */
2375         for_all_evt_queues(adapter, eqo, i)
2376                 free_irq(be_msix_vec_get(adapter, eqo), eqo);
2377
2378 done:
2379         adapter->isr_registered = false;
2380 }
2381
2382 static void be_rx_qs_destroy(struct be_adapter *adapter)
2383 {
2384         struct be_queue_info *q;
2385         struct be_rx_obj *rxo;
2386         int i;
2387
2388         for_all_rx_queues(adapter, rxo, i) {
2389                 q = &rxo->q;
2390                 if (q->created) {
2391                         be_cmd_rxq_destroy(adapter, q);
2392                         /* After the rxq is invalidated, wait for a grace time
2393                          * of 1ms for all dma to end and the flush compl to
2394                          * arrive
2395                          */
2396                         mdelay(1);
2397                         be_rx_cq_clean(rxo);
2398                 }
2399                 be_queue_free(adapter, q);
2400         }
2401 }
2402
/* ndo_stop handler: quiesce the adapter in order — RoCE, MCC, device
 * interrupts, NAPI/EQs, IRQs — then drain TX completions and tear down
 * the RX queues.
 */
static int be_close(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	int i;

	be_roce_dev_close(adapter);

	be_async_mcc_disable(adapter);

	/* Mask device-level interrupts (Lancer has no such control here) */
	if (!lancer_chip(adapter))
		be_intr_set(adapter, false);

	for_all_evt_queues(adapter, eqo, i) {
		napi_disable(&eqo->napi);
		/* Make sure no handler for this EQ is still running */
		if (msix_enabled(adapter))
			synchronize_irq(be_msix_vec_get(adapter, eqo));
		else
			synchronize_irq(netdev->irq);
		be_eq_clean(eqo);
	}

	be_irq_unregister(adapter);

	/* Wait for all pending tx completions to arrive so that
	 * all tx skbs are freed.
	 */
	be_tx_compl_clean(adapter);

	be_rx_qs_destroy(adapter);
	return 0;
}
2435
/* Allocate and create all RX queues in HW, program the 128-entry RSS
 * indirection table when multiple RX queues exist, and post the initial
 * receive buffers. Returns 0 or an error from queue alloc/FW cmds.
 */
static int be_rx_qs_create(struct be_adapter *adapter)
{
	struct be_rx_obj *rxo;
	int rc, i, j;
	u8 rsstable[128];	/* RSS indirection table */

	for_all_rx_queues(adapter, rxo, i) {
		rc = be_queue_alloc(adapter, &rxo->q, RX_Q_LEN,
				    sizeof(struct be_eth_rx_d));
		if (rc)
			return rc;
	}

	/* The FW would like the default RXQ to be created first */
	rxo = default_rxo(adapter);
	rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id, rx_frag_size,
			       adapter->if_handle, false, &rxo->rss_id);
	if (rc)
		return rc;

	/* remaining queues are created RSS-capable */
	for_all_rss_queues(adapter, rxo, i) {
		rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
				       rx_frag_size, adapter->if_handle,
				       true, &rxo->rss_id);
		if (rc)
			return rc;
	}

	if (be_multi_rxq(adapter)) {
		/* Fill the table by cycling through the RSS queue ids;
		 * num_rx_qs - 1 excludes the default (non-RSS) RXQ
		 * created above.
		 */
		for (j = 0; j < 128; j += adapter->num_rx_qs - 1) {
			for_all_rss_queues(adapter, rxo, i) {
				if ((j + i) >= 128)
					break;
				rsstable[j + i] = rxo->rss_id;
			}
		}
		rc = be_cmd_rss_config(adapter, rsstable, 128);
		if (rc)
			return rc;
	}

	/* First time posting */
	for_all_rx_queues(adapter, rxo, i)
		be_post_rx_frags(rxo, GFP_KERNEL);
	return 0;
}
2482
/* netdev open (ndo_open) handler: create RX queues, register IRQs,
 * unmask interrupts, re-arm all CQs/EQs, enable NAPI and report the
 * current link state. Any failure tears the half-opened state down via
 * be_close().
 */
static int be_open(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u8 link_status;
	int status, i;

	status = be_rx_qs_create(adapter);
	if (status)
		goto err;

	be_irq_register(adapter);

	/* Lancer chips skip the host interrupt-enable register */
	if (!lancer_chip(adapter))
		be_intr_set(adapter, true);

	/* re-arm completion queues so new entries raise events */
	for_all_rx_queues(adapter, rxo, i)
		be_cq_notify(adapter, rxo->cq.id, true, 0);

	for_all_tx_queues(adapter, txo, i)
		be_cq_notify(adapter, txo->cq.id, true, 0);

	be_async_mcc_enable(adapter);

	for_all_evt_queues(adapter, eqo, i) {
		napi_enable(&eqo->napi);
		be_eq_notify(adapter, eqo->q.id, true, false, 0);
	}

	/* best-effort: a failed link query just skips the update */
	status = be_cmd_link_status_query(adapter, NULL, &link_status, 0);
	if (!status)
		be_link_status_update(adapter, link_status);

	be_roce_dev_open(adapter);
	return 0;
err:
	be_close(adapter->netdev);
	/* NOTE(review): the original error code is discarded here and
	 * normalized to -EIO - confirm callers don't need the real one
	 */
	return -EIO;
}
2524
2525 static int be_setup_wol(struct be_adapter *adapter, bool enable)
2526 {
2527         struct be_dma_mem cmd;
2528         int status = 0;
2529         u8 mac[ETH_ALEN];
2530
2531         memset(mac, 0, ETH_ALEN);
2532
2533         cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
2534         cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
2535                                     GFP_KERNEL);
2536         if (cmd.va == NULL)
2537                 return -1;
2538         memset(cmd.va, 0, cmd.size);
2539
2540         if (enable) {
2541                 status = pci_write_config_dword(adapter->pdev,
2542                         PCICFG_PM_CONTROL_OFFSET, PCICFG_PM_CONTROL_MASK);
2543                 if (status) {
2544                         dev_err(&adapter->pdev->dev,
2545                                 "Could not enable Wake-on-lan\n");
2546                         dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
2547                                           cmd.dma);
2548                         return status;
2549                 }
2550                 status = be_cmd_enable_magic_wol(adapter,
2551                                 adapter->netdev->dev_addr, &cmd);
2552                 pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
2553                 pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
2554         } else {
2555                 status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
2556                 pci_enable_wake(adapter->pdev, PCI_D3hot, 0);
2557                 pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
2558         }
2559
2560         dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
2561         return status;
2562 }
2563
/*
 * Generate a seed MAC address from the PF MAC Address using jhash.
 * MAC Address for VFs are assigned incrementally starting from the seed.
 * These addresses are programmed in the ASIC by the PF and the VF driver
 * queries for the MAC address during its probe.
 */
static inline int be_vf_eth_addr_config(struct be_adapter *adapter)
{
	u32 vf;
	int status = 0;
	u8 mac[ETH_ALEN];
	struct be_vf_cfg *vf_cfg;

	be_vf_eth_addr_generate(adapter, mac);

	for_all_vfs(adapter, vf_cfg, vf) {
		/* Lancer programs the MAC via the provisioned MAC list;
		 * BE3 adds a pmac entry on the VF's interface
		 */
		if (lancer_chip(adapter)) {
			status = be_cmd_set_mac_list(adapter,  mac, 1, vf + 1);
		} else {
			status = be_cmd_pmac_add(adapter, mac,
						 vf_cfg->if_handle,
						 &vf_cfg->pmac_id, vf + 1);
		}

		/* a failure for one VF doesn't abort the loop; the MAC
		 * is only recorded in vf_cfg on success
		 */
		if (status)
			dev_err(&adapter->pdev->dev,
			"Mac address assignment failed for VF %d\n", vf);
		else
			memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);

		/* next VF gets the next address; only the last octet is
		 * incremented (wraps at 256 VFs)
		 */
		mac[5] += 1;
	}
	/* reflects the status of the last VF attempted */
	return status;
}
2598
/* Undo be_vf_setup(): delete each VF's MAC and interface in FW, then
 * disable SRIOV. If any VF is still assigned to a VM, the FW objects are
 * left intact and only the host-side bookkeeping is freed.
 */
static void be_vf_clear(struct be_adapter *adapter)
{
	struct be_vf_cfg *vf_cfg;
	u32 vf;

	if (be_find_vfs(adapter, ASSIGNED)) {
		dev_warn(&adapter->pdev->dev, "VFs are assigned to VMs\n");
		goto done;
	}

	for_all_vfs(adapter, vf_cfg, vf) {
		/* Lancer clears the provisioned MAC list; BE3 removes
		 * the pmac entry added at setup
		 */
		if (lancer_chip(adapter))
			be_cmd_set_mac_list(adapter, NULL, 0, vf + 1);
		else
			be_cmd_pmac_del(adapter, vf_cfg->if_handle,
					vf_cfg->pmac_id, vf + 1);

		be_cmd_if_destroy(adapter, vf_cfg->if_handle, vf + 1);
	}
	pci_disable_sriov(adapter->pdev);
done:
	kfree(adapter->vf_cfg);
	adapter->num_vfs = 0;
}
2623
/* Tear down everything be_setup() created, in reverse order: stop the
 * worker, clear VFs, remove extra unicast MACs, destroy the interface,
 * then all queues, and finally release MSI-X. Always returns 0.
 */
static int be_clear(struct be_adapter *adapter)
{
	int i = 1;	/* pmac_id[0] is the primary MAC; skip it below */

	if (adapter->flags & BE_FLAGS_WORKER_SCHEDULED) {
		cancel_delayed_work_sync(&adapter->work);
		adapter->flags &= ~BE_FLAGS_WORKER_SCHEDULED;
	}

	if (sriov_enabled(adapter))
		be_vf_clear(adapter);

	/* delete any additional unicast MACs programmed after setup */
	for (; adapter->uc_macs > 0; adapter->uc_macs--, i++)
		be_cmd_pmac_del(adapter, adapter->if_handle,
			adapter->pmac_id[i], 0);

	be_cmd_if_destroy(adapter, adapter->if_handle,  0);

	be_mcc_queues_destroy(adapter);
	be_rx_cqs_destroy(adapter);
	be_tx_queues_destroy(adapter);
	be_evt_queues_destroy(adapter);

	kfree(adapter->pmac_id);
	adapter->pmac_id = NULL;

	be_msix_disable(adapter);
	return 0;
}
2653
2654 static void be_get_vf_if_cap_flags(struct be_adapter *adapter,
2655                                    u32 *cap_flags, u8 domain)
2656 {
2657         bool profile_present = false;
2658         int status;
2659
2660         if (lancer_chip(adapter)) {
2661                 status = be_cmd_get_profile_config(adapter, cap_flags, domain);
2662                 if (!status)
2663                         profile_present = true;
2664         }
2665
2666         if (!profile_present)
2667                 *cap_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
2668                              BE_IF_FLAGS_MULTICAST;
2669 }
2670
2671 static int be_vf_setup_init(struct be_adapter *adapter)
2672 {
2673         struct be_vf_cfg *vf_cfg;
2674         int vf;
2675
2676         adapter->vf_cfg = kcalloc(adapter->num_vfs, sizeof(*vf_cfg),
2677                                   GFP_KERNEL);
2678         if (!adapter->vf_cfg)
2679                 return -ENOMEM;
2680
2681         for_all_vfs(adapter, vf_cfg, vf) {
2682                 vf_cfg->if_handle = -1;
2683                 vf_cfg->pmac_id = -1;
2684         }
2685         return 0;
2686 }
2687
/* Enable SRIOV and provision every VF: create its FW interface with the
 * basic filter flags, assign MAC addresses, apply a QoS rate, record the
 * default vlan from the host switch config and finally enable the VF.
 * Returns 0 (also when SRIOV itself can't be enabled, which is treated
 * as non-fatal) or the first command error encountered.
 */
static int be_vf_setup(struct be_adapter *adapter)
{
	struct be_vf_cfg *vf_cfg;
	struct device *dev = &adapter->pdev->dev;
	u32 cap_flags, en_flags, vf;
	u16 def_vlan, lnk_speed;
	int status, enabled_vfs;

	enabled_vfs = be_find_vfs(adapter, ENABLED);
	if (enabled_vfs) {
		dev_warn(dev, "%d VFs are already enabled\n", enabled_vfs);
		dev_warn(dev, "Ignoring num_vfs=%d setting\n", num_vfs);
		return 0;
	}

	/* clamp the module parameter to what the device exposes */
	if (num_vfs > adapter->dev_num_vfs) {
		dev_warn(dev, "Device supports %d VFs and not %d\n",
			 adapter->dev_num_vfs, num_vfs);
		num_vfs = adapter->dev_num_vfs;
	}

	status = pci_enable_sriov(adapter->pdev, num_vfs);
	if (!status) {
		adapter->num_vfs = num_vfs;
	} else {
		/* Platform doesn't support SRIOV though device supports it */
		dev_warn(dev, "SRIOV enable failed\n");
		return 0;
	}

	status = be_vf_setup_init(adapter);
	if (status)
		goto err;

	for_all_vfs(adapter, vf_cfg, vf) {
		be_get_vf_if_cap_flags(adapter, &cap_flags, vf + 1);

		/* enable only the basic RX filter flags for a VF */
		en_flags = cap_flags & (BE_IF_FLAGS_UNTAGGED |
					BE_IF_FLAGS_BROADCAST |
					BE_IF_FLAGS_MULTICAST);

		status = be_cmd_if_create(adapter, cap_flags, en_flags,
					  &vf_cfg->if_handle, vf + 1);
		if (status)
			goto err;
	}

	/* NOTE(review): enabled_vfs is always 0 here (non-zero returned
	 * early above), so this condition is currently always true
	 */
	if (!enabled_vfs) {
		status = be_vf_eth_addr_config(adapter);
		if (status)
			goto err;
	}

	for_all_vfs(adapter, vf_cfg, vf) {
		lnk_speed = 1000;
		status = be_cmd_set_qos(adapter, lnk_speed, vf + 1);
		if (status)
			goto err;
		/* tx_rate is stored as lnk_speed * 10 */
		vf_cfg->tx_rate = lnk_speed * 10;

		status = be_cmd_get_hsw_config(adapter, &def_vlan,
				vf + 1, vf_cfg->if_handle);
		if (status)
			goto err;
		vf_cfg->def_vid = def_vlan;

		be_cmd_enable_vf(adapter, vf + 1);
	}
	return 0;
err:
	return status;
}
2760
2761 static void be_setup_init(struct be_adapter *adapter)
2762 {
2763         adapter->vlan_prio_bmap = 0xff;
2764         adapter->phy.link_speed = -1;
2765         adapter->if_handle = -1;
2766         adapter->be3_native = false;
2767         adapter->promiscuous = false;
2768         if (be_physfn(adapter))
2769                 adapter->cmd_privileges = MAX_PRIVILEGES;
2770         else
2771                 adapter->cmd_privileges = MIN_PRIVILEGES;
2772 }
2773
2774 static int be_get_mac_addr(struct be_adapter *adapter, u8 *mac, u32 if_handle,
2775                            bool *active_mac, u32 *pmac_id)
2776 {
2777         int status = 0;
2778
2779         if (!is_zero_ether_addr(adapter->netdev->perm_addr)) {
2780                 memcpy(mac, adapter->netdev->dev_addr, ETH_ALEN);
2781                 if (!lancer_chip(adapter) && !be_physfn(adapter))
2782                         *active_mac = true;
2783                 else
2784                         *active_mac = false;
2785
2786                 return status;
2787         }
2788
2789         if (lancer_chip(adapter)) {
2790                 status = be_cmd_get_mac_from_list(adapter, mac,
2791                                                   active_mac, pmac_id, 0);
2792                 if (*active_mac) {
2793                         status = be_cmd_mac_addr_query(adapter, mac, false,
2794                                                        if_handle, *pmac_id);
2795                 }
2796         } else if (be_physfn(adapter)) {
2797                 /* For BE3, for PF get permanent MAC */
2798                 status = be_cmd_mac_addr_query(adapter, mac, true, 0, 0);
2799                 *active_mac = false;
2800         } else {
2801                 /* For BE3, for VF get soft MAC assigned by PF*/
2802                 status = be_cmd_mac_addr_query(adapter, mac, false,
2803                                                if_handle, 0);
2804                 *active_mac = true;
2805         }
2806         return status;
2807 }
2808
/* Populate the adapter's per-function resource limits. On Lancer they
 * come from the FW function profile (clamped to the driver's maxima);
 * on other chips, or if the profile query fails, fixed driver defaults
 * are used instead.
 */
static void be_get_resources(struct be_adapter *adapter)
{
	int status;
	bool profile_present = false;

	if (lancer_chip(adapter)) {
		status = be_cmd_get_func_config(adapter);

		if (!status)
			profile_present = true;
	}

	if (profile_present) {
		/* Sanity fixes for Lancer */
		adapter->max_pmac_cnt = min_t(u16, adapter->max_pmac_cnt,
					      BE_UC_PMAC_COUNT);
		adapter->max_vlans = min_t(u16, adapter->max_vlans,
					   BE_NUM_VLANS_SUPPORTED);
		adapter->max_mcast_mac = min_t(u16, adapter->max_mcast_mac,
					       BE_MAX_MC);
		adapter->max_tx_queues = min_t(u16, adapter->max_tx_queues,
					       MAX_TX_QS);
		adapter->max_rss_queues = min_t(u16, adapter->max_rss_queues,
						BE3_MAX_RSS_QS);
		adapter->max_event_queues = min_t(u16,
						  adapter->max_event_queues,
						  BE3_MAX_RSS_QS);

		/* leave room for the default (non-RSS) RX queue */
		if (adapter->max_rss_queues &&
		    adapter->max_rss_queues == adapter->max_rx_queues)
			adapter->max_rss_queues -= 1;

		/* can't have more RSS queues than event queues */
		if (adapter->max_event_queues < adapter->max_rss_queues)
			adapter->max_rss_queues = adapter->max_event_queues;

	} else {
		if (be_physfn(adapter))
			adapter->max_pmac_cnt = BE_UC_PMAC_COUNT;
		else
			adapter->max_pmac_cnt = BE_VF_UC_PMAC_COUNT;

		/* Flex10 mode gets 1/8 of the vlan space - presumably
		 * shared among partitions; TODO confirm
		 */
		if (adapter->function_mode & FLEX10_MODE)
			adapter->max_vlans = BE_NUM_VLANS_SUPPORTED/8;
		else
			adapter->max_vlans = BE_NUM_VLANS_SUPPORTED;

		adapter->max_mcast_mac = BE_MAX_MC;
		adapter->max_tx_queues = MAX_TX_QS;
		adapter->max_rss_queues = (adapter->be3_native) ?
					   BE3_MAX_RSS_QS : BE2_MAX_RSS_QS;
		adapter->max_event_queues = BE3_MAX_RSS_QS;

		adapter->if_cap_flags = BE_IF_FLAGS_UNTAGGED |
					BE_IF_FLAGS_BROADCAST |
					BE_IF_FLAGS_MULTICAST |
					BE_IF_FLAGS_PASS_L3L4_ERRORS |
					BE_IF_FLAGS_MCAST_PROMISCUOUS |
					BE_IF_FLAGS_VLAN_PROMISCUOUS |
					BE_IF_FLAGS_PROMISCUOUS;

		if (adapter->function_caps & BE_FUNCTION_CAPS_RSS)
			adapter->if_cap_flags |= BE_IF_FLAGS_RSS;
	}
}
2873
/* Routine to query per function resource limits.
 * Queries the FW config, derives resource limits, allocates the pmac_id
 * table and reads the number of VFs exposed via the PCI SRIOV
 * capability. Returns 0 or a FW/-ENOMEM error.
 */
static int be_get_config(struct be_adapter *adapter)
{
	int pos, status;
	u16 dev_num_vfs;

	status = be_cmd_query_fw_cfg(adapter, &adapter->port_num,
				     &adapter->function_mode,
				     &adapter->function_caps);
	if (status)
		goto err;

	be_get_resources(adapter);

	/* primary mac needs 1 pmac entry */
	adapter->pmac_id = kcalloc(adapter->max_pmac_cnt + 1,
				   sizeof(u32), GFP_KERNEL);
	if (!adapter->pmac_id) {
		status = -ENOMEM;
		goto err;
	}

	/* read TotalVFs from the SRIOV extended capability, if present */
	pos = pci_find_ext_capability(adapter->pdev, PCI_EXT_CAP_ID_SRIOV);
	if (pos) {
		pci_read_config_word(adapter->pdev, pos + PCI_SRIOV_TOTAL_VF,
				     &dev_num_vfs);
		/* non-Lancer chips are capped at MAX_VFS */
		if (!lancer_chip(adapter))
			dev_num_vfs = min_t(u16, dev_num_vfs, MAX_VFS);
		adapter->dev_num_vfs = dev_num_vfs;
	}
err:
	return status;
}
2907
/* Full adapter setup (probe and post-reset): query resources, enable
 * MSI-X, create all queues and the network interface, program the MAC,
 * restore vlan/rx-mode/flow-control settings, optionally bring up VFs,
 * and start the periodic worker. On any error the partial state is
 * undone via be_clear().
 */
static int be_setup(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	u32 en_flags;
	u32 tx_fc, rx_fc;
	int status;
	u8 mac[ETH_ALEN];
	bool active_mac;

	be_setup_init(adapter);

	if (!lancer_chip(adapter))
		be_cmd_req_native_mode(adapter);

	status = be_get_config(adapter);
	if (status)
		goto err;

	be_msix_enable(adapter);

	status = be_evt_queues_create(adapter);
	if (status)
		goto err;

	status = be_tx_cqs_create(adapter);
	if (status)
		goto err;

	status = be_rx_cqs_create(adapter);
	if (status)
		goto err;

	status = be_mcc_queues_create(adapter);
	if (status)
		goto err;

	be_cmd_get_fn_privileges(adapter, &adapter->cmd_privileges, 0);
	/* In UMC mode FW does not return right privileges.
	 * Override with correct privilege equivalent to PF.
	 */
	if (be_is_mc(adapter))
		adapter->cmd_privileges = MAX_PRIVILEGES;

	en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
			BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS;

	if (adapter->function_caps & BE_FUNCTION_CAPS_RSS)
		en_flags |= BE_IF_FLAGS_RSS;

	/* never enable flags the function doesn't support */
	en_flags = en_flags & adapter->if_cap_flags;

	status = be_cmd_if_create(adapter, adapter->if_cap_flags, en_flags,
				  &adapter->if_handle, 0);
	if (status != 0)
		goto err;

	memset(mac, 0, ETH_ALEN);
	active_mac = false;
	status = be_get_mac_addr(adapter, mac, adapter->if_handle,
				 &active_mac, &adapter->pmac_id[0]);
	if (status != 0)
		goto err;

	/* add a pmac entry only if the MAC isn't already programmed */
	if (!active_mac) {
		status = be_cmd_pmac_add(adapter, mac, adapter->if_handle,
					 &adapter->pmac_id[0], 0);
		if (status != 0)
			goto err;
	}

	if (is_zero_ether_addr(adapter->netdev->dev_addr)) {
		memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
		memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
	}

	status = be_tx_qs_create(adapter);
	if (status)
		goto err;

	be_cmd_get_fw_ver(adapter, adapter->fw_ver, NULL);

	/* re-program vlans remembered from before a reset */
	if (adapter->vlans_added)
		be_vid_config(adapter);

	be_set_rx_mode(adapter->netdev);

	be_cmd_get_flow_control(adapter, &tx_fc, &rx_fc);

	/* only touch FW flow-control state when it differs from ours */
	if (rx_fc != adapter->rx_fc || tx_fc != adapter->tx_fc)
		be_cmd_set_flow_control(adapter, adapter->tx_fc,
					adapter->rx_fc);

	/* VF setup failure is non-fatal by design */
	if (be_physfn(adapter) && num_vfs) {
		if (adapter->dev_num_vfs)
			be_vf_setup(adapter);
		else
			dev_warn(dev, "device doesn't support SRIOV\n");
	}

	status = be_cmd_get_phy_info(adapter);
	if (!status && be_pause_supported(adapter))
		adapter->phy.fc_autoneg = 1;

	schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
	adapter->flags |= BE_FLAGS_WORKER_SCHEDULED;
	return 0;
err:
	be_clear(adapter);
	return status;
}
3018
#ifdef CONFIG_NET_POLL_CONTROLLER
/* Poll all event queues directly (netconsole and friends) */
static void be_netpoll(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	int i;

	for_all_evt_queues(adapter, eqo, i)
		event_handle(eqo);
}
#endif
3032
#define FW_FILE_HDR_SIGN        "ServerEngines Corp. "
/* Split cookie marking the start of a flash section directory in a UFI
 * image; matched as one contiguous 32-byte blob by get_fsec_info()
 */
char flash_cookie[2][16] =      {"*** SE FLAS", "H DIRECTORY *** "};
3035
3036 static bool be_flash_redboot(struct be_adapter *adapter,
3037                         const u8 *p, u32 img_start, int image_size,
3038                         int hdr_size)
3039 {
3040         u32 crc_offset;
3041         u8 flashed_crc[4];
3042         int status;
3043
3044         crc_offset = hdr_size + img_start + image_size - 4;
3045
3046         p += crc_offset;
3047
3048         status = be_cmd_get_flash_crc(adapter, flashed_crc,
3049                         (image_size - 4));
3050         if (status) {
3051                 dev_err(&adapter->pdev->dev,
3052                 "could not get crc from flash, not flashing redboot\n");
3053                 return false;
3054         }
3055
3056         /*update redboot only if crc does not match*/
3057         if (!memcmp(flashed_crc, p, 4))
3058                 return false;
3059         else
3060                 return true;
3061 }
3062
3063 static bool phy_flashing_required(struct be_adapter *adapter)
3064 {
3065         return (adapter->phy.phy_type == TN_8022 &&
3066                 adapter->phy.interface_type == PHY_TYPE_BASET_10GB);
3067 }
3068
3069 static bool is_comp_in_ufi(struct be_adapter *adapter,
3070                            struct flash_section_info *fsec, int type)
3071 {
3072         int i = 0, img_type = 0;
3073         struct flash_section_info_g2 *fsec_g2 = NULL;
3074
3075         if (BE2_chip(adapter))
3076                 fsec_g2 = (struct flash_section_info_g2 *)fsec;
3077
3078         for (i = 0; i < MAX_FLASH_COMP; i++) {
3079                 if (fsec_g2)
3080                         img_type = le32_to_cpu(fsec_g2->fsec_entry[i].type);
3081                 else
3082                         img_type = le32_to_cpu(fsec->fsec_entry[i].type);
3083
3084                 if (img_type == type)
3085                         return true;
3086         }
3087         return false;
3088
3089 }
3090
3091 struct flash_section_info *get_fsec_info(struct be_adapter *adapter,
3092                                          int header_size,
3093                                          const struct firmware *fw)
3094 {
3095         struct flash_section_info *fsec = NULL;
3096         const u8 *p = fw->data;
3097
3098         p += header_size;
3099         while (p < (fw->data + fw->size)) {
3100                 fsec = (struct flash_section_info *)p;
3101                 if (!memcmp(flash_cookie, fsec->cookie, sizeof(flash_cookie)))
3102                         return fsec;
3103                 p += 32;
3104         }
3105         return NULL;
3106 }
3107
3108 static int be_flash(struct be_adapter *adapter, const u8 *img,
3109                 struct be_dma_mem *flash_cmd, int optype, int img_size)
3110 {
3111         u32 total_bytes = 0, flash_op, num_bytes = 0;
3112         int status = 0;
3113         struct be_cmd_write_flashrom *req = flash_cmd->va;
3114
3115         total_bytes = img_size;
3116         while (total_bytes) {
3117                 num_bytes = min_t(u32, 32*1024, total_bytes);
3118
3119                 total_bytes -= num_bytes;
3120
3121                 if (!total_bytes) {
3122                         if (optype == OPTYPE_PHY_FW)
3123                                 flash_op = FLASHROM_OPER_PHY_FLASH;
3124                         else
3125                                 flash_op = FLASHROM_OPER_FLASH;
3126                 } else {
3127                         if (optype == OPTYPE_PHY_FW)
3128                                 flash_op = FLASHROM_OPER_PHY_SAVE;
3129                         else
3130                                 flash_op = FLASHROM_OPER_SAVE;
3131                 }
3132
3133                 memcpy(req->data_buf, img, num_bytes);
3134                 img += num_bytes;
3135                 status = be_cmd_write_flashrom(adapter, flash_cmd, optype,
3136                                                 flash_op, num_bytes);
3137                 if (status) {
3138                         if (status == ILLEGAL_IOCTL_REQ &&
3139                             optype == OPTYPE_PHY_FW)
3140                                 break;
3141                         dev_err(&adapter->pdev->dev,
3142                                 "cmd to write to flash rom failed.\n");
3143                         return status;
3144                 }
3145         }
3146         return 0;
3147 }
3148
3149 /* For BE2 and BE3 */
3150 static int be_flash_BEx(struct be_adapter *adapter,
3151                          const struct firmware *fw,
3152                          struct be_dma_mem *flash_cmd,
3153                          int num_of_images)
3154
3155 {
3156         int status = 0, i, filehdr_size = 0;
3157         int img_hdrs_size = (num_of_images * sizeof(struct image_hdr));
3158         const u8 *p = fw->data;
3159         const struct flash_comp *pflashcomp;
3160         int num_comp, redboot;
3161         struct flash_section_info *fsec = NULL;
3162
3163         struct flash_comp gen3_flash_types[] = {
3164                 { FLASH_iSCSI_PRIMARY_IMAGE_START_g3, OPTYPE_ISCSI_ACTIVE,
3165                         FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_iSCSI},
3166                 { FLASH_REDBOOT_START_g3, OPTYPE_REDBOOT,
3167                         FLASH_REDBOOT_IMAGE_MAX_SIZE_g3, IMAGE_BOOT_CODE},
3168                 { FLASH_iSCSI_BIOS_START_g3, OPTYPE_BIOS,
3169                         FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_ISCSI},
3170                 { FLASH_PXE_BIOS_START_g3, OPTYPE_PXE_BIOS,
3171                         FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_PXE},
3172                 { FLASH_FCoE_BIOS_START_g3, OPTYPE_FCOE_BIOS,
3173                         FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_FCoE},
3174                 { FLASH_iSCSI_BACKUP_IMAGE_START_g3, OPTYPE_ISCSI_BACKUP,
3175                         FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_iSCSI},
3176                 { FLASH_FCoE_PRIMARY_IMAGE_START_g3, OPTYPE_FCOE_FW_ACTIVE,
3177                         FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_FCoE},
3178                 { FLASH_FCoE_BACKUP_IMAGE_START_g3, OPTYPE_FCOE_FW_BACKUP,
3179                         FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_FCoE},
3180                 { FLASH_NCSI_START_g3, OPTYPE_NCSI_FW,
3181                         FLASH_NCSI_IMAGE_MAX_SIZE_g3, IMAGE_NCSI},
3182                 { FLASH_PHY_FW_START_g3, OPTYPE_PHY_FW,
3183                         FLASH_PHY_FW_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_PHY}
3184         };
3185
3186         struct flash_comp gen2_flash_types[] = {
3187                 { FLASH_iSCSI_PRIMARY_IMAGE_START_g2, OPTYPE_ISCSI_ACTIVE,
3188                         FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_iSCSI},
3189                 { FLASH_REDBOOT_START_g2, OPTYPE_REDBOOT,
3190                         FLASH_REDBOOT_IMAGE_MAX_SIZE_g2, IMAGE_BOOT_CODE},
3191                 { FLASH_iSCSI_BIOS_START_g2, OPTYPE_BIOS,
3192                         FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_ISCSI},
3193                 { FLASH_PXE_BIOS_START_g2, OPTYPE_PXE_BIOS,
3194                         FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_PXE},
3195                 { FLASH_FCoE_BIOS_START_g2, OPTYPE_FCOE_BIOS,
3196                         FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_FCoE},
3197                 { FLASH_iSCSI_BACKUP_IMAGE_START_g2, OPTYPE_ISCSI_BACKUP,
3198                         FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_iSCSI},
3199                 { FLASH_FCoE_PRIMARY_IMAGE_START_g2, OPTYPE_FCOE_FW_ACTIVE,
3200                         FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_FCoE},
3201                 { FLASH_FCoE_BACKUP_IMAGE_START_g2, OPTYPE_FCOE_FW_BACKUP,
3202                          FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_FCoE}
3203         };
3204
3205         if (BE3_chip(adapter)) {
3206                 pflashcomp = gen3_flash_types;
3207                 filehdr_size = sizeof(struct flash_file_hdr_g3);
3208                 num_comp = ARRAY_SIZE(gen3_flash_types);
3209         } else {
3210                 pflashcomp = gen2_flash_types;
3211                 filehdr_size = sizeof(struct flash_file_hdr_g2);
3212                 num_comp = ARRAY_SIZE(gen2_flash_types);
3213         }
3214
3215         /* Get flash section info*/
3216         fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
3217         if (!fsec) {
3218                 dev_err(&adapter->pdev->dev,
3219                         "Invalid Cookie. UFI corrupted ?\n");
3220                 return -1;
3221         }
3222         for (i = 0; i < num_comp; i++) {
3223                 if (!is_comp_in_ufi(adapter, fsec, pflashcomp[i].img_type))
3224                         continue;
3225
3226                 if ((pflashcomp[i].optype == OPTYPE_NCSI_FW) &&
3227                     memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
3228                         continue;
3229
3230                 if (pflashcomp[i].optype == OPTYPE_PHY_FW  &&
3231                     !phy_flashing_required(adapter))
3232                                 continue;
3233
3234                 if (pflashcomp[i].optype == OPTYPE_REDBOOT) {
3235                         redboot = be_flash_redboot(adapter, fw->data,
3236                                 pflashcomp[i].offset, pflashcomp[i].size,
3237                                 filehdr_size + img_hdrs_size);
3238                         if (!redboot)
3239                                 continue;
3240                 }
3241
3242                 p = fw->data;
3243                 p += filehdr_size + pflashcomp[i].offset + img_hdrs_size;
3244                 if (p + pflashcomp[i].size > fw->data + fw->size)
3245                         return -1;
3246
3247                 status = be_flash(adapter, p, flash_cmd, pflashcomp[i].optype,
3248                                         pflashcomp[i].size);
3249                 if (status) {
3250                         dev_err(&adapter->pdev->dev,
3251                                 "Flashing section type %d failed.\n",
3252                                 pflashcomp[i].img_type);
3253                         return status;
3254                 }
3255         }
3256         return 0;
3257 }
3258
3259 static int be_flash_skyhawk(struct be_adapter *adapter,
3260                 const struct firmware *fw,
3261                 struct be_dma_mem *flash_cmd, int num_of_images)
3262 {
3263         int status = 0, i, filehdr_size = 0;
3264         int img_offset, img_size, img_optype, redboot;
3265         int img_hdrs_size = num_of_images * sizeof(struct image_hdr);
3266         const u8 *p = fw->data;
3267         struct flash_section_info *fsec = NULL;
3268
3269         filehdr_size = sizeof(struct flash_file_hdr_g3);
3270         fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
3271         if (!fsec) {
3272                 dev_err(&adapter->pdev->dev,
3273                         "Invalid Cookie. UFI corrupted ?\n");
3274                 return -1;
3275         }
3276
3277         for (i = 0; i < le32_to_cpu(fsec->fsec_hdr.num_images); i++) {
3278                 img_offset = le32_to_cpu(fsec->fsec_entry[i].offset);
3279                 img_size   = le32_to_cpu(fsec->fsec_entry[i].pad_size);
3280
3281                 switch (le32_to_cpu(fsec->fsec_entry[i].type)) {
3282                 case IMAGE_FIRMWARE_iSCSI:
3283                         img_optype = OPTYPE_ISCSI_ACTIVE;
3284                         break;
3285                 case IMAGE_BOOT_CODE:
3286                         img_optype = OPTYPE_REDBOOT;
3287                         break;
3288                 case IMAGE_OPTION_ROM_ISCSI:
3289                         img_optype = OPTYPE_BIOS;
3290                         break;
3291                 case IMAGE_OPTION_ROM_PXE:
3292                         img_optype = OPTYPE_PXE_BIOS;
3293                         break;
3294                 case IMAGE_OPTION_ROM_FCoE:
3295                         img_optype = OPTYPE_FCOE_BIOS;
3296                         break;
3297                 case IMAGE_FIRMWARE_BACKUP_iSCSI:
3298                         img_optype = OPTYPE_ISCSI_BACKUP;
3299                         break;
3300                 case IMAGE_NCSI:
3301                         img_optype = OPTYPE_NCSI_FW;
3302                         break;
3303                 default:
3304                         continue;
3305                 }
3306
3307                 if (img_optype == OPTYPE_REDBOOT) {
3308                         redboot = be_flash_redboot(adapter, fw->data,
3309                                         img_offset, img_size,
3310                                         filehdr_size + img_hdrs_size);
3311                         if (!redboot)
3312                                 continue;
3313                 }
3314
3315                 p = fw->data;
3316                 p += filehdr_size + img_offset + img_hdrs_size;
3317                 if (p + img_size > fw->data + fw->size)
3318                         return -1;
3319
3320                 status = be_flash(adapter, p, flash_cmd, img_optype, img_size);
3321                 if (status) {
3322                         dev_err(&adapter->pdev->dev,
3323                                 "Flashing section type %d failed.\n",
3324                                 fsec->fsec_entry[i].type);
3325                         return status;
3326                 }
3327         }
3328         return 0;
3329 }
3330
3331 static int lancer_wait_idle(struct be_adapter *adapter)
3332 {
3333 #define SLIPORT_IDLE_TIMEOUT 30
3334         u32 reg_val;
3335         int status = 0, i;
3336
3337         for (i = 0; i < SLIPORT_IDLE_TIMEOUT; i++) {
3338                 reg_val = ioread32(adapter->db + PHYSDEV_CONTROL_OFFSET);
3339                 if ((reg_val & PHYSDEV_CONTROL_INP_MASK) == 0)
3340                         break;
3341
3342                 ssleep(1);
3343         }
3344
3345         if (i == SLIPORT_IDLE_TIMEOUT)
3346                 status = -1;
3347
3348         return status;
3349 }
3350
3351 static int lancer_fw_reset(struct be_adapter *adapter)
3352 {
3353         int status = 0;
3354
3355         status = lancer_wait_idle(adapter);
3356         if (status)
3357                 return status;
3358
3359         iowrite32(PHYSDEV_CONTROL_FW_RESET_MASK, adapter->db +
3360                   PHYSDEV_CONTROL_OFFSET);
3361
3362         return status;
3363 }
3364
/* Download a firmware image to a Lancer chip via WRITE_OBJECT commands.
 *
 * The image is streamed in 32KB chunks through one DMA buffer that holds
 * the write_object request header followed by the chunk payload; a final
 * zero-length write at the end offset commits the image.  Depending on
 * change_status the new FW needs either a port-level FW reset (issued
 * here) or a full system reboot (only reported).
 *
 * Returns 0 on success, negative errno on setup failure, or the command
 * status on a download failure.
 */
static int lancer_fw_download(struct be_adapter *adapter,
				const struct firmware *fw)
{
#define LANCER_FW_DOWNLOAD_CHUNK      (32 * 1024)
#define LANCER_FW_DOWNLOAD_LOCATION   "/prg"
	struct be_dma_mem flash_cmd;
	const u8 *data_ptr = NULL;
	u8 *dest_image_ptr = NULL;
	size_t image_size = 0;
	u32 chunk_size = 0;
	u32 data_written = 0;
	u32 offset = 0;
	int status = 0;
	u8 add_status = 0;
	u8 change_status;

	/* FW rejects images whose length is not a multiple of 4 bytes */
	if (!IS_ALIGNED(fw->size, sizeof(u32))) {
		dev_err(&adapter->pdev->dev,
			"FW Image not properly aligned. "
			"Length must be 4 byte aligned.\n");
		status = -EINVAL;
		goto lancer_fw_exit;
	}

	/* One buffer: request header + room for a full chunk payload */
	flash_cmd.size = sizeof(struct lancer_cmd_req_write_object)
				+ LANCER_FW_DOWNLOAD_CHUNK;
	flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
						&flash_cmd.dma, GFP_KERNEL);
	if (!flash_cmd.va) {
		status = -ENOMEM;
		dev_err(&adapter->pdev->dev,
			"Memory allocation failure while flashing\n");
		goto lancer_fw_exit;
	}

	/* Payload area starts right after the request header */
	dest_image_ptr = flash_cmd.va +
				sizeof(struct lancer_cmd_req_write_object);
	image_size = fw->size;
	data_ptr = fw->data;

	while (image_size) {
		chunk_size = min_t(u32, image_size, LANCER_FW_DOWNLOAD_CHUNK);

		/* Copy the image chunk content. */
		memcpy(dest_image_ptr, data_ptr, chunk_size);

		status = lancer_cmd_write_object(adapter, &flash_cmd,
						 chunk_size, offset,
						 LANCER_FW_DOWNLOAD_LOCATION,
						 &data_written, &change_status,
						 &add_status);
		if (status)
			break;

		/* Advance by what the FW actually accepted, which may be
		 * less than chunk_size.
		 */
		offset += data_written;
		data_ptr += data_written;
		image_size -= data_written;
	}

	if (!status) {
		/* Commit the FW written */
		status = lancer_cmd_write_object(adapter, &flash_cmd,
						 0, offset,
						 LANCER_FW_DOWNLOAD_LOCATION,
						 &data_written, &change_status,
						 &add_status);
	}

	dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
				flash_cmd.dma);
	if (status) {
		dev_err(&adapter->pdev->dev,
			"Firmware load error. "
			"Status code: 0x%x Additional Status: 0x%x\n",
			status, add_status);
		goto lancer_fw_exit;
	}

	if (change_status == LANCER_FW_RESET_NEEDED) {
		status = lancer_fw_reset(adapter);
		if (status) {
			dev_err(&adapter->pdev->dev,
				"Adapter busy for FW reset.\n"
				"New FW will not be active.\n");
			goto lancer_fw_exit;
		}
	} else if (change_status != LANCER_NO_RESET_NEEDED) {
			dev_err(&adapter->pdev->dev,
				"System reboot required for new FW"
				" to be active\n");
	}

	dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
lancer_fw_exit:
	return status;
}
3461
3462 #define UFI_TYPE2               2
3463 #define UFI_TYPE3               3
3464 #define UFI_TYPE4               4
3465 static int be_get_ufi_type(struct be_adapter *adapter,
3466                            struct flash_file_hdr_g2 *fhdr)
3467 {
3468         if (fhdr == NULL)
3469                 goto be_get_ufi_exit;
3470
3471         if (skyhawk_chip(adapter) && fhdr->build[0] == '4')
3472                 return UFI_TYPE4;
3473         else if (BE3_chip(adapter) && fhdr->build[0] == '3')
3474                 return UFI_TYPE3;
3475         else if (BE2_chip(adapter) && fhdr->build[0] == '2')
3476                 return UFI_TYPE2;
3477
3478 be_get_ufi_exit:
3479         dev_err(&adapter->pdev->dev,
3480                 "UFI and Interface are not compatible for flashing\n");
3481         return -1;
3482 }
3483
/* Flash a BE2/BE3/Skyhawk UFI image.
 *
 * Allocates the DMA buffer used for flashrom commands, determines the UFI
 * generation from the file header, then dispatches to the generation-
 * specific flashing routine.  Returns 0 on success, negative on failure.
 */
static int be_fw_download(struct be_adapter *adapter, const struct firmware* fw)
{
	struct flash_file_hdr_g2 *fhdr;
	struct flash_file_hdr_g3 *fhdr3;
	struct image_hdr *img_hdr_ptr = NULL;
	struct be_dma_mem flash_cmd;
	const u8 *p;
	int status = 0, i = 0, num_imgs = 0, ufi_type = 0;

	flash_cmd.size = sizeof(struct be_cmd_write_flashrom);
	flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
					  &flash_cmd.dma, GFP_KERNEL);
	if (!flash_cmd.va) {
		status = -ENOMEM;
		dev_err(&adapter->pdev->dev,
			"Memory allocation failure while flashing\n");
		goto be_fw_exit;
	}

	p = fw->data;
	fhdr = (struct flash_file_hdr_g2 *)p;

	ufi_type = be_get_ufi_type(adapter, fhdr);

	/* NOTE(review): the file is reinterpreted as a gen3 header and the
	 * image-header loop runs even when ufi_type is UFI_TYPE2 or -1, and
	 * num_imgs is taken from the file without an upper bound; assumed
	 * harmless because status is only set for TYPE3/TYPE4 — confirm
	 * against the UFI format before changing.
	 */
	fhdr3 = (struct flash_file_hdr_g3 *)fw->data;
	num_imgs = le32_to_cpu(fhdr3->num_imgs);
	for (i = 0; i < num_imgs; i++) {
		img_hdr_ptr = (struct image_hdr *)(fw->data +
				(sizeof(struct flash_file_hdr_g3) +
				 i * sizeof(struct image_hdr)));
		if (le32_to_cpu(img_hdr_ptr->imageid) == 1) {
			if (ufi_type == UFI_TYPE4)
				status = be_flash_skyhawk(adapter, fw,
							&flash_cmd, num_imgs);
			else if (ufi_type == UFI_TYPE3)
				status = be_flash_BEx(adapter, fw, &flash_cmd,
						      num_imgs);
		}
	}

	/* Gen2 UFIs carry no image-header table (num_of_images == 0) */
	if (ufi_type == UFI_TYPE2)
		status = be_flash_BEx(adapter, fw, &flash_cmd, 0);
	else if (ufi_type == -1)
		status = -1;

	dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
			  flash_cmd.dma);
	if (status) {
		dev_err(&adapter->pdev->dev, "Firmware load error\n");
		goto be_fw_exit;
	}

	dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");

be_fw_exit:
	return status;
}
3541
3542 int be_load_fw(struct be_adapter *adapter, u8 *fw_file)
3543 {
3544         const struct firmware *fw;
3545         int status;
3546
3547         if (!netif_running(adapter->netdev)) {
3548                 dev_err(&adapter->pdev->dev,
3549                         "Firmware load not allowed (interface is down)\n");
3550                 return -1;
3551         }
3552
3553         status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
3554         if (status)
3555                 goto fw_exit;
3556
3557         dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);
3558
3559         if (lancer_chip(adapter))
3560                 status = lancer_fw_download(adapter, fw);
3561         else
3562                 status = be_fw_download(adapter, fw);
3563
3564 fw_exit:
3565         release_firmware(fw);
3566         return status;
3567 }
3568
/* net_device callbacks: standard datapath ops plus SR-IOV VF management
 * hooks (mac/vlan/tx-rate/config per VF).
 */
static const struct net_device_ops be_netdev_ops = {
	.ndo_open		= be_open,
	.ndo_stop		= be_close,
	.ndo_start_xmit		= be_xmit,
	.ndo_set_rx_mode	= be_set_rx_mode,
	.ndo_set_mac_address	= be_mac_addr_set,
	.ndo_change_mtu		= be_change_mtu,
	.ndo_get_stats64	= be_get_stats64,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_vlan_rx_add_vid	= be_vlan_add_vid,
	.ndo_vlan_rx_kill_vid	= be_vlan_rem_vid,
	.ndo_set_vf_mac		= be_set_vf_mac,
	.ndo_set_vf_vlan	= be_set_vf_vlan,
	.ndo_set_vf_tx_rate	= be_set_vf_tx_rate,
	.ndo_get_vf_config	= be_get_vf_config,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= be_netpoll,
#endif
};
3588
/* Initialize the net_device: offload feature flags, ops/ethtool hooks,
 * and one NAPI context per event queue.
 */
static void be_netdev_init(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	int i;

	/* User-toggleable offloads: scatter-gather, TSO, checksum offload,
	 * VLAN tag insertion; RX hashing only when multiple RX queues exist.
	 */
	netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
		NETIF_F_HW_VLAN_TX;
	if (be_multi_rxq(adapter))
		netdev->hw_features |= NETIF_F_RXHASH;

	/* Currently-active features: everything above plus VLAN stripping
	 * and filtering, which are always on (not user-toggleable).
	 */
	netdev->features |= netdev->hw_features |
		NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER;

	netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;

	/* HW can filter unicast addresses; no need for promiscuous mode */
	netdev->priv_flags |= IFF_UNICAST_FLT;

	netdev->flags |= IFF_MULTICAST;

	netif_set_gso_max_size(netdev, 65535 - ETH_HLEN);

	netdev->netdev_ops = &be_netdev_ops;

	SET_ETHTOOL_OPS(netdev, &be_ethtool_ops);

	/* One NAPI instance per event queue for per-EQ polling */
	for_all_evt_queues(adapter, eqo, i)
		netif_napi_add(netdev, &eqo->napi, be_poll, BE_NAPI_WEIGHT);
}
3620
/* Undo be_map_pci_bars(): unmap the doorbell BAR if it was mapped */
static void be_unmap_pci_bars(struct be_adapter *adapter)
{
	if (adapter->db)
		pci_iounmap(adapter->pdev, adapter->db);
}
3626
/* Return the PCI BAR number holding the doorbell registers: BAR 0 on
 * Lancer and on virtual functions, BAR 4 otherwise.
 */
static int db_bar(struct be_adapter *adapter)
{
	return (lancer_chip(adapter) || !be_physfn(adapter)) ? 0 : 4;
}
3634
3635 static int be_roce_map_pci_bars(struct be_adapter *adapter)
3636 {
3637         if (skyhawk_chip(adapter)) {
3638                 adapter->roce_db.size = 4096;
3639                 adapter->roce_db.io_addr = pci_resource_start(adapter->pdev,
3640                                                               db_bar(adapter));
3641                 adapter->roce_db.total_size = pci_resource_len(adapter->pdev,
3642                                                                db_bar(adapter));
3643         }
3644         return 0;
3645 }
3646
/* Map the doorbell BAR, record the SLI interface type from config space,
 * and set up the RoCE doorbell region.  Returns 0 or -ENOMEM.
 */
static int be_map_pci_bars(struct be_adapter *adapter)
{
	u8 __iomem *addr;
	u32 sli_intf;

	/* Interface type (SLI generation) comes from PCI config space */
	pci_read_config_dword(adapter->pdev, SLI_INTF_REG_OFFSET, &sli_intf);
	adapter->if_type = (sli_intf & SLI_INTF_IF_TYPE_MASK) >>
				SLI_INTF_IF_TYPE_SHIFT;

	addr = pci_iomap(adapter->pdev, db_bar(adapter), 0);
	if (addr == NULL)
		goto pci_map_err;
	adapter->db = addr;

	be_roce_map_pci_bars(adapter);
	return 0;

pci_map_err:
	be_unmap_pci_bars(adapter);
	return -ENOMEM;
}
3668
/* Undo be_ctrl_init(): unmap BARs and free the mailbox and rx-filter
 * DMA buffers (each freed only if it was successfully allocated).
 */
static void be_ctrl_cleanup(struct be_adapter *adapter)
{
	struct be_dma_mem *mem = &adapter->mbox_mem_alloced;

	be_unmap_pci_bars(adapter);

	if (mem->va)
		dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
				  mem->dma);

	mem = &adapter->rx_filter;
	if (mem->va)
		dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
				  mem->dma);
}
3684
/* One-time control-path setup: read SLI identity from config space, map
 * PCI BARs, allocate the FW mailbox (16-byte aligned, as the HW requires)
 * and the rx-filter DMA buffer, and initialize the command locks.
 *
 * Returns 0 on success or a negative errno; on failure everything
 * acquired so far is released via the goto-cleanup chain.
 */
static int be_ctrl_init(struct be_adapter *adapter)
{
	struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
	struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
	struct be_dma_mem *rx_filter = &adapter->rx_filter;
	u32 sli_intf;
	int status;

	pci_read_config_dword(adapter->pdev, SLI_INTF_REG_OFFSET, &sli_intf);
	adapter->sli_family = (sli_intf & SLI_INTF_FAMILY_MASK) >>
				 SLI_INTF_FAMILY_SHIFT;
	adapter->virtfn = (sli_intf & SLI_INTF_FT_MASK) ? 1 : 0;

	status = be_map_pci_bars(adapter);
	if (status)
		goto done;

	/* Over-allocate by 16 bytes so the mailbox can be aligned below */
	mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
	mbox_mem_alloc->va = dma_alloc_coherent(&adapter->pdev->dev,
						mbox_mem_alloc->size,
						&mbox_mem_alloc->dma,
						GFP_KERNEL);
	if (!mbox_mem_alloc->va) {
		status = -ENOMEM;
		goto unmap_pci_bars;
	}
	/* The aligned view is what is handed to the FW; the *_alloced copy
	 * keeps the original pointer/handle for freeing.
	 */
	mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
	mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
	mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
	memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));

	rx_filter->size = sizeof(struct be_cmd_req_rx_filter);
	rx_filter->va = dma_alloc_coherent(&adapter->pdev->dev, rx_filter->size,
					&rx_filter->dma, GFP_KERNEL);
	if (rx_filter->va == NULL) {
		status = -ENOMEM;
		goto free_mbox;
	}
	memset(rx_filter->va, 0, rx_filter->size);
	mutex_init(&adapter->mbox_lock);
	spin_lock_init(&adapter->mcc_lock);
	spin_lock_init(&adapter->mcc_cq_lock);

	init_completion(&adapter->flash_compl);
	/* Saved state is restored on EEH / error recovery */
	pci_save_state(adapter->pdev);
	return 0;

free_mbox:
	dma_free_coherent(&adapter->pdev->dev, mbox_mem_alloc->size,
			  mbox_mem_alloc->va, mbox_mem_alloc->dma);

unmap_pci_bars:
	be_unmap_pci_bars(adapter);

done:
	return status;
}
3742
3743 static void be_stats_cleanup(struct be_adapter *adapter)
3744 {
3745         struct be_dma_mem *cmd = &adapter->stats_cmd;
3746
3747         if (cmd->va)
3748                 dma_free_coherent(&adapter->pdev->dev, cmd->size,
3749                                   cmd->va, cmd->dma);
3750 }
3751
/* Allocate the DMA buffer used for periodic stats commands; the request
 * layout (and hence the size) depends on the chip generation.
 * Returns 0 on success, -1 on allocation failure.
 */
static int be_stats_init(struct be_adapter *adapter)
{
	struct be_dma_mem *cmd = &adapter->stats_cmd;

	if (lancer_chip(adapter))
		cmd->size = sizeof(struct lancer_cmd_req_pport_stats);
	else if (BE2_chip(adapter))
		cmd->size = sizeof(struct be_cmd_req_get_stats_v0);
	else
		/* BE3 and Skyhawk */
		cmd->size = sizeof(struct be_cmd_req_get_stats_v1);

	cmd->va = dma_alloc_coherent(&adapter->pdev->dev, cmd->size, &cmd->dma,
				     GFP_KERNEL);
	if (cmd->va == NULL)
		return -1;
	memset(cmd->va, 0, cmd->size);
	return 0;
}
3771
/* PCI remove callback: tear down in reverse order of be_probe().
 * The ordering matters — stop the recovery worker and unregister the
 * netdev before destroying the command/control path it uses.
 */
static void __devexit be_remove(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	if (!adapter)
		return;

	be_roce_dev_remove(adapter);

	cancel_delayed_work_sync(&adapter->func_recovery_work);

	unregister_netdev(adapter->netdev);

	be_clear(adapter);

	/* tell fw we're done with firing cmds */
	be_cmd_fw_clean(adapter);

	be_stats_cleanup(adapter);

	be_ctrl_cleanup(adapter);

	pci_disable_pcie_error_reporting(pdev);

	pci_set_drvdata(pdev, NULL);
	pci_release_regions(pdev);
	pci_disable_device(pdev);

	free_netdev(adapter->netdev);
}
3802
3803 bool be_is_wol_supported(struct be_adapter *adapter)
3804 {
3805         return ((adapter->wol_cap & BE_WOL_CAP) &&
3806                 !be_is_wol_excluded(adapter)) ? true : false;
3807 }
3808
3809 u32 be_get_fw_log_level(struct be_adapter *adapter)
3810 {
3811         struct be_dma_mem extfat_cmd;
3812         struct be_fat_conf_params *cfgs;
3813         int status;
3814         u32 level = 0;
3815         int j;
3816
3817         if (lancer_chip(adapter))
3818                 return 0;
3819
3820         memset(&extfat_cmd, 0, sizeof(struct be_dma_mem));
3821         extfat_cmd.size = sizeof(struct be_cmd_resp_get_ext_fat_caps);
3822         extfat_cmd.va = pci_alloc_consistent(adapter->pdev, extfat_cmd.size,
3823                                              &extfat_cmd.dma);
3824
3825         if (!extfat_cmd.va) {
3826                 dev_err(&adapter->pdev->dev, "%s: Memory allocation failure\n",
3827                         __func__);
3828                 goto err;
3829         }
3830
3831         status = be_cmd_get_ext_fat_capabilites(adapter, &extfat_cmd);
3832         if (!status) {
3833                 cfgs = (struct be_fat_conf_params *)(extfat_cmd.va +
3834                                                 sizeof(struct be_cmd_resp_hdr));
3835                 for (j = 0; j < le32_to_cpu(cfgs->module[0].num_modes); j++) {
3836                         if (cfgs->module[0].trace_lvl[j].mode == MODE_UART)
3837                                 level = cfgs->module[0].trace_lvl[j].dbg_lvl;
3838                 }
3839         }
3840         pci_free_consistent(adapter->pdev, extfat_cmd.size, extfat_cmd.va,
3841                             extfat_cmd.dma);
3842 err:
3843         return level;
3844 }
3845
/* Fetch one-time configuration from the FW at probe: controller
 * attributes, WOL capability, temperature-poll frequency, and the FW
 * log level (mapped to the netdev msg_enable mask).
 * Returns 0 on success or the command status.
 */
static int be_get_initial_config(struct be_adapter *adapter)
{
	int status;
	u32 level;

	status = be_cmd_get_cntl_attributes(adapter);
	if (status)
		return status;

	status = be_cmd_get_acpi_wol_cap(adapter);
	if (status) {
		/* in case of a failure to get wol capabillities
		 * check the exclusion list to determine WOL capability */
		if (!be_is_wol_excluded(adapter))
			adapter->wol_cap |= BE_WOL_CAP;
	}

	if (be_is_wol_supported(adapter))
		adapter->wol = true;

	/* Must be a power of 2 or else MODULO will BUG_ON */
	adapter->be_get_temp_freq = 64;

	level = be_get_fw_log_level(adapter);
	adapter->msg_enable = level <= FW_LOG_LEVEL_DEFAULT ? NETIF_MSG_HW : 0;

	return 0;
}
3874
3875 static int lancer_recover_func(struct be_adapter *adapter)
3876 {
3877         int status;
3878
3879         status = lancer_test_and_set_rdy_state(adapter);
3880         if (status)
3881                 goto err;
3882
3883         if (netif_running(adapter->netdev))
3884                 be_close(adapter->netdev);
3885
3886         be_clear(adapter);
3887
3888         adapter->hw_error = false;
3889         adapter->fw_timeout = false;
3890
3891         status = be_setup(adapter);
3892         if (status)
3893                 goto err;
3894
3895         if (netif_running(adapter->netdev)) {
3896                 status = be_open(adapter->netdev);
3897                 if (status)
3898                         goto err;
3899         }
3900
3901         dev_err(&adapter->pdev->dev,
3902                 "Adapter SLIPORT recovery succeeded\n");
3903         return 0;
3904 err:
3905         if (adapter->eeh_error)
3906                 dev_err(&adapter->pdev->dev,
3907                         "Adapter SLIPORT recovery failed\n");
3908
3909         return status;
3910 }
3911
/* Periodic (1s) worker that detects HW errors and, on Lancer, attempts
 * SLIPORT recovery.  The netdev is detached under rtnl_lock while the
 * function is rebuilt and re-attached only on success.  Always
 * reschedules itself.
 */
static void be_func_recovery_task(struct work_struct *work)
{
	struct be_adapter *adapter =
		container_of(work, struct be_adapter,  func_recovery_work.work);
	int status;

	be_detect_error(adapter);

	if (adapter->hw_error && lancer_chip(adapter)) {

		/* EEH recovery is in progress; let it run instead */
		if (adapter->eeh_error)
			goto out;

		rtnl_lock();
		netif_device_detach(adapter->netdev);
		rtnl_unlock();

		status = lancer_recover_func(adapter);

		if (!status)
			netif_device_attach(adapter->netdev);
	}

out:
	schedule_delayed_work(&adapter->func_recovery_work,
			      msecs_to_jiffies(1000));
}
3939
/* Periodic (1s) housekeeping worker: reap MCC completions while the
 * interface is down, refresh stats, poll the die temperature, replenish
 * starved RX rings, and adapt EQ delay.  Always reschedules itself.
 */
static void be_worker(struct work_struct *work)
{
	struct be_adapter *adapter =
		container_of(work, struct be_adapter, work.work);
	struct be_rx_obj *rxo;
	struct be_eq_obj *eqo;
	int i;

	/* when interrupts are not yet enabled, just reap any pending
	* mcc completions */
	if (!netif_running(adapter->netdev)) {
		local_bh_disable();
		be_process_mcc(adapter);
		local_bh_enable();
		goto reschedule;
	}

	/* Only one stats command in flight at a time */
	if (!adapter->stats_cmd_sent) {
		if (lancer_chip(adapter))
			lancer_cmd_get_pport_stats(adapter,
						&adapter->stats_cmd);
		else
			be_cmd_get_stats(adapter, &adapter->stats_cmd);
	}

	/* Temperature polled every be_get_temp_freq ticks (power of 2) */
	if (MODULO(adapter->work_counter, adapter->be_get_temp_freq) == 0)
		be_cmd_get_die_temperature(adapter);

	/* Replenish RX rings that ran out of buffers under GFP_ATOMIC */
	for_all_rx_queues(adapter, rxo, i) {
		if (rxo->rx_post_starved) {
			rxo->rx_post_starved = false;
			be_post_rx_frags(rxo, GFP_KERNEL);
		}
	}

	for_all_evt_queues(adapter, eqo, i)
		be_eqd_update(adapter, eqo);

reschedule:
	adapter->work_counter++;
	schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
}
3982
3983 static bool be_reset_required(struct be_adapter *adapter)
3984 {
3985         return be_find_vfs(adapter, ENABLED) > 0 ? false : true;
3986 }
3987
3988 static char *mc_name(struct be_adapter *adapter)
3989 {
3990         if (adapter->function_mode & FLEX10_MODE)
3991                 return "FLEX10";
3992         else if (adapter->function_mode & VNIC_MODE)
3993                 return "vNIC";
3994         else if (adapter->function_mode & UMC_ENABLED)
3995                 return "UMC";
3996         else
3997                 return "";
3998 }
3999
/* "PF" for a physical function, "VF" for a virtual function */
static inline char *func_name(struct be_adapter *adapter)
{
	if (be_physfn(adapter))
		return "PF";
	return "VF";
}
4004
/* PCI probe: bring up one NIC function.
 * Enables the PCI device, sets the DMA mask, maps control structures
 * (be_ctrl_init), syncs with firmware, resets the function if needed,
 * then creates queues via be_setup() and registers the net_device.
 * Any failure unwinds through the goto ladder at the bottom in strict
 * reverse order of acquisition.
 * Returns 0 on success or a negative errno.
 */
static int __devinit be_probe(struct pci_dev *pdev,
                        const struct pci_device_id *pdev_id)
{
        int status = 0;
        struct be_adapter *adapter;
        struct net_device *netdev;
        char port_name;

        status = pci_enable_device(pdev);
        if (status)
                goto do_none;

        status = pci_request_regions(pdev, DRV_NAME);
        if (status)
                goto disable_dev;
        pci_set_master(pdev);

        netdev = alloc_etherdev_mqs(sizeof(*adapter), MAX_TX_QS, MAX_RX_QS);
        if (netdev == NULL) {
                status = -ENOMEM;
                goto rel_reg;
        }
        adapter = netdev_priv(netdev);
        adapter->pdev = pdev;
        pci_set_drvdata(pdev, adapter);
        adapter->netdev = netdev;
        SET_NETDEV_DEV(netdev, &pdev->dev);

        /* Prefer 64-bit DMA; fall back to 32-bit if unsupported */
        status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
        if (!status) {
                netdev->features |= NETIF_F_HIGHDMA;
        } else {
                status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
                if (status) {
                        dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
                        goto free_netdev;
                }
        }

        /* AER is best-effort: log the failure but continue probing */
        status = pci_enable_pcie_error_reporting(pdev);
        if (status)
                dev_err(&pdev->dev, "Could not use PCIe error reporting\n");

        status = be_ctrl_init(adapter);
        if (status)
                goto free_netdev;

        /* sync up with fw's ready state */
        if (be_physfn(adapter)) {
                status = be_fw_wait_ready(adapter);
                if (status)
                        goto ctrl_clean;
        }

        /* tell fw we're ready to fire cmds */
        status = be_cmd_fw_init(adapter);
        if (status)
                goto ctrl_clean;

        /* Skip the reset when VFs are already enabled (see
         * be_reset_required()) so their resources survive */
        if (be_reset_required(adapter)) {
                status = be_cmd_reset_function(adapter);
                if (status)
                        goto ctrl_clean;
        }

        /* The INTR bit may be set in the card when probed by a kdump kernel
         * after a crash.
         */
        if (!lancer_chip(adapter))
                be_intr_set(adapter, false);

        status = be_stats_init(adapter);
        if (status)
                goto ctrl_clean;

        status = be_get_initial_config(adapter);
        if (status)
                goto stats_clean;

        INIT_DELAYED_WORK(&adapter->work, be_worker);
        INIT_DELAYED_WORK(&adapter->func_recovery_work, be_func_recovery_task);
        adapter->rx_fc = adapter->tx_fc = true;

        status = be_setup(adapter);
        if (status)
                goto stats_clean;

        be_netdev_init(netdev);
        status = register_netdev(netdev);
        if (status != 0)
                goto unsetup;

        be_roce_dev_add(adapter);

        schedule_delayed_work(&adapter->func_recovery_work,
                              msecs_to_jiffies(1000));

        be_cmd_query_port_name(adapter, &port_name);

        dev_info(&pdev->dev, "%s: %s %s port %c\n", nic_name(pdev),
                 func_name(adapter), mc_name(adapter), port_name);

        return 0;

unsetup:
        be_clear(adapter);
stats_clean:
        be_stats_cleanup(adapter);
ctrl_clean:
        be_ctrl_cleanup(adapter);
free_netdev:
        free_netdev(netdev);
        /* clear drvdata so be_shutdown()/be_remove() see a dead device */
        pci_set_drvdata(pdev, NULL);
rel_reg:
        pci_release_regions(pdev);
disable_dev:
        pci_disable_device(pdev);
do_none:
        dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
        return status;
}
4126
/* PM suspend: quiesce the adapter before the system sleeps.
 * Arms wake-on-LAN if configured, stops the recovery worker, closes
 * the interface if it is up, frees HW queues and powers the PCI
 * device down to the requested state.  be_resume() undoes all of this.
 */
static int be_suspend(struct pci_dev *pdev, pm_message_t state)
{
        struct be_adapter *adapter = pci_get_drvdata(pdev);
        struct net_device *netdev =  adapter->netdev;

        if (adapter->wol)
                be_setup_wol(adapter, true);

        cancel_delayed_work_sync(&adapter->func_recovery_work);

        netif_device_detach(netdev);
        if (netif_running(netdev)) {
                rtnl_lock();
                be_close(netdev);
                rtnl_unlock();
        }
        /* tear down queues/irqs; be_resume() rebuilds them via be_setup() */
        be_clear(adapter);

        pci_save_state(pdev);
        pci_disable_device(pdev);
        pci_set_power_state(pdev, pci_choose_state(pdev, state));
        return 0;
}
4150
4151 static int be_resume(struct pci_dev *pdev)
4152 {
4153         int status = 0;
4154         struct be_adapter *adapter = pci_get_drvdata(pdev);
4155         struct net_device *netdev =  adapter->netdev;
4156
4157         netif_device_detach(netdev);
4158
4159         status = pci_enable_device(pdev);
4160         if (status)
4161                 return status;
4162
4163         pci_set_power_state(pdev, 0);
4164         pci_restore_state(pdev);
4165
4166         /* tell fw we're ready to fire cmds */
4167         status = be_cmd_fw_init(adapter);
4168         if (status)
4169                 return status;
4170
4171         be_setup(adapter);
4172         if (netif_running(netdev)) {
4173                 rtnl_lock();
4174                 be_open(netdev);
4175                 rtnl_unlock();
4176         }
4177
4178         schedule_delayed_work(&adapter->func_recovery_work,
4179                               msecs_to_jiffies(1000));
4180         netif_device_attach(netdev);
4181
4182         if (adapter->wol)
4183                 be_setup_wol(adapter, false);
4184
4185         return 0;
4186 }
4187
4188 /*
4189  * An FLR will stop BE from DMAing any data.
4190  */
4191 static void be_shutdown(struct pci_dev *pdev)
4192 {
4193         struct be_adapter *adapter = pci_get_drvdata(pdev);
4194
4195         if (!adapter)
4196                 return;
4197
4198         cancel_delayed_work_sync(&adapter->work);
4199         cancel_delayed_work_sync(&adapter->func_recovery_work);
4200
4201         netif_device_detach(adapter->netdev);
4202
4203         be_cmd_reset_function(adapter);
4204
4205         pci_disable_device(pdev);
4206 }
4207
/* PCI error handler: a fatal PCI (EEH/AER) error was detected on this
 * device.  Marks the adapter errored, quiesces the interface and frees
 * HW resources, then tells the PCI core whether to attempt a slot
 * reset (NEED_RESET) or give up (DISCONNECT on permanent failure).
 */
static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
                                pci_channel_state_t state)
{
        struct be_adapter *adapter = pci_get_drvdata(pdev);
        struct net_device *netdev =  adapter->netdev;

        dev_err(&adapter->pdev->dev, "EEH error detected\n");

        /* stops further HW access until be_eeh_reset() clears it */
        adapter->eeh_error = true;

        cancel_delayed_work_sync(&adapter->func_recovery_work);

        rtnl_lock();
        netif_device_detach(netdev);
        rtnl_unlock();

        if (netif_running(netdev)) {
                rtnl_lock();
                be_close(netdev);
                rtnl_unlock();
        }
        be_clear(adapter);

        /* link is dead for good: no reset will bring it back */
        if (state == pci_channel_io_perm_failure)
                return PCI_ERS_RESULT_DISCONNECT;

        pci_disable_device(pdev);

        /* The error could cause the FW to trigger a flash debug dump.
         * Resetting the card while flash dump is in progress
         * can cause it not to recover; wait for it to finish.
         * Wait only for first function as it is needed only once per
         * adapter.
         */
        if (pdev->devfn == 0)
                ssleep(30);

        return PCI_ERS_RESULT_NEED_RESET;
}
4247
/* PCI error handler: the slot has been reset.  Re-enable the device,
 * restore config space and wait for the firmware to become ready.
 * Returns RECOVERED so the core calls be_eeh_resume(), or DISCONNECT
 * if the device cannot be revived.
 */
static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
{
        struct be_adapter *adapter = pci_get_drvdata(pdev);
        int status;

        dev_info(&adapter->pdev->dev, "EEH reset\n");
        /* clear the error state recorded in be_eeh_err_detected() */
        be_clear_all_error(adapter);

        status = pci_enable_device(pdev);
        if (status)
                return PCI_ERS_RESULT_DISCONNECT;

        pci_set_master(pdev);
        pci_set_power_state(pdev, 0);
        pci_restore_state(pdev);

        /* Check if card is ok and fw is ready */
        status = be_fw_wait_ready(adapter);
        if (status)
                return PCI_ERS_RESULT_DISCONNECT;

        pci_cleanup_aer_uncorrect_error_status(pdev);
        return PCI_ERS_RESULT_RECOVERED;
}
4272
/* PCI error handler: traffic may flow again.  Re-init the FW command
 * path, reset the function, rebuild queues via be_setup() and reopen
 * the interface if it was running.  On failure there is nothing more
 * the PCI core can do, so just log the error and leave the netdev
 * detached.
 */
static void be_eeh_resume(struct pci_dev *pdev)
{
        int status = 0;
        struct be_adapter *adapter = pci_get_drvdata(pdev);
        struct net_device *netdev =  adapter->netdev;

        dev_info(&adapter->pdev->dev, "EEH resume\n");

        pci_save_state(pdev);

        /* tell fw we're ready to fire cmds */
        status = be_cmd_fw_init(adapter);
        if (status)
                goto err;

        status = be_cmd_reset_function(adapter);
        if (status)
                goto err;

        status = be_setup(adapter);
        if (status)
                goto err;

        if (netif_running(netdev)) {
                status = be_open(netdev);
                if (status)
                        goto err;
        }

        schedule_delayed_work(&adapter->func_recovery_work,
                              msecs_to_jiffies(1000));
        netif_device_attach(netdev);
        return;
err:
        dev_err(&adapter->pdev->dev, "EEH resume failed\n");
}
4309
/* EEH/AER recovery callbacks invoked by the PCI core:
 * error_detected -> slot_reset -> resume
 */
static const struct pci_error_handlers be_eeh_handlers = {
        .error_detected = be_eeh_err_detected,
        .slot_reset = be_eeh_reset,
        .resume = be_eeh_resume,
};
4315
/* PCI driver registration: entry points for probe/remove, legacy
 * power management and error recovery.
 */
static struct pci_driver be_driver = {
        .name = DRV_NAME,
        .id_table = be_dev_ids,
        .probe = be_probe,
        .remove = be_remove,
        .suspend = be_suspend,
        .resume = be_resume,
        .shutdown = be_shutdown,
        .err_handler = &be_eeh_handlers
};
4326
4327 static int __init be_init_module(void)
4328 {
4329         if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
4330             rx_frag_size != 2048) {
4331                 printk(KERN_WARNING DRV_NAME
4332                         " : Module param rx_frag_size must be 2048/4096/8192."
4333                         " Using 2048\n");
4334                 rx_frag_size = 2048;
4335         }
4336
4337         return pci_register_driver(&be_driver);
4338 }
4339 module_init(be_init_module);
4340
/* Module exit point: unregister the PCI driver; the core then calls
 * be_remove() for every bound device.
 */
static void __exit be_exit_module(void)
{
        pci_unregister_driver(&be_driver);
}
module_exit(be_exit_module);