Merge branch 'for-3.14-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/tj...
[sfrench/cifs-2.6.git] / drivers / net / ethernet / broadcom / bnx2.c
1 /* bnx2.c: Broadcom NX2 network driver.
2  *
3  * Copyright (c) 2004-2013 Broadcom Corporation
4  *
5  * This program is free software; you can redistribute it and/or modify
6  * it under the terms of the GNU General Public License as published by
7  * the Free Software Foundation.
8  *
9  * Written by: Michael Chan  (mchan@broadcom.com)
10  */
11
12 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
13
14 #include <linux/module.h>
15 #include <linux/moduleparam.h>
16
17 #include <linux/stringify.h>
18 #include <linux/kernel.h>
19 #include <linux/timer.h>
20 #include <linux/errno.h>
21 #include <linux/ioport.h>
22 #include <linux/slab.h>
23 #include <linux/vmalloc.h>
24 #include <linux/interrupt.h>
25 #include <linux/pci.h>
26 #include <linux/netdevice.h>
27 #include <linux/etherdevice.h>
28 #include <linux/skbuff.h>
29 #include <linux/dma-mapping.h>
30 #include <linux/bitops.h>
31 #include <asm/io.h>
32 #include <asm/irq.h>
33 #include <linux/delay.h>
34 #include <asm/byteorder.h>
35 #include <asm/page.h>
36 #include <linux/time.h>
37 #include <linux/ethtool.h>
38 #include <linux/mii.h>
39 #include <linux/if.h>
40 #include <linux/if_vlan.h>
41 #include <net/ip.h>
42 #include <net/tcp.h>
43 #include <net/checksum.h>
44 #include <linux/workqueue.h>
45 #include <linux/crc32.h>
46 #include <linux/prefetch.h>
47 #include <linux/cache.h>
48 #include <linux/firmware.h>
49 #include <linux/log2.h>
50 #include <linux/aer.h>
51
52 #if defined(CONFIG_CNIC) || defined(CONFIG_CNIC_MODULE)
53 #define BCM_CNIC 1
54 #include "cnic_if.h"
55 #endif
56 #include "bnx2.h"
57 #include "bnx2_fw.h"
58
/* Driver identity and the firmware images requested at probe time
 * via request_firmware().  The 09 parts (5709/5716) need a separate
 * RV2P image for A-step silicon.
 */
#define DRV_MODULE_NAME		"bnx2"
#define DRV_MODULE_VERSION	"2.2.5"
#define DRV_MODULE_RELDATE	"December 20, 2013"
#define FW_MIPS_FILE_06		"bnx2/bnx2-mips-06-6.2.3.fw"
#define FW_RV2P_FILE_06		"bnx2/bnx2-rv2p-06-6.0.15.fw"
#define FW_MIPS_FILE_09		"bnx2/bnx2-mips-09-6.2.1b.fw"
#define FW_RV2P_FILE_09_Ax	"bnx2/bnx2-rv2p-09ax-6.0.17.fw"
#define FW_RV2P_FILE_09		"bnx2/bnx2-rv2p-09-6.0.17.fw"

/* Absolute time helper for timer arming. */
#define RUN_AT(x) (jiffies + (x))

/* Time in jiffies before concluding the transmitter is hung. */
#define TX_TIMEOUT  (5*HZ)

static char version[] =
	"Broadcom NetXtreme II Gigabit Ethernet Driver " DRV_MODULE_NAME " v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Michael Chan <mchan@broadcom.com>");
MODULE_DESCRIPTION("Broadcom NetXtreme II BCM5706/5708/5709/5716 Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
MODULE_FIRMWARE(FW_MIPS_FILE_06);
MODULE_FIRMWARE(FW_RV2P_FILE_06);
MODULE_FIRMWARE(FW_MIPS_FILE_09);
MODULE_FIRMWARE(FW_RV2P_FILE_09);
MODULE_FIRMWARE(FW_RV2P_FILE_09_Ax);
85
86 static int disable_msi = 0;
87
88 module_param(disable_msi, int, S_IRUGO);
89 MODULE_PARM_DESC(disable_msi, "Disable Message Signaled Interrupt (MSI)");
90
/* Board identifiers; each value doubles as an index into board_info[]
 * below and is carried in the PCI table's driver_data field.
 */
typedef enum {
	BCM5706 = 0,
	NC370T,
	NC370I,
	BCM5706S,
	NC370F,
	BCM5708,
	BCM5708S,
	BCM5709,
	BCM5709S,
	BCM5716,
	BCM5716S,
} board_t;
104
/* Marketing names for each supported board; indexed by board_t, above,
 * so the order here must match the enum exactly.
 */
static struct {
	char *name;
} board_info[] = {
	{ "Broadcom NetXtreme II BCM5706 1000Base-T" },
	{ "HP NC370T Multifunction Gigabit Server Adapter" },
	{ "HP NC370i Multifunction Gigabit Server Adapter" },
	{ "Broadcom NetXtreme II BCM5706 1000Base-SX" },
	{ "HP NC370F Multifunction Gigabit Server Adapter" },
	{ "Broadcom NetXtreme II BCM5708 1000Base-T" },
	{ "Broadcom NetXtreme II BCM5708 1000Base-SX" },
	{ "Broadcom NetXtreme II BCM5709 1000Base-T" },
	{ "Broadcom NetXtreme II BCM5709 1000Base-SX" },
	{ "Broadcom NetXtreme II BCM5716 1000Base-T" },
	{ "Broadcom NetXtreme II BCM5716 1000Base-SX" },
	};
121
122 static DEFINE_PCI_DEVICE_TABLE(bnx2_pci_tbl) = {
123         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
124           PCI_VENDOR_ID_HP, 0x3101, 0, 0, NC370T },
125         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
126           PCI_VENDOR_ID_HP, 0x3106, 0, 0, NC370I },
127         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
128           PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706 },
129         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708,
130           PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708 },
131         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
132           PCI_VENDOR_ID_HP, 0x3102, 0, 0, NC370F },
133         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
134           PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706S },
135         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708S,
136           PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708S },
137         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709,
138           PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709 },
139         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709S,
140           PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709S },
141         { PCI_VENDOR_ID_BROADCOM, 0x163b,
142           PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5716 },
143         { PCI_VENDOR_ID_BROADCOM, 0x163c,
144           PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5716S },
145         { 0, }
146 };
147
/* NVRAM descriptor table for the non-5709 chips.  The entry matching
 * the board's flash strapping is selected at init time.  The first five
 * hex words per entry appear to be controller configuration values
 * (strap/config/command words -- TODO confirm against struct flash_spec
 * in bnx2.h); the remaining fields give buffering flags, page geometry,
 * address mask, total size, and a human-readable name.
 */
static const struct flash_spec flash_table[] =
{
#define BUFFERED_FLAGS		(BNX2_NV_BUFFERED | BNX2_NV_TRANSLATE)
#define NONBUFFERED_FLAGS	(BNX2_NV_WREN)
	/* Slow EEPROM */
	{0x00000000, 0x40830380, 0x009f0081, 0xa184a053, 0xaf000400,
	 BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
	 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
	 "EEPROM - slow"},
	/* Expansion entry 0001 */
	{0x08000002, 0x4b808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 0001"},
	/* Saifun SA25F010 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x04000001, 0x47808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*2,
	 "Non-buffered flash (128kB)"},
	/* Saifun SA25F020 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x0c000003, 0x4f808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*4,
	 "Non-buffered flash (256kB)"},
	/* Expansion entry 0100 */
	{0x11000000, 0x53808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 0100"},
	/* Entry 0101: ST M45PE10 (non-buffered flash, TetonII B0) */
	{0x19000002, 0x5b808201, 0x000500db, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
	 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*2,
	 "Entry 0101: ST M45PE10 (128kB non-bufferred)"},
	/* Entry 0110: ST M45PE20 (non-buffered flash)*/
	{0x15000001, 0x57808201, 0x000500db, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
	 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*4,
	 "Entry 0110: ST M45PE20 (256kB non-bufferred)"},
	/* Saifun SA25F005 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x1d000003, 0x5f808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE,
	 "Non-buffered flash (64kB)"},
	/* Fast EEPROM */
	{0x22000000, 0x62808380, 0x009f0081, 0xa184a053, 0xaf000400,
	 BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
	 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
	 "EEPROM - fast"},
	/* Expansion entry 1001 */
	{0x2a000002, 0x6b808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1001"},
	/* Expansion entry 1010 */
	{0x26000001, 0x67808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1010"},
	/* ATMEL AT45DB011B (buffered flash) */
	{0x2e000003, 0x6e808273, 0x00570081, 0x68848353, 0xaf000400,
	 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE,
	 "Buffered flash (128kB)"},
	/* Expansion entry 1100 */
	{0x33000000, 0x73808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1100"},
	/* Expansion entry 1101 */
	{0x3b000002, 0x7b808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1101"},
	/* Ateml Expansion entry 1110 */
	{0x37000001, 0x76808273, 0x00570081, 0x68848353, 0xaf000400,
	 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1110 (Atmel)"},
	/* ATMEL AT45DB021B (buffered flash) */
	{0x3f000003, 0x7e808273, 0x00570081, 0x68848353, 0xaf000400,
	 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE*2,
	 "Buffered flash (256kB)"},
};
236
/* The 5709/5716 family has a single known NVRAM layout, so it gets a
 * dedicated descriptor instead of a flash_table[] lookup.
 */
static const struct flash_spec flash_5709 = {
	.flags		= BNX2_NV_BUFFERED,
	.page_bits	= BCM5709_FLASH_PAGE_BITS,
	.page_size	= BCM5709_FLASH_PAGE_SIZE,
	.addr_mask	= BCM5709_FLASH_BYTE_ADDR_MASK,
	.total_size	= BUFFERED_FLASH_TOTAL_SIZE*2,
	.name		= "5709 Buffered flash (256kB)",
};
245
246 MODULE_DEVICE_TABLE(pci, bnx2_pci_tbl);
247
248 static void bnx2_init_napi(struct bnx2 *bp);
249 static void bnx2_del_napi(struct bnx2 *bp);
250
251 static inline u32 bnx2_tx_avail(struct bnx2 *bp, struct bnx2_tx_ring_info *txr)
252 {
253         u32 diff;
254
255         /* Tell compiler to fetch tx_prod and tx_cons from memory. */
256         barrier();
257
258         /* The ring uses 256 indices for 255 entries, one of them
259          * needs to be skipped.
260          */
261         diff = txr->tx_prod - txr->tx_cons;
262         if (unlikely(diff >= BNX2_TX_DESC_CNT)) {
263                 diff &= 0xffff;
264                 if (diff == BNX2_TX_DESC_CNT)
265                         diff = BNX2_MAX_TX_DESC_CNT;
266         }
267         return bp->tx_ring_size - diff;
268 }
269
270 static u32
271 bnx2_reg_rd_ind(struct bnx2 *bp, u32 offset)
272 {
273         u32 val;
274
275         spin_lock_bh(&bp->indirect_lock);
276         BNX2_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
277         val = BNX2_RD(bp, BNX2_PCICFG_REG_WINDOW);
278         spin_unlock_bh(&bp->indirect_lock);
279         return val;
280 }
281
/* Write @val to device register @offset through the PCICFG register
 * window.  The address write and the data write must stay paired, so
 * the sequence is serialized with indirect_lock.
 */
static void
bnx2_reg_wr_ind(struct bnx2 *bp, u32 offset, u32 val)
{
	spin_lock_bh(&bp->indirect_lock);
	BNX2_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
	BNX2_WR(bp, BNX2_PCICFG_REG_WINDOW, val);
	spin_unlock_bh(&bp->indirect_lock);
}
290
/* Write a word into the firmware shared-memory region at @offset
 * relative to shmem_base.
 */
static void
bnx2_shmem_wr(struct bnx2 *bp, u32 offset, u32 val)
{
	bnx2_reg_wr_ind(bp, bp->shmem_base + offset, val);
}
296
/* Read a word from the firmware shared-memory region at @offset
 * relative to shmem_base.
 */
static u32
bnx2_shmem_rd(struct bnx2 *bp, u32 offset)
{
	return bnx2_reg_rd_ind(bp, bp->shmem_base + offset);
}
302
/* Write @val into context memory at @cid_addr + @offset.
 *
 * The 5709 uses a data/control register pair and requires polling for
 * the WRITE_REQ bit to clear (up to 5 tries, 5us apart; a timeout is
 * silently ignored).  Older chips use a simple address/data pair.
 * Both sequences are serialized with indirect_lock.
 */
static void
bnx2_ctx_wr(struct bnx2 *bp, u32 cid_addr, u32 offset, u32 val)
{
	offset += cid_addr;
	spin_lock_bh(&bp->indirect_lock);
	if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
		int i;

		BNX2_WR(bp, BNX2_CTX_CTX_DATA, val);
		BNX2_WR(bp, BNX2_CTX_CTX_CTRL,
			offset | BNX2_CTX_CTX_CTRL_WRITE_REQ);
		/* Poll until the chip consumes the write request. */
		for (i = 0; i < 5; i++) {
			val = BNX2_RD(bp, BNX2_CTX_CTX_CTRL);
			if ((val & BNX2_CTX_CTX_CTRL_WRITE_REQ) == 0)
				break;
			udelay(5);
		}
	} else {
		BNX2_WR(bp, BNX2_CTX_DATA_ADR, offset);
		BNX2_WR(bp, BNX2_CTX_DATA, val);
	}
	spin_unlock_bh(&bp->indirect_lock);
}
326
327 #ifdef BCM_CNIC
328 static int
329 bnx2_drv_ctl(struct net_device *dev, struct drv_ctl_info *info)
330 {
331         struct bnx2 *bp = netdev_priv(dev);
332         struct drv_ctl_io *io = &info->data.io;
333
334         switch (info->cmd) {
335         case DRV_CTL_IO_WR_CMD:
336                 bnx2_reg_wr_ind(bp, io->offset, io->data);
337                 break;
338         case DRV_CTL_IO_RD_CMD:
339                 io->data = bnx2_reg_rd_ind(bp, io->offset);
340                 break;
341         case DRV_CTL_CTX_WR_CMD:
342                 bnx2_ctx_wr(bp, io->cid_addr, io->offset, io->data);
343                 break;
344         default:
345                 return -EINVAL;
346         }
347         return 0;
348 }
349
/* Fill in the single IRQ slot handed to the CNIC driver.
 *
 * With MSI-X, CNIC gets its own vector (index bp->irq_nvecs) and its
 * own MSI-X status block; otherwise it shares vector 0 and is polled
 * from the bnx2 NAPI path (cnic_present/cnic_tag).
 */
static void bnx2_setup_cnic_irq_info(struct bnx2 *bp)
{
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
	struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
	int sb_id;

	if (bp->flags & BNX2_FLAG_USING_MSIX) {
		cp->drv_state |= CNIC_DRV_STATE_USING_MSIX;
		bnapi->cnic_present = 0;
		sb_id = bp->irq_nvecs;
		cp->irq_arr[0].irq_flags |= CNIC_IRQ_FL_MSIX;
	} else {
		cp->drv_state &= ~CNIC_DRV_STATE_USING_MSIX;
		bnapi->cnic_tag = bnapi->last_status_idx;
		bnapi->cnic_present = 1;
		sb_id = 0;
		cp->irq_arr[0].irq_flags &= ~CNIC_IRQ_FL_MSIX;
	}

	cp->irq_arr[0].vector = bp->irq_tbl[sb_id].vector;
	/* Status blocks are laid out contiguously at MSIX alignment. */
	cp->irq_arr[0].status_blk = (void *)
		((unsigned long) bnapi->status_blk.msi +
		(BNX2_SBLK_MSIX_ALIGN_SIZE * sb_id));
	cp->irq_arr[0].status_blk_num = sb_id;
	cp->num_irq = 1;
}
376
/* Register the CNIC driver's ops with this device.
 *
 * Fails with -EBUSY if already registered, or -ENODEV when firmware
 * reports no iSCSI connections.  cnic_data is set before cnic_ops is
 * published with rcu_assign_pointer() so RCU readers that see the ops
 * pointer also see valid data.
 */
static int bnx2_register_cnic(struct net_device *dev, struct cnic_ops *ops,
			      void *data)
{
	struct bnx2 *bp = netdev_priv(dev);
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	if (ops == NULL)
		return -EINVAL;

	if (cp->drv_state & CNIC_DRV_STATE_REGD)
		return -EBUSY;

	if (!bnx2_reg_rd_ind(bp, BNX2_FW_MAX_ISCSI_CONN))
		return -ENODEV;

	bp->cnic_data = data;
	rcu_assign_pointer(bp->cnic_ops, ops);

	cp->num_irq = 0;
	cp->drv_state = CNIC_DRV_STATE_REGD;

	bnx2_setup_cnic_irq_info(bp);

	return 0;
}
402
/* Unregister the CNIC driver from this device.
 *
 * State is cleared under cnic_lock, then synchronize_rcu() waits for
 * any RCU readers still using the old cnic_ops pointer to drain
 * before returning.  Always succeeds.
 */
static int bnx2_unregister_cnic(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	mutex_lock(&bp->cnic_lock);
	cp->drv_state = 0;
	bnapi->cnic_present = 0;
	RCU_INIT_POINTER(bp->cnic_ops, NULL);
	mutex_unlock(&bp->cnic_lock);
	synchronize_rcu();
	return 0;
}
417
418 static struct cnic_eth_dev *bnx2_cnic_probe(struct net_device *dev)
419 {
420         struct bnx2 *bp = netdev_priv(dev);
421         struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
422
423         if (!cp->max_iscsi_conn)
424                 return NULL;
425
426         cp->drv_owner = THIS_MODULE;
427         cp->chip_id = bp->chip_id;
428         cp->pdev = bp->pdev;
429         cp->io_base = bp->regview;
430         cp->drv_ctl = bnx2_drv_ctl;
431         cp->drv_register_cnic = bnx2_register_cnic;
432         cp->drv_unregister_cnic = bnx2_unregister_cnic;
433
434         return cp;
435 }
436
437 static void
438 bnx2_cnic_stop(struct bnx2 *bp)
439 {
440         struct cnic_ops *c_ops;
441         struct cnic_ctl_info info;
442
443         mutex_lock(&bp->cnic_lock);
444         c_ops = rcu_dereference_protected(bp->cnic_ops,
445                                           lockdep_is_held(&bp->cnic_lock));
446         if (c_ops) {
447                 info.cmd = CNIC_CTL_STOP_CMD;
448                 c_ops->cnic_ctl(bp->cnic_data, &info);
449         }
450         mutex_unlock(&bp->cnic_lock);
451 }
452
453 static void
454 bnx2_cnic_start(struct bnx2 *bp)
455 {
456         struct cnic_ops *c_ops;
457         struct cnic_ctl_info info;
458
459         mutex_lock(&bp->cnic_lock);
460         c_ops = rcu_dereference_protected(bp->cnic_ops,
461                                           lockdep_is_held(&bp->cnic_lock));
462         if (c_ops) {
463                 if (!(bp->flags & BNX2_FLAG_USING_MSIX)) {
464                         struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
465
466                         bnapi->cnic_tag = bnapi->last_status_idx;
467                 }
468                 info.cmd = CNIC_CTL_START_CMD;
469                 c_ops->cnic_ctl(bp->cnic_data, &info);
470         }
471         mutex_unlock(&bp->cnic_lock);
472 }
473
#else

/* CNIC support compiled out: provide no-op start/stop hooks so the
 * rest of the driver can call them unconditionally.
 */
static void
bnx2_cnic_stop(struct bnx2 *bp)
{
}

static void
bnx2_cnic_start(struct bnx2 *bp)
{
}

#endif
487
/* Read PHY register @reg over the EMAC MDIO interface.
 *
 * If the chip is auto-polling the PHY, auto-poll is disabled around
 * the manual transaction and re-enabled afterwards (with 40us settle
 * delays).  Completion is polled up to 50 times, 10us apart.
 *
 * Returns 0 with the 16-bit value in *@val on success, or -EBUSY with
 * *@val set to 0 if the transaction never completed.
 */
static int
bnx2_read_phy(struct bnx2 *bp, u32 reg, u32 *val)
{
	u32 val1;
	int i, ret;

	if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
		val1 = BNX2_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		BNX2_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		BNX2_RD(bp, BNX2_EMAC_MDIO_MODE);

		udelay(40);
	}

	/* Compose and fire the read command. */
	val1 = (bp->phy_addr << 21) | (reg << 16) |
		BNX2_EMAC_MDIO_COMM_COMMAND_READ | BNX2_EMAC_MDIO_COMM_DISEXT |
		BNX2_EMAC_MDIO_COMM_START_BUSY;
	BNX2_WR(bp, BNX2_EMAC_MDIO_COMM, val1);

	for (i = 0; i < 50; i++) {
		udelay(10);

		val1 = BNX2_RD(bp, BNX2_EMAC_MDIO_COMM);
		if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
			udelay(5);

			/* Re-read to pick up the data bits. */
			val1 = BNX2_RD(bp, BNX2_EMAC_MDIO_COMM);
			val1 &= BNX2_EMAC_MDIO_COMM_DATA;

			break;
		}
	}

	if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY) {
		*val = 0x0;
		ret = -EBUSY;
	}
	else {
		*val = val1;
		ret = 0;
	}

	if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
		val1 = BNX2_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		BNX2_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		BNX2_RD(bp, BNX2_EMAC_MDIO_MODE);

		udelay(40);
	}

	return ret;
}
544
/* Write @val to PHY register @reg over the EMAC MDIO interface.
 *
 * Mirrors bnx2_read_phy(): auto-poll is suspended around the manual
 * transaction and completion is polled up to 50 times, 10us apart.
 *
 * Returns 0 on success or -EBUSY if the transaction never completed.
 */
static int
bnx2_write_phy(struct bnx2 *bp, u32 reg, u32 val)
{
	u32 val1;
	int i, ret;

	if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
		val1 = BNX2_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		BNX2_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		BNX2_RD(bp, BNX2_EMAC_MDIO_MODE);

		udelay(40);
	}

	/* Compose and fire the write command (data in the low bits). */
	val1 = (bp->phy_addr << 21) | (reg << 16) | val |
		BNX2_EMAC_MDIO_COMM_COMMAND_WRITE |
		BNX2_EMAC_MDIO_COMM_START_BUSY | BNX2_EMAC_MDIO_COMM_DISEXT;
	BNX2_WR(bp, BNX2_EMAC_MDIO_COMM, val1);

	for (i = 0; i < 50; i++) {
		udelay(10);

		val1 = BNX2_RD(bp, BNX2_EMAC_MDIO_COMM);
		if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
			udelay(5);
			break;
		}
	}

	if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)
		ret = -EBUSY;
	else
		ret = 0;

	if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
		val1 = BNX2_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		BNX2_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		BNX2_RD(bp, BNX2_EMAC_MDIO_MODE);

		udelay(40);
	}

	return ret;
}
593
594 static void
595 bnx2_disable_int(struct bnx2 *bp)
596 {
597         int i;
598         struct bnx2_napi *bnapi;
599
600         for (i = 0; i < bp->irq_nvecs; i++) {
601                 bnapi = &bp->bnx2_napi[i];
602                 BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
603                        BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
604         }
605         BNX2_RD(bp, BNX2_PCICFG_INT_ACK_CMD);
606 }
607
/* Re-enable interrupts on every vector.
 *
 * Each vector gets two ack-command writes carrying the current status
 * index: the first still has MASK_INT set, the second clears it to
 * unmask the vector.  A final COAL_NOW kick to the host coalescing
 * block follows (presumably to force a fresh status-block update --
 * TODO confirm against chip docs).
 */
static void
bnx2_enable_int(struct bnx2 *bp)
{
	int i;
	struct bnx2_napi *bnapi;

	for (i = 0; i < bp->irq_nvecs; i++) {
		bnapi = &bp->bnx2_napi[i];

		BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
			BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
			BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
			bnapi->last_status_idx);

		BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
			BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
			bnapi->last_status_idx);
	}
	BNX2_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
}
628
629 static void
630 bnx2_disable_int_sync(struct bnx2 *bp)
631 {
632         int i;
633
634         atomic_inc(&bp->intr_sem);
635         if (!netif_running(bp->dev))
636                 return;
637
638         bnx2_disable_int(bp);
639         for (i = 0; i < bp->irq_nvecs; i++)
640                 synchronize_irq(bp->irq_tbl[i].vector);
641 }
642
643 static void
644 bnx2_napi_disable(struct bnx2 *bp)
645 {
646         int i;
647
648         for (i = 0; i < bp->irq_nvecs; i++)
649                 napi_disable(&bp->bnx2_napi[i].napi);
650 }
651
652 static void
653 bnx2_napi_enable(struct bnx2 *bp)
654 {
655         int i;
656
657         for (i = 0; i < bp->irq_nvecs; i++)
658                 napi_enable(&bp->bnx2_napi[i].napi);
659 }
660
/* Quiesce the interface: optionally stop CNIC first, then NAPI and
 * the TX queues, then interrupts (synchronously).  Carrier is forced
 * off last to prevent a TX watchdog timeout while stopped.
 */
static void
bnx2_netif_stop(struct bnx2 *bp, bool stop_cnic)
{
	if (stop_cnic)
		bnx2_cnic_stop(bp);
	if (netif_running(bp->dev)) {
		bnx2_napi_disable(bp);
		netif_tx_disable(bp->dev);
	}
	bnx2_disable_int_sync(bp);
	netif_carrier_off(bp->dev);	/* prevent tx timeout */
}
673
/* Undo bnx2_netif_stop().  intr_sem counts nested stops; only the
 * final matching start (when the counter drops to zero) actually
 * re-enables the queues, NAPI, interrupts, and (optionally) CNIC.
 * Carrier is restored under phy_lock from the cached link state.
 */
static void
bnx2_netif_start(struct bnx2 *bp, bool start_cnic)
{
	if (atomic_dec_and_test(&bp->intr_sem)) {
		if (netif_running(bp->dev)) {
			netif_tx_wake_all_queues(bp->dev);
			spin_lock_bh(&bp->phy_lock);
			if (bp->link_up)
				netif_carrier_on(bp->dev);
			spin_unlock_bh(&bp->phy_lock);
			bnx2_napi_enable(bp);
			bnx2_enable_int(bp);
			if (start_cnic)
				bnx2_cnic_start(bp);
		}
	}
}
691
692 static void
693 bnx2_free_tx_mem(struct bnx2 *bp)
694 {
695         int i;
696
697         for (i = 0; i < bp->num_tx_rings; i++) {
698                 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
699                 struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
700
701                 if (txr->tx_desc_ring) {
702                         dma_free_coherent(&bp->pdev->dev, TXBD_RING_SIZE,
703                                           txr->tx_desc_ring,
704                                           txr->tx_desc_mapping);
705                         txr->tx_desc_ring = NULL;
706                 }
707                 kfree(txr->tx_buf_ring);
708                 txr->tx_buf_ring = NULL;
709         }
710 }
711
712 static void
713 bnx2_free_rx_mem(struct bnx2 *bp)
714 {
715         int i;
716
717         for (i = 0; i < bp->num_rx_rings; i++) {
718                 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
719                 struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
720                 int j;
721
722                 for (j = 0; j < bp->rx_max_ring; j++) {
723                         if (rxr->rx_desc_ring[j])
724                                 dma_free_coherent(&bp->pdev->dev, RXBD_RING_SIZE,
725                                                   rxr->rx_desc_ring[j],
726                                                   rxr->rx_desc_mapping[j]);
727                         rxr->rx_desc_ring[j] = NULL;
728                 }
729                 vfree(rxr->rx_buf_ring);
730                 rxr->rx_buf_ring = NULL;
731
732                 for (j = 0; j < bp->rx_max_pg_ring; j++) {
733                         if (rxr->rx_pg_desc_ring[j])
734                                 dma_free_coherent(&bp->pdev->dev, RXBD_RING_SIZE,
735                                                   rxr->rx_pg_desc_ring[j],
736                                                   rxr->rx_pg_desc_mapping[j]);
737                         rxr->rx_pg_desc_ring[j] = NULL;
738                 }
739                 vfree(rxr->rx_pg_ring);
740                 rxr->rx_pg_ring = NULL;
741         }
742 }
743
744 static int
745 bnx2_alloc_tx_mem(struct bnx2 *bp)
746 {
747         int i;
748
749         for (i = 0; i < bp->num_tx_rings; i++) {
750                 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
751                 struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
752
753                 txr->tx_buf_ring = kzalloc(SW_TXBD_RING_SIZE, GFP_KERNEL);
754                 if (txr->tx_buf_ring == NULL)
755                         return -ENOMEM;
756
757                 txr->tx_desc_ring =
758                         dma_alloc_coherent(&bp->pdev->dev, TXBD_RING_SIZE,
759                                            &txr->tx_desc_mapping, GFP_KERNEL);
760                 if (txr->tx_desc_ring == NULL)
761                         return -ENOMEM;
762         }
763         return 0;
764 }
765
766 static int
767 bnx2_alloc_rx_mem(struct bnx2 *bp)
768 {
769         int i;
770
771         for (i = 0; i < bp->num_rx_rings; i++) {
772                 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
773                 struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
774                 int j;
775
776                 rxr->rx_buf_ring =
777                         vzalloc(SW_RXBD_RING_SIZE * bp->rx_max_ring);
778                 if (rxr->rx_buf_ring == NULL)
779                         return -ENOMEM;
780
781                 for (j = 0; j < bp->rx_max_ring; j++) {
782                         rxr->rx_desc_ring[j] =
783                                 dma_alloc_coherent(&bp->pdev->dev,
784                                                    RXBD_RING_SIZE,
785                                                    &rxr->rx_desc_mapping[j],
786                                                    GFP_KERNEL);
787                         if (rxr->rx_desc_ring[j] == NULL)
788                                 return -ENOMEM;
789
790                 }
791
792                 if (bp->rx_pg_ring_size) {
793                         rxr->rx_pg_ring = vzalloc(SW_RXPG_RING_SIZE *
794                                                   bp->rx_max_pg_ring);
795                         if (rxr->rx_pg_ring == NULL)
796                                 return -ENOMEM;
797
798                 }
799
800                 for (j = 0; j < bp->rx_max_pg_ring; j++) {
801                         rxr->rx_pg_desc_ring[j] =
802                                 dma_alloc_coherent(&bp->pdev->dev,
803                                                    RXBD_RING_SIZE,
804                                                    &rxr->rx_pg_desc_mapping[j],
805                                                    GFP_KERNEL);
806                         if (rxr->rx_pg_desc_ring[j] == NULL)
807                                 return -ENOMEM;
808
809                 }
810         }
811         return 0;
812 }
813
814 static void
815 bnx2_free_mem(struct bnx2 *bp)
816 {
817         int i;
818         struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
819
820         bnx2_free_tx_mem(bp);
821         bnx2_free_rx_mem(bp);
822
823         for (i = 0; i < bp->ctx_pages; i++) {
824                 if (bp->ctx_blk[i]) {
825                         dma_free_coherent(&bp->pdev->dev, BNX2_PAGE_SIZE,
826                                           bp->ctx_blk[i],
827                                           bp->ctx_blk_mapping[i]);
828                         bp->ctx_blk[i] = NULL;
829                 }
830         }
831         if (bnapi->status_blk.msi) {
832                 dma_free_coherent(&bp->pdev->dev, bp->status_stats_size,
833                                   bnapi->status_blk.msi,
834                                   bp->status_blk_mapping);
835                 bnapi->status_blk.msi = NULL;
836                 bp->stats_blk = NULL;
837         }
838 }
839
/* Allocate all host memory the chip DMAs into: the combined
 * status + statistics block, 5709 context pages, and the RX/TX
 * ring memory.  Returns 0 or -ENOMEM; on failure everything
 * already allocated is released through bnx2_free_mem().
 */
static int
bnx2_alloc_mem(struct bnx2 *bp)
{
	int i, status_blk_size, err;
	struct bnx2_napi *bnapi;
	void *status_blk;

	/* Combine status and statistics blocks into one allocation. */
	status_blk_size = L1_CACHE_ALIGN(sizeof(struct status_block));
	if (bp->flags & BNX2_FLAG_MSIX_CAP)
		/* One aligned status block slot per possible MSI-X vector. */
		status_blk_size = L1_CACHE_ALIGN(BNX2_MAX_MSIX_HW_VEC *
						 BNX2_SBLK_MSIX_ALIGN_SIZE);
	bp->status_stats_size = status_blk_size +
				sizeof(struct statistics_block);

	status_blk = dma_zalloc_coherent(&bp->pdev->dev, bp->status_stats_size,
					 &bp->status_blk_mapping, GFP_KERNEL);
	if (status_blk == NULL)
		goto alloc_mem_err;

	/* Vector 0 uses the base (MSI-style) status block layout. */
	bnapi = &bp->bnx2_napi[0];
	bnapi->status_blk.msi = status_blk;
	bnapi->hw_tx_cons_ptr =
		&bnapi->status_blk.msi->status_tx_quick_consumer_index0;
	bnapi->hw_rx_cons_ptr =
		&bnapi->status_blk.msi->status_rx_quick_consumer_index0;
	if (bp->flags & BNX2_FLAG_MSIX_CAP) {
		/* Remaining vectors each get their own MSI-X status block
		 * carved out of the same allocation.
		 */
		for (i = 1; i < bp->irq_nvecs; i++) {
			struct status_block_msix *sblk;

			bnapi = &bp->bnx2_napi[i];

			sblk = (status_blk + BNX2_SBLK_MSIX_ALIGN_SIZE * i);
			bnapi->status_blk.msix = sblk;
			bnapi->hw_tx_cons_ptr =
				&sblk->status_tx_quick_consumer_index;
			bnapi->hw_rx_cons_ptr =
				&sblk->status_rx_quick_consumer_index;
			bnapi->int_num = i << 24;
		}
	}

	/* Statistics block follows the status block(s). */
	bp->stats_blk = status_blk + status_blk_size;

	bp->stats_blk_mapping = bp->status_blk_mapping + status_blk_size;

	if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
		/* 5709 needs 8KB (0x2000) of host context memory, split
		 * into page-sized chunks; at least one page.
		 */
		bp->ctx_pages = 0x2000 / BNX2_PAGE_SIZE;
		if (bp->ctx_pages == 0)
			bp->ctx_pages = 1;
		for (i = 0; i < bp->ctx_pages; i++) {
			bp->ctx_blk[i] = dma_alloc_coherent(&bp->pdev->dev,
						BNX2_PAGE_SIZE,
						&bp->ctx_blk_mapping[i],
						GFP_KERNEL);
			if (bp->ctx_blk[i] == NULL)
				goto alloc_mem_err;
		}
	}

	err = bnx2_alloc_rx_mem(bp);
	if (err)
		goto alloc_mem_err;

	err = bnx2_alloc_tx_mem(bp);
	if (err)
		goto alloc_mem_err;

	return 0;

alloc_mem_err:
	/* Unwind any partial allocations. */
	bnx2_free_mem(bp);
	return -ENOMEM;
}
914
/* Publish the resolved link state (speed/duplex, link up/down,
 * autoneg result) to the bootcode via the BNX2_LINK_STATUS shared
 * memory word.  Skipped when a remote firmware-managed PHY owns
 * the link.
 */
static void
bnx2_report_fw_link(struct bnx2 *bp)
{
	u32 fw_link_status = 0;

	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
		return;

	if (bp->link_up) {
		u32 bmsr;

		/* Encode speed + duplex into the firmware status word. */
		switch (bp->line_speed) {
		case SPEED_10:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_10HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_10FULL;
			break;
		case SPEED_100:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_100HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_100FULL;
			break;
		case SPEED_1000:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_1000HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_1000FULL;
			break;
		case SPEED_2500:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_2500HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_2500FULL;
			break;
		}

		fw_link_status |= BNX2_LINK_STATUS_LINK_UP;

		if (bp->autoneg) {
			fw_link_status |= BNX2_LINK_STATUS_AN_ENABLED;

			/* BMSR bits are latched; read twice so the second
			 * read reflects the current state.
			 */
			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);

			if (!(bmsr & BMSR_ANEGCOMPLETE) ||
			    bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT)
				fw_link_status |= BNX2_LINK_STATUS_PARALLEL_DET;
			else
				fw_link_status |= BNX2_LINK_STATUS_AN_COMPLETE;
		}
	}
	else
		fw_link_status = BNX2_LINK_STATUS_LINK_DOWN;

	bnx2_shmem_wr(bp, BNX2_LINK_STATUS, fw_link_status);
}
973
974 static char *
975 bnx2_xceiver_str(struct bnx2 *bp)
976 {
977         return (bp->phy_port == PORT_FIBRE) ? "SerDes" :
978                 ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) ? "Remote Copper" :
979                  "Copper");
980 }
981
/* Update the netdev carrier state, log the link change on the
 * console, and forward the state to the bootcode via
 * bnx2_report_fw_link().
 */
static void
bnx2_report_link(struct bnx2 *bp)
{
	if (bp->link_up) {
		netif_carrier_on(bp->dev);
		/* No trailing newline here: flow control details are
		 * appended to the same console line with pr_cont().
		 */
		netdev_info(bp->dev, "NIC %s Link is Up, %d Mbps %s duplex",
			    bnx2_xceiver_str(bp),
			    bp->line_speed,
			    bp->duplex == DUPLEX_FULL ? "full" : "half");

		if (bp->flow_ctrl) {
			if (bp->flow_ctrl & FLOW_CTRL_RX) {
				pr_cont(", receive ");
				if (bp->flow_ctrl & FLOW_CTRL_TX)
					pr_cont("& transmit ");
			}
			else {
				pr_cont(", transmit ");
			}
			pr_cont("flow control ON");
		}
		pr_cont("\n");
	} else {
		netif_carrier_off(bp->dev);
		netdev_err(bp->dev, "NIC %s Link is Down\n",
			   bnx2_xceiver_str(bp));
	}

	bnx2_report_fw_link(bp);
}
1012
/* Resolve the pause (flow control) setting for the current link
 * into bp->flow_ctrl.  If speed+pause autoneg is not fully enabled,
 * the requested flow control is applied directly (full duplex
 * only).  Otherwise the local and link-partner pause
 * advertisements are combined per IEEE 802.3ab Table 28B-3.
 */
static void
bnx2_resolve_flow_ctrl(struct bnx2 *bp)
{
	u32 local_adv, remote_adv;

	bp->flow_ctrl = 0;
	if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
		(AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {

		if (bp->duplex == DUPLEX_FULL) {
			bp->flow_ctrl = bp->req_flow_ctrl;
		}
		return;
	}

	/* Pause is only meaningful on full-duplex links. */
	if (bp->duplex != DUPLEX_FULL) {
		return;
	}

	/* 5708 SerDes reports the resolved pause result directly in a
	 * status register; no need to inspect the advertisements.
	 */
	if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
	    (BNX2_CHIP(bp) == BNX2_CHIP_5708)) {
		u32 val;

		bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
		if (val & BCM5708S_1000X_STAT1_TX_PAUSE)
			bp->flow_ctrl |= FLOW_CTRL_TX;
		if (val & BCM5708S_1000X_STAT1_RX_PAUSE)
			bp->flow_ctrl |= FLOW_CTRL_RX;
		return;
	}

	bnx2_read_phy(bp, bp->mii_adv, &local_adv);
	bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);

	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
		/* Remap 1000BASE-X pause bits onto the copper bit layout
		 * so one resolution table below serves both media.
		 */
		u32 new_local_adv = 0;
		u32 new_remote_adv = 0;

		if (local_adv & ADVERTISE_1000XPAUSE)
			new_local_adv |= ADVERTISE_PAUSE_CAP;
		if (local_adv & ADVERTISE_1000XPSE_ASYM)
			new_local_adv |= ADVERTISE_PAUSE_ASYM;
		if (remote_adv & ADVERTISE_1000XPAUSE)
			new_remote_adv |= ADVERTISE_PAUSE_CAP;
		if (remote_adv & ADVERTISE_1000XPSE_ASYM)
			new_remote_adv |= ADVERTISE_PAUSE_ASYM;

		local_adv = new_local_adv;
		remote_adv = new_remote_adv;
	}

	/* See Table 28B-3 of 802.3ab-1999 spec. */
	if (local_adv & ADVERTISE_PAUSE_CAP) {
		if(local_adv & ADVERTISE_PAUSE_ASYM) {
			if (remote_adv & ADVERTISE_PAUSE_CAP) {
				bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
			}
			else if (remote_adv & ADVERTISE_PAUSE_ASYM) {
				bp->flow_ctrl = FLOW_CTRL_RX;
			}
		}
		else {
			if (remote_adv & ADVERTISE_PAUSE_CAP) {
				bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
			}
		}
	}
	else if (local_adv & ADVERTISE_PAUSE_ASYM) {
		if ((remote_adv & ADVERTISE_PAUSE_CAP) &&
			(remote_adv & ADVERTISE_PAUSE_ASYM)) {

			bp->flow_ctrl = FLOW_CTRL_TX;
		}
	}
}
1088
1089 static int
1090 bnx2_5709s_linkup(struct bnx2 *bp)
1091 {
1092         u32 val, speed;
1093
1094         bp->link_up = 1;
1095
1096         bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_GP_STATUS);
1097         bnx2_read_phy(bp, MII_BNX2_GP_TOP_AN_STATUS1, &val);
1098         bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1099
1100         if ((bp->autoneg & AUTONEG_SPEED) == 0) {
1101                 bp->line_speed = bp->req_line_speed;
1102                 bp->duplex = bp->req_duplex;
1103                 return 0;
1104         }
1105         speed = val & MII_BNX2_GP_TOP_AN_SPEED_MSK;
1106         switch (speed) {
1107                 case MII_BNX2_GP_TOP_AN_SPEED_10:
1108                         bp->line_speed = SPEED_10;
1109                         break;
1110                 case MII_BNX2_GP_TOP_AN_SPEED_100:
1111                         bp->line_speed = SPEED_100;
1112                         break;
1113                 case MII_BNX2_GP_TOP_AN_SPEED_1G:
1114                 case MII_BNX2_GP_TOP_AN_SPEED_1GKV:
1115                         bp->line_speed = SPEED_1000;
1116                         break;
1117                 case MII_BNX2_GP_TOP_AN_SPEED_2_5G:
1118                         bp->line_speed = SPEED_2500;
1119                         break;
1120         }
1121         if (val & MII_BNX2_GP_TOP_AN_FD)
1122                 bp->duplex = DUPLEX_FULL;
1123         else
1124                 bp->duplex = DUPLEX_HALF;
1125         return 0;
1126 }
1127
1128 static int
1129 bnx2_5708s_linkup(struct bnx2 *bp)
1130 {
1131         u32 val;
1132
1133         bp->link_up = 1;
1134         bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
1135         switch (val & BCM5708S_1000X_STAT1_SPEED_MASK) {
1136                 case BCM5708S_1000X_STAT1_SPEED_10:
1137                         bp->line_speed = SPEED_10;
1138                         break;
1139                 case BCM5708S_1000X_STAT1_SPEED_100:
1140                         bp->line_speed = SPEED_100;
1141                         break;
1142                 case BCM5708S_1000X_STAT1_SPEED_1G:
1143                         bp->line_speed = SPEED_1000;
1144                         break;
1145                 case BCM5708S_1000X_STAT1_SPEED_2G5:
1146                         bp->line_speed = SPEED_2500;
1147                         break;
1148         }
1149         if (val & BCM5708S_1000X_STAT1_FD)
1150                 bp->duplex = DUPLEX_FULL;
1151         else
1152                 bp->duplex = DUPLEX_HALF;
1153
1154         return 0;
1155 }
1156
1157 static int
1158 bnx2_5706s_linkup(struct bnx2 *bp)
1159 {
1160         u32 bmcr, local_adv, remote_adv, common;
1161
1162         bp->link_up = 1;
1163         bp->line_speed = SPEED_1000;
1164
1165         bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1166         if (bmcr & BMCR_FULLDPLX) {
1167                 bp->duplex = DUPLEX_FULL;
1168         }
1169         else {
1170                 bp->duplex = DUPLEX_HALF;
1171         }
1172
1173         if (!(bmcr & BMCR_ANENABLE)) {
1174                 return 0;
1175         }
1176
1177         bnx2_read_phy(bp, bp->mii_adv, &local_adv);
1178         bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);
1179
1180         common = local_adv & remote_adv;
1181         if (common & (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL)) {
1182
1183                 if (common & ADVERTISE_1000XFULL) {
1184                         bp->duplex = DUPLEX_FULL;
1185                 }
1186                 else {
1187                         bp->duplex = DUPLEX_HALF;
1188                 }
1189         }
1190
1191         return 0;
1192 }
1193
/* Derive line speed and duplex for a copper PHY that has link.
 * With autoneg enabled the result is the highest common capability
 * of the local and link-partner advertisements (1000, then 100,
 * then 10); otherwise the BMCR forced bits are used.  Also records
 * the resolved MDI-X state in bp->phy_flags.
 */
static int
bnx2_copper_linkup(struct bnx2 *bp)
{
	u32 bmcr;

	bp->phy_flags &= ~BNX2_PHY_FLAG_MDIX;

	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
	if (bmcr & BMCR_ANENABLE) {
		u32 local_adv, remote_adv, common;

		bnx2_read_phy(bp, MII_CTRL1000, &local_adv);
		bnx2_read_phy(bp, MII_STAT1000, &remote_adv);

		/* Link-partner 1000BASE-T ability bits in MII_STAT1000
		 * sit two bits above the matching MII_CTRL1000
		 * advertisement bits, hence the >> 2 before masking.
		 */
		common = local_adv & (remote_adv >> 2);
		if (common & ADVERTISE_1000FULL) {
			bp->line_speed = SPEED_1000;
			bp->duplex = DUPLEX_FULL;
		}
		else if (common & ADVERTISE_1000HALF) {
			bp->line_speed = SPEED_1000;
			bp->duplex = DUPLEX_HALF;
		}
		else {
			/* No gigabit match: fall back to the 100/10
			 * base-page advertisement registers.
			 */
			bnx2_read_phy(bp, bp->mii_adv, &local_adv);
			bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);

			common = local_adv & remote_adv;
			if (common & ADVERTISE_100FULL) {
				bp->line_speed = SPEED_100;
				bp->duplex = DUPLEX_FULL;
			}
			else if (common & ADVERTISE_100HALF) {
				bp->line_speed = SPEED_100;
				bp->duplex = DUPLEX_HALF;
			}
			else if (common & ADVERTISE_10FULL) {
				bp->line_speed = SPEED_10;
				bp->duplex = DUPLEX_FULL;
			}
			else if (common & ADVERTISE_10HALF) {
				bp->line_speed = SPEED_10;
				bp->duplex = DUPLEX_HALF;
			}
			else {
				/* No common capability at all: report the
				 * link as down.
				 */
				bp->line_speed = 0;
				bp->link_up = 0;
			}
		}
	}
	else {
		/* Autoneg off: speed and duplex are forced via BMCR. */
		if (bmcr & BMCR_SPEED100) {
			bp->line_speed = SPEED_100;
		}
		else {
			bp->line_speed = SPEED_10;
		}
		if (bmcr & BMCR_FULLDPLX) {
			bp->duplex = DUPLEX_FULL;
		}
		else {
			bp->duplex = DUPLEX_HALF;
		}
	}

	if (bp->link_up) {
		u32 ext_status;

		/* Record whether the PHY resolved to crossed (MDI-X)
		 * pairs.
		 */
		bnx2_read_phy(bp, MII_BNX2_EXT_STATUS, &ext_status);
		if (ext_status & EXT_STATUS_MDIX)
			bp->phy_flags |= BNX2_PHY_FLAG_MDIX;
	}

	return 0;
}
1269
1270 static void
1271 bnx2_init_rx_context(struct bnx2 *bp, u32 cid)
1272 {
1273         u32 val, rx_cid_addr = GET_CID_ADDR(cid);
1274
1275         val = BNX2_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE;
1276         val |= BNX2_L2CTX_CTX_TYPE_SIZE_L2;
1277         val |= 0x02 << 8;
1278
1279         if (bp->flow_ctrl & FLOW_CTRL_TX)
1280                 val |= BNX2_L2CTX_FLOW_CTRL_ENABLE;
1281
1282         bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_CTX_TYPE, val);
1283 }
1284
1285 static void
1286 bnx2_init_all_rx_contexts(struct bnx2 *bp)
1287 {
1288         int i;
1289         u32 cid;
1290
1291         for (i = 0, cid = RX_CID; i < bp->num_rx_rings; i++, cid++) {
1292                 if (i == 1)
1293                         cid = RX_RSS_CID;
1294                 bnx2_init_rx_context(bp, cid);
1295         }
1296 }
1297
/* Program the EMAC for the previously resolved link parameters
 * (bp->line_speed / bp->duplex / bp->flow_ctrl): port mode
 * (MII/GMII/2.5G), duplex, and RX/TX pause enables.  Also acks the
 * EMAC link-change interrupt and refreshes the RX contexts, whose
 * flow-control enable bit depends on bp->flow_ctrl.
 */
static void
bnx2_set_mac_link(struct bnx2 *bp)
{
	u32 val;

	/* 1G half duplex needs a different TX lengths value than the
	 * default.  NOTE(review): exact field breakdown of
	 * 0x2620/0x26ff is hardware-specific and not visible here.
	 */
	BNX2_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x2620);
	if (bp->link_up && (bp->line_speed == SPEED_1000) &&
		(bp->duplex == DUPLEX_HALF)) {
		BNX2_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x26ff);
	}

	/* Configure the EMAC mode register. */
	val = BNX2_RD(bp, BNX2_EMAC_MODE);

	val &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
		BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
		BNX2_EMAC_MODE_25G_MODE);

	if (bp->link_up) {
		switch (bp->line_speed) {
			case SPEED_10:
				/* 5706 has no 10M port mode; it uses plain
				 * MII for 10M as well.
				 */
				if (BNX2_CHIP(bp) != BNX2_CHIP_5706) {
					val |= BNX2_EMAC_MODE_PORT_MII_10M;
					break;
				}
				/* fall through */
			case SPEED_100:
				val |= BNX2_EMAC_MODE_PORT_MII;
				break;
			case SPEED_2500:
				/* 2.5G is GMII mode plus the 25G flag. */
				val |= BNX2_EMAC_MODE_25G_MODE;
				/* fall through */
			case SPEED_1000:
				val |= BNX2_EMAC_MODE_PORT_GMII;
				break;
		}
	}
	else {
		val |= BNX2_EMAC_MODE_PORT_GMII;
	}

	/* Set the MAC to operate in the appropriate duplex mode. */
	if (bp->duplex == DUPLEX_HALF)
		val |= BNX2_EMAC_MODE_HALF_DUPLEX;
	BNX2_WR(bp, BNX2_EMAC_MODE, val);

	/* Enable/disable rx PAUSE. */
	bp->rx_mode &= ~BNX2_EMAC_RX_MODE_FLOW_EN;

	if (bp->flow_ctrl & FLOW_CTRL_RX)
		bp->rx_mode |= BNX2_EMAC_RX_MODE_FLOW_EN;
	BNX2_WR(bp, BNX2_EMAC_RX_MODE, bp->rx_mode);

	/* Enable/disable tx PAUSE. */
	val = BNX2_RD(bp, BNX2_EMAC_TX_MODE);
	val &= ~BNX2_EMAC_TX_MODE_FLOW_EN;

	if (bp->flow_ctrl & FLOW_CTRL_TX)
		val |= BNX2_EMAC_TX_MODE_FLOW_EN;
	BNX2_WR(bp, BNX2_EMAC_TX_MODE, val);

	/* Acknowledge the interrupt. */
	BNX2_WR(bp, BNX2_EMAC_STATUS, BNX2_EMAC_STATUS_LINK_CHANGE);

	bnx2_init_all_rx_contexts(bp);
}
1364
1365 static void
1366 bnx2_enable_bmsr1(struct bnx2 *bp)
1367 {
1368         if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
1369             (BNX2_CHIP(bp) == BNX2_CHIP_5709))
1370                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1371                                MII_BNX2_BLK_ADDR_GP_STATUS);
1372 }
1373
1374 static void
1375 bnx2_disable_bmsr1(struct bnx2 *bp)
1376 {
1377         if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
1378             (BNX2_CHIP(bp) == BNX2_CHIP_5709))
1379                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1380                                MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1381 }
1382
/* Ensure the 2.5G advertisement bit (UP1_2G5) is set on a
 * 2.5G-capable SerDes PHY, also adding 2500baseX to the software
 * advertisement when speed autoneg is on.  Returns 1 if the bit was
 * already set, 0 if this call had to set it or the PHY is not
 * 2.5G capable.
 */
static int
bnx2_test_and_enable_2g5(struct bnx2 *bp)
{
	u32 up1;
	int ret = 1;

	if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
		return 0;

	if (bp->autoneg & AUTONEG_SPEED)
		bp->advertising |= ADVERTISED_2500baseX_Full;

	/* On 5709 the UP1 register lives in the OVER1G block. */
	if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);

	bnx2_read_phy(bp, bp->mii_up1, &up1);
	if (!(up1 & BCM5708S_UP1_2G5)) {
		up1 |= BCM5708S_UP1_2G5;
		bnx2_write_phy(bp, bp->mii_up1, up1);
		ret = 0;
	}

	/* Restore the default register block. */
	if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);

	return ret;
}
1411
/* Clear the 2.5G advertisement bit (UP1_2G5) on a 2.5G-capable
 * SerDes PHY.  Returns 1 if the bit was set and had to be cleared,
 * 0 if it was already clear or the PHY is not 2.5G capable.
 */
static int
bnx2_test_and_disable_2g5(struct bnx2 *bp)
{
	u32 up1;
	int ret = 0;

	if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
		return 0;

	/* On 5709 the UP1 register lives in the OVER1G block. */
	if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);

	bnx2_read_phy(bp, bp->mii_up1, &up1);
	if (up1 & BCM5708S_UP1_2G5) {
		up1 &= ~BCM5708S_UP1_2G5;
		bnx2_write_phy(bp, bp->mii_up1, up1);
		ret = 1;
	}

	/* Restore the default register block. */
	if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);

	return ret;
}
1437
/* Force a 2.5G-capable SerDes PHY to 2.5G: on 5709 via the
 * SERDES_DIG MISC1 force-speed field, on 5708 via a BMCR bit;
 * other chips are left untouched.  When speed autoneg was
 * configured, autoneg is turned off in BMCR and the requested
 * duplex is forced.
 */
static void
bnx2_enable_forced_2g5(struct bnx2 *bp)
{
	u32 uninitialized_var(bmcr);
	int err;

	if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
		return;

	if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
		u32 val;

		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_SERDES_DIG);
		if (!bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val)) {
			val &= ~MII_BNX2_SD_MISC1_FORCE_MSK;
			val |= MII_BNX2_SD_MISC1_FORCE |
				MII_BNX2_SD_MISC1_FORCE_2_5G;
			bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);
		}

		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
		err = bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

	} else if (BNX2_CHIP(bp) == BNX2_CHIP_5708) {
		err = bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
		if (!err)
			bmcr |= BCM5708S_BMCR_FORCE_2500;
	} else {
		return;
	}

	/* Don't write back a BMCR value we failed to read. */
	if (err)
		return;

	if (bp->autoneg & AUTONEG_SPEED) {
		bmcr &= ~BMCR_ANENABLE;
		if (bp->req_duplex == DUPLEX_FULL)
			bmcr |= BMCR_FULLDPLX;
	}
	bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
}
1481
/* Undo a forced 2.5G setting (see bnx2_enable_forced_2g5()): clear
 * the force bit on 5709 (SERDES_DIG MISC1) or 5708 (BMCR).  When
 * speed autoneg is configured, autoneg is re-enabled and restarted
 * at 1G.
 */
static void
bnx2_disable_forced_2g5(struct bnx2 *bp)
{
	u32 uninitialized_var(bmcr);
	int err;

	if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
		return;

	if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
		u32 val;

		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_SERDES_DIG);
		if (!bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val)) {
			val &= ~MII_BNX2_SD_MISC1_FORCE;
			bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);
		}

		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
		err = bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

	} else if (BNX2_CHIP(bp) == BNX2_CHIP_5708) {
		err = bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
		if (!err)
			bmcr &= ~BCM5708S_BMCR_FORCE_2500;
	} else {
		return;
	}

	/* Don't write back a BMCR value we failed to read. */
	if (err)
		return;

	if (bp->autoneg & AUTONEG_SPEED)
		bmcr |= BMCR_SPEED1000 | BMCR_ANENABLE | BMCR_ANRESTART;
	bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
}
1520
1521 static void
1522 bnx2_5706s_force_link_dn(struct bnx2 *bp, int start)
1523 {
1524         u32 val;
1525
1526         bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS, MII_EXPAND_SERDES_CTL);
1527         bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &val);
1528         if (start)
1529                 bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val & 0xff0f);
1530         else
1531                 bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val | 0xc0);
1532 }
1533
/* Re-evaluate the PHY link and reconcile driver state with it:
 * updates bp->link_up, speed/duplex and flow control, logs any
 * change, and reprograms the MAC.  Link is simply forced up in
 * MAC/PHY loopback, and nothing is done when a remote
 * firmware-managed PHY owns the link.  Always returns 0.
 */
static int
bnx2_set_link(struct bnx2 *bp)
{
	u32 bmsr;
	u8 link_up;

	if (bp->loopback == MAC_LOOPBACK || bp->loopback == PHY_LOOPBACK) {
		bp->link_up = 1;
		return 0;
	}

	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
		return 0;

	link_up = bp->link_up;

	/* BMSR link status is latched; read it twice (with the
	 * GP_STATUS block selected on 5709 SerDes) so the second read
	 * reflects the current state.
	 */
	bnx2_enable_bmsr1(bp);
	bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
	bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
	bnx2_disable_bmsr1(bp);

	if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
	    (BNX2_CHIP(bp) == BNX2_CHIP_5706)) {
		u32 val, an_dbg;

		/* 5706 SerDes: release any forced-down state, then
		 * override the latched BMSR link bit using the EMAC
		 * link bit and the AN debug NOSYNC flag.
		 */
		if (bp->phy_flags & BNX2_PHY_FLAG_FORCED_DOWN) {
			bnx2_5706s_force_link_dn(bp, 0);
			bp->phy_flags &= ~BNX2_PHY_FLAG_FORCED_DOWN;
		}
		val = BNX2_RD(bp, BNX2_EMAC_STATUS);

		/* The AN debug shadow register is read twice as well. */
		bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
		bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);
		bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);

		if ((val & BNX2_EMAC_STATUS_LINK) &&
		    !(an_dbg & MISC_SHDW_AN_DBG_NOSYNC))
			bmsr |= BMSR_LSTATUS;
		else
			bmsr &= ~BMSR_LSTATUS;
	}

	if (bmsr & BMSR_LSTATUS) {
		bp->link_up = 1;

		/* Chip-specific speed/duplex resolution. */
		if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
			if (BNX2_CHIP(bp) == BNX2_CHIP_5706)
				bnx2_5706s_linkup(bp);
			else if (BNX2_CHIP(bp) == BNX2_CHIP_5708)
				bnx2_5708s_linkup(bp);
			else if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
				bnx2_5709s_linkup(bp);
		}
		else {
			bnx2_copper_linkup(bp);
		}
		bnx2_resolve_flow_ctrl(bp);
	}
	else {
		if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
		    (bp->autoneg & AUTONEG_SPEED))
			bnx2_disable_forced_2g5(bp);

		if (bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT) {
			u32 bmcr;

			/* Leave parallel-detect mode by re-enabling
			 * autoneg in BMCR.
			 */
			bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
			bmcr |= BMCR_ANENABLE;
			bnx2_write_phy(bp, bp->mii_bmcr, bmcr);

			bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;
		}
		bp->link_up = 0;
	}

	/* Only log when the up/down state actually changed. */
	if (bp->link_up != link_up) {
		bnx2_report_link(bp);
	}

	bnx2_set_mac_link(bp);

	return 0;
}
1617
1618 static int
1619 bnx2_reset_phy(struct bnx2 *bp)
1620 {
1621         int i;
1622         u32 reg;
1623
1624         bnx2_write_phy(bp, bp->mii_bmcr, BMCR_RESET);
1625
1626 #define PHY_RESET_MAX_WAIT 100
1627         for (i = 0; i < PHY_RESET_MAX_WAIT; i++) {
1628                 udelay(10);
1629
1630                 bnx2_read_phy(bp, bp->mii_bmcr, &reg);
1631                 if (!(reg & BMCR_RESET)) {
1632                         udelay(20);
1633                         break;
1634                 }
1635         }
1636         if (i == PHY_RESET_MAX_WAIT) {
1637                 return -EBUSY;
1638         }
1639         return 0;
1640 }
1641
1642 static u32
1643 bnx2_phy_get_pause_adv(struct bnx2 *bp)
1644 {
1645         u32 adv = 0;
1646
1647         if ((bp->req_flow_ctrl & (FLOW_CTRL_RX | FLOW_CTRL_TX)) ==
1648                 (FLOW_CTRL_RX | FLOW_CTRL_TX)) {
1649
1650                 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1651                         adv = ADVERTISE_1000XPAUSE;
1652                 }
1653                 else {
1654                         adv = ADVERTISE_PAUSE_CAP;
1655                 }
1656         }
1657         else if (bp->req_flow_ctrl & FLOW_CTRL_TX) {
1658                 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1659                         adv = ADVERTISE_1000XPSE_ASYM;
1660                 }
1661                 else {
1662                         adv = ADVERTISE_PAUSE_ASYM;
1663                 }
1664         }
1665         else if (bp->req_flow_ctrl & FLOW_CTRL_RX) {
1666                 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1667                         adv = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
1668                 }
1669                 else {
1670                         adv = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
1671                 }
1672         }
1673         return adv;
1674 }
1675
1676 static int bnx2_fw_sync(struct bnx2 *, u32, int, int);
1677
/* Request a link setup from the remote (firmware-managed) PHY.
 * The autoneg advertisement or forced speed/duplex, plus pause
 * capabilities, are encoded into a BNX2_NETLINK_SET_LINK_* bitmask,
 * written to shared memory, and handed to the bootcode with the
 * SET_LINK command.  bp->phy_lock is dropped around the firmware
 * handshake (see the sparse annotations).  Always returns 0.
 */
static int
bnx2_setup_remote_phy(struct bnx2 *bp, u8 port)
__releases(&bp->phy_lock)
__acquires(&bp->phy_lock)
{
	u32 speed_arg = 0, pause_adv;

	pause_adv = bnx2_phy_get_pause_adv(bp);

	if (bp->autoneg & AUTONEG_SPEED) {
		/* Autoneg: advertise each enabled speed. */
		speed_arg |= BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG;
		if (bp->advertising & ADVERTISED_10baseT_Half)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_10HALF;
		if (bp->advertising & ADVERTISED_10baseT_Full)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_10FULL;
		if (bp->advertising & ADVERTISED_100baseT_Half)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_100HALF;
		if (bp->advertising & ADVERTISED_100baseT_Full)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_100FULL;
		if (bp->advertising & ADVERTISED_1000baseT_Full)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_1GFULL;
		if (bp->advertising & ADVERTISED_2500baseX_Full)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_2G5FULL;
	} else {
		/* Forced mode: pick the single requested speed/duplex
		 * (only full duplex is encoded for 1G and 2.5G).
		 */
		if (bp->req_line_speed == SPEED_2500)
			speed_arg = BNX2_NETLINK_SET_LINK_SPEED_2G5FULL;
		else if (bp->req_line_speed == SPEED_1000)
			speed_arg = BNX2_NETLINK_SET_LINK_SPEED_1GFULL;
		else if (bp->req_line_speed == SPEED_100) {
			if (bp->req_duplex == DUPLEX_FULL)
				speed_arg = BNX2_NETLINK_SET_LINK_SPEED_100FULL;
			else
				speed_arg = BNX2_NETLINK_SET_LINK_SPEED_100HALF;
		} else if (bp->req_line_speed == SPEED_10) {
			if (bp->req_duplex == DUPLEX_FULL)
				speed_arg = BNX2_NETLINK_SET_LINK_SPEED_10FULL;
			else
				speed_arg = BNX2_NETLINK_SET_LINK_SPEED_10HALF;
		}
	}

	/* Fold in the symmetric/asymmetric pause capability bits. */
	if (pause_adv & (ADVERTISE_1000XPAUSE | ADVERTISE_PAUSE_CAP))
		speed_arg |= BNX2_NETLINK_SET_LINK_FC_SYM_PAUSE;
	if (pause_adv & (ADVERTISE_1000XPSE_ASYM | ADVERTISE_PAUSE_ASYM))
		speed_arg |= BNX2_NETLINK_SET_LINK_FC_ASYM_PAUSE;

	if (port == PORT_TP)
		speed_arg |= BNX2_NETLINK_SET_LINK_PHY_APP_REMOTE |
			     BNX2_NETLINK_SET_LINK_ETH_AT_WIRESPEED;

	bnx2_shmem_wr(bp, BNX2_DRV_MB_ARG0, speed_arg);

	/* Drop phy_lock while waiting for the firmware to process the
	 * command.
	 */
	spin_unlock_bh(&bp->phy_lock);
	bnx2_fw_sync(bp, BNX2_DRV_MSG_CODE_CMD_SET_LINK, 1, 0);
	spin_lock_bh(&bp->phy_lock);

	return 0;
}
1736
/* Configure a SERDES (fibre) PHY from bp->autoneg, bp->advertising and the
 * bp->req_* forced settings.  Delegates to bnx2_setup_remote_phy() when the
 * PHY is firmware-managed.  Called with bp->phy_lock held; the lock is
 * dropped and re-taken around the msleep() below.  Returns 0.
 */
static int
bnx2_setup_serdes_phy(struct bnx2 *bp, u8 port)
__releases(&bp->phy_lock)
__acquires(&bp->phy_lock)
{
	u32 adv, bmcr;
	u32 new_adv = 0;

	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
		return bnx2_setup_remote_phy(bp, port);

	if (!(bp->autoneg & AUTONEG_SPEED)) {
		/* Forced-speed path: build the new BMCR by hand and only
		 * touch the wire if something actually changed.
		 */
		u32 new_bmcr;
		int force_link_down = 0;

		/* Switching the 2.5G mode on/off can require a link bounce
		 * even when BMCR ends up unchanged.
		 */
		if (bp->req_line_speed == SPEED_2500) {
			if (!bnx2_test_and_enable_2g5(bp))
				force_link_down = 1;
		} else if (bp->req_line_speed == SPEED_1000) {
			if (bnx2_test_and_disable_2g5(bp))
				force_link_down = 1;
		}
		bnx2_read_phy(bp, bp->mii_adv, &adv);
		adv &= ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF);

		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
		new_bmcr = bmcr & ~BMCR_ANENABLE;
		new_bmcr |= BMCR_SPEED1000;

		/* Chip-specific handling of the forced 2.5G mode. */
		if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
			if (bp->req_line_speed == SPEED_2500)
				bnx2_enable_forced_2g5(bp);
			else if (bp->req_line_speed == SPEED_1000) {
				bnx2_disable_forced_2g5(bp);
				/* 0x2000 — presumably the 5709 forced-2.5G
				 * speed-select bit; not named in this file.
				 * TODO(review): confirm against chip docs.
				 */
				new_bmcr &= ~0x2000;
			}

		} else if (BNX2_CHIP(bp) == BNX2_CHIP_5708) {
			if (bp->req_line_speed == SPEED_2500)
				new_bmcr |= BCM5708S_BMCR_FORCE_2500;
			else
				new_bmcr = bmcr & ~BCM5708S_BMCR_FORCE_2500;
		}

		if (bp->req_duplex == DUPLEX_FULL) {
			adv |= ADVERTISE_1000XFULL;
			new_bmcr |= BMCR_FULLDPLX;
		}
		else {
			adv |= ADVERTISE_1000XHALF;
			new_bmcr &= ~BMCR_FULLDPLX;
		}
		if ((new_bmcr != bmcr) || (force_link_down)) {
			/* Force a link down visible on the other side */
			if (bp->link_up) {
				/* Briefly advertise nothing and restart
				 * autoneg so the partner sees link drop
				 * before the new forced mode is applied.
				 */
				bnx2_write_phy(bp, bp->mii_adv, adv &
					       ~(ADVERTISE_1000XFULL |
						 ADVERTISE_1000XHALF));
				bnx2_write_phy(bp, bp->mii_bmcr, bmcr |
					BMCR_ANRESTART | BMCR_ANENABLE);

				bp->link_up = 0;
				netif_carrier_off(bp->dev);
				bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
				bnx2_report_link(bp);
			}
			bnx2_write_phy(bp, bp->mii_adv, adv);
			bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
		} else {
			/* Nothing changed on the wire; just re-resolve
			 * flow control and refresh the MAC link state.
			 */
			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);
		}
		return 0;
	}

	/* Autonegotiation path. */
	bnx2_test_and_enable_2g5(bp);

	if (bp->advertising & ADVERTISED_1000baseT_Full)
		new_adv |= ADVERTISE_1000XFULL;

	new_adv |= bnx2_phy_get_pause_adv(bp);

	bnx2_read_phy(bp, bp->mii_adv, &adv);
	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

	bp->serdes_an_pending = 0;
	if ((adv != new_adv) || ((bmcr & BMCR_ANENABLE) == 0)) {
		/* Force a link down visible on the other side */
		if (bp->link_up) {
			bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
			/* msleep() sleeps — must drop the phy_lock. */
			spin_unlock_bh(&bp->phy_lock);
			msleep(20);
			spin_lock_bh(&bp->phy_lock);
		}

		bnx2_write_phy(bp, bp->mii_adv, new_adv);
		bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART |
			BMCR_ANENABLE);
		/* Speed up link-up time when the link partner
		 * does not autonegotiate which is very common
		 * in blade servers. Some blade servers use
		 * IPMI for keyboard input and it's important
		 * to minimize link disruptions. Autoneg. involves
		 * exchanging base pages plus 3 next pages and
		 * normally completes in about 120 msec.
		 */
		bp->current_interval = BNX2_SERDES_AN_TIMEOUT;
		bp->serdes_an_pending = 1;
		mod_timer(&bp->timer, jiffies + bp->current_interval);
	} else {
		bnx2_resolve_flow_ctrl(bp);
		bnx2_set_mac_link(bp);
	}

	return 0;
}
1853
/* All fibre link modes the device can advertise; 2.5G is included only when
 * the PHY reports 2.5G capability.  NOTE: expands using a local 'bp'.
 */
#define ETHTOOL_ALL_FIBRE_SPEED						\
	(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) ?			\
		(ADVERTISED_2500baseX_Full | ADVERTISED_1000baseT_Full) :\
		(ADVERTISED_1000baseT_Full)

/* All copper link modes the device can advertise. */
#define ETHTOOL_ALL_COPPER_SPEED					\
	(ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |		\
	ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |		\
	ADVERTISED_1000baseT_Full)

/* MII advertisement bits covering every 10/100 mode plus the CSMA selector. */
#define PHY_ALL_10_100_SPEED (ADVERTISE_10HALF | ADVERTISE_10FULL | \
	ADVERTISE_100HALF | ADVERTISE_100FULL | ADVERTISE_CSMA)

/* MII 1000BASE-T control-register advertisement bits (half + full duplex). */
#define PHY_ALL_1000_SPEED (ADVERTISE_1000HALF | ADVERTISE_1000FULL)
1868
/* Derive the driver's default link settings (autoneg + advertising mask, or
 * a forced speed/duplex) from the firmware-saved link word for the current
 * port type.  Copper and serdes defaults live in separate shmem slots.
 */
static void
bnx2_set_default_remote_link(struct bnx2 *bp)
{
	u32 link;

	if (bp->phy_port == PORT_TP)
		link = bnx2_shmem_rd(bp, BNX2_RPHY_COPPER_LINK);
	else
		link = bnx2_shmem_rd(bp, BNX2_RPHY_SERDES_LINK);

	if (link & BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG) {
		/* Autoneg: translate each firmware speed bit back into the
		 * corresponding ethtool ADVERTISED_* flag.
		 */
		bp->req_line_speed = 0;
		bp->autoneg |= AUTONEG_SPEED;
		bp->advertising = ADVERTISED_Autoneg;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_10HALF)
			bp->advertising |= ADVERTISED_10baseT_Half;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_10FULL)
			bp->advertising |= ADVERTISED_10baseT_Full;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_100HALF)
			bp->advertising |= ADVERTISED_100baseT_Half;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_100FULL)
			bp->advertising |= ADVERTISED_100baseT_Full;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_1GFULL)
			bp->advertising |= ADVERTISED_1000baseT_Full;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_2G5FULL)
			bp->advertising |= ADVERTISED_2500baseX_Full;
	} else {
		/* Forced mode.  The checks are ordered so that when several
		 * speed bits are set, the highest one wins (each later
		 * assignment overrides the earlier req_line_speed).
		 */
		bp->autoneg = 0;
		bp->advertising = 0;
		bp->req_duplex = DUPLEX_FULL;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_10) {
			bp->req_line_speed = SPEED_10;
			if (link & BNX2_NETLINK_SET_LINK_SPEED_10HALF)
				bp->req_duplex = DUPLEX_HALF;
		}
		if (link & BNX2_NETLINK_SET_LINK_SPEED_100) {
			bp->req_line_speed = SPEED_100;
			if (link & BNX2_NETLINK_SET_LINK_SPEED_100HALF)
				bp->req_duplex = DUPLEX_HALF;
		}
		if (link & BNX2_NETLINK_SET_LINK_SPEED_1GFULL)
			bp->req_line_speed = SPEED_1000;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_2G5FULL)
			bp->req_line_speed = SPEED_2500;
	}
}
1915
1916 static void
1917 bnx2_set_default_link(struct bnx2 *bp)
1918 {
1919         if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
1920                 bnx2_set_default_remote_link(bp);
1921                 return;
1922         }
1923
1924         bp->autoneg = AUTONEG_SPEED | AUTONEG_FLOW_CTRL;
1925         bp->req_line_speed = 0;
1926         if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1927                 u32 reg;
1928
1929                 bp->advertising = ETHTOOL_ALL_FIBRE_SPEED | ADVERTISED_Autoneg;
1930
1931                 reg = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_CONFIG);
1932                 reg &= BNX2_PORT_HW_CFG_CFG_DFLT_LINK_MASK;
1933                 if (reg == BNX2_PORT_HW_CFG_CFG_DFLT_LINK_1G) {
1934                         bp->autoneg = 0;
1935                         bp->req_line_speed = bp->line_speed = SPEED_1000;
1936                         bp->req_duplex = DUPLEX_FULL;
1937                 }
1938         } else
1939                 bp->advertising = ETHTOOL_ALL_COPPER_SPEED | ADVERTISED_Autoneg;
1940 }
1941
1942 static void
1943 bnx2_send_heart_beat(struct bnx2 *bp)
1944 {
1945         u32 msg;
1946         u32 addr;
1947
1948         spin_lock(&bp->indirect_lock);
1949         msg = (u32) (++bp->fw_drv_pulse_wr_seq & BNX2_DRV_PULSE_SEQ_MASK);
1950         addr = bp->shmem_base + BNX2_DRV_PULSE_MB;
1951         BNX2_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, addr);
1952         BNX2_WR(bp, BNX2_PCICFG_REG_WINDOW, msg);
1953         spin_unlock(&bp->indirect_lock);
1954 }
1955
/* Handle a link-status event from the firmware-managed (remote) PHY.
 * Decodes BNX2_LINK_STATUS from shared memory into bp->link_up,
 * bp->line_speed, bp->duplex, bp->flow_ctrl and bp->phy_port, reports any
 * link change, and reprograms the MAC to match.
 */
static void
bnx2_remote_phy_event(struct bnx2 *bp)
{
	u32 msg;
	u8 link_up = bp->link_up;	/* remember old state for reporting */
	u8 old_port;

	msg = bnx2_shmem_rd(bp, BNX2_LINK_STATUS);

	/* Firmware sets this bit when it missed our pulse; answer with a
	 * fresh heart beat before decoding the rest of the word.
	 */
	if (msg & BNX2_LINK_STATUS_HEART_BEAT_EXPIRED)
		bnx2_send_heart_beat(bp);

	msg &= ~BNX2_LINK_STATUS_HEART_BEAT_EXPIRED;

	if ((msg & BNX2_LINK_STATUS_LINK_UP) == BNX2_LINK_STATUS_LINK_DOWN)
		bp->link_up = 0;
	else {
		u32 speed;

		bp->link_up = 1;
		speed = msg & BNX2_LINK_STATUS_SPEED_MASK;
		bp->duplex = DUPLEX_FULL;
		/* Half-duplex cases set bp->duplex and then fall through to
		 * the matching full-duplex case to pick up the speed.
		 */
		switch (speed) {
			case BNX2_LINK_STATUS_10HALF:
				bp->duplex = DUPLEX_HALF;
				/* fall through */
			case BNX2_LINK_STATUS_10FULL:
				bp->line_speed = SPEED_10;
				break;
			case BNX2_LINK_STATUS_100HALF:
				bp->duplex = DUPLEX_HALF;
				/* fall through */
			case BNX2_LINK_STATUS_100BASE_T4:
			case BNX2_LINK_STATUS_100FULL:
				bp->line_speed = SPEED_100;
				break;
			case BNX2_LINK_STATUS_1000HALF:
				bp->duplex = DUPLEX_HALF;
				/* fall through */
			case BNX2_LINK_STATUS_1000FULL:
				bp->line_speed = SPEED_1000;
				break;
			case BNX2_LINK_STATUS_2500HALF:
				bp->duplex = DUPLEX_HALF;
				/* fall through */
			case BNX2_LINK_STATUS_2500FULL:
				bp->line_speed = SPEED_2500;
				break;
			default:
				bp->line_speed = 0;
				break;
		}

		/* Flow control: forced settings apply only on full duplex;
		 * otherwise take the negotiated result from the firmware.
		 */
		bp->flow_ctrl = 0;
		if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
		    (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {
			if (bp->duplex == DUPLEX_FULL)
				bp->flow_ctrl = bp->req_flow_ctrl;
		} else {
			if (msg & BNX2_LINK_STATUS_TX_FC_ENABLED)
				bp->flow_ctrl |= FLOW_CTRL_TX;
			if (msg & BNX2_LINK_STATUS_RX_FC_ENABLED)
				bp->flow_ctrl |= FLOW_CTRL_RX;
		}

		/* The firmware can switch the active media; reload defaults
		 * when the port type changed.
		 */
		old_port = bp->phy_port;
		if (msg & BNX2_LINK_STATUS_SERDES_LINK)
			bp->phy_port = PORT_FIBRE;
		else
			bp->phy_port = PORT_TP;

		if (old_port != bp->phy_port)
			bnx2_set_default_link(bp);

	}
	if (bp->link_up != link_up)
		bnx2_report_link(bp);

	bnx2_set_mac_link(bp);
}
2036
2037 static int
2038 bnx2_set_remote_link(struct bnx2 *bp)
2039 {
2040         u32 evt_code;
2041
2042         evt_code = bnx2_shmem_rd(bp, BNX2_FW_EVT_CODE_MB);
2043         switch (evt_code) {
2044                 case BNX2_FW_EVT_CODE_LINK_EVENT:
2045                         bnx2_remote_phy_event(bp);
2046                         break;
2047                 case BNX2_FW_EVT_CODE_SW_TIMER_EXPIRATION_EVENT:
2048                 default:
2049                         bnx2_send_heart_beat(bp);
2050                         break;
2051         }
2052         return 0;
2053 }
2054
/* Configure a copper PHY from bp->autoneg / bp->advertising / bp->req_*.
 * Called with bp->phy_lock held; the lock is dropped and re-taken around
 * the msleep() in the forced-speed path.  Returns 0.
 */
static int
bnx2_setup_copper_phy(struct bnx2 *bp)
__releases(&bp->phy_lock)
__acquires(&bp->phy_lock)
{
	u32 bmcr, adv_reg, new_adv = 0;
	u32 new_bmcr;

	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

	/* Keep only the 10/100 and pause bits of the current advertisement
	 * so it can be compared against the desired value.
	 */
	bnx2_read_phy(bp, bp->mii_adv, &adv_reg);
	adv_reg &= (PHY_ALL_10_100_SPEED | ADVERTISE_PAUSE_CAP |
		    ADVERTISE_PAUSE_ASYM);

	new_adv = ADVERTISE_CSMA | ethtool_adv_to_mii_adv_t(bp->advertising);

	if (bp->autoneg & AUTONEG_SPEED) {
		u32 adv1000_reg;
		u32 new_adv1000 = 0;

		new_adv |= bnx2_phy_get_pause_adv(bp);

		bnx2_read_phy(bp, MII_CTRL1000, &adv1000_reg);
		adv1000_reg &= PHY_ALL_1000_SPEED;

		new_adv1000 |= ethtool_adv_to_mii_ctrl1000_t(bp->advertising);
		/* Only rewrite the registers and restart autoneg when the
		 * advertisement actually changed or autoneg was disabled.
		 */
		if ((adv1000_reg != new_adv1000) ||
			(adv_reg != new_adv) ||
			((bmcr & BMCR_ANENABLE) == 0)) {

			bnx2_write_phy(bp, bp->mii_adv, new_adv);
			bnx2_write_phy(bp, MII_CTRL1000, new_adv1000);
			bnx2_write_phy(bp, bp->mii_bmcr, BMCR_ANRESTART |
				BMCR_ANENABLE);
		}
		else if (bp->link_up) {
			/* Flow ctrl may have changed from auto to forced */
			/* or vice-versa. */

			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);
		}
		return 0;
	}

	/* advertise nothing when forcing speed */
	if (adv_reg != new_adv)
		bnx2_write_phy(bp, bp->mii_adv, new_adv);

	new_bmcr = 0;
	if (bp->req_line_speed == SPEED_100) {
		new_bmcr |= BMCR_SPEED100;
	}
	if (bp->req_duplex == DUPLEX_FULL) {
		new_bmcr |= BMCR_FULLDPLX;
	}
	if (new_bmcr != bmcr) {
		u32 bmsr;

		/* NOTE(review): BMSR is read twice here (and again below) —
		 * the MII link-status bit is latched, so the second read
		 * presumably reflects the current state; confirm against
		 * the MII spec.
		 */
		bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
		bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);

		if (bmsr & BMSR_LSTATUS) {
			/* Force link down */
			bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
			/* msleep() sleeps — must drop the phy_lock. */
			spin_unlock_bh(&bp->phy_lock);
			msleep(50);
			spin_lock_bh(&bp->phy_lock);

			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
		}

		bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);

		/* Normally, the new speed is setup after the link has
		 * gone down and up again. In some cases, link will not go
		 * down so we need to set up the new speed here.
		 */
		if (bmsr & BMSR_LSTATUS) {
			bp->line_speed = bp->req_line_speed;
			bp->duplex = bp->req_duplex;
			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);
		}
	} else {
		bnx2_resolve_flow_ctrl(bp);
		bnx2_set_mac_link(bp);
	}
	return 0;
}
2146
2147 static int
2148 bnx2_setup_phy(struct bnx2 *bp, u8 port)
2149 __releases(&bp->phy_lock)
2150 __acquires(&bp->phy_lock)
2151 {
2152         if (bp->loopback == MAC_LOOPBACK)
2153                 return 0;
2154
2155         if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
2156                 return bnx2_setup_serdes_phy(bp, port);
2157         }
2158         else {
2159                 return bnx2_setup_copper_phy(bp);
2160         }
2161 }
2162
/* Initialize the 5709 serdes PHY.  The 5709 exposes the IEEE MII registers
 * at an offset (+0x10) and uses a block-address register to select register
 * pages, so each group of writes below is preceded by a BLK_ADDR select.
 * Returns 0.
 */
static int
bnx2_init_5709s_phy(struct bnx2 *bp, int reset_phy)
{
	u32 val;

	/* Remap the generic MII register offsets for this PHY. */
	bp->mii_bmcr = MII_BMCR + 0x10;
	bp->mii_bmsr = MII_BMSR + 0x10;
	bp->mii_bmsr1 = MII_BNX2_GP_TOP_AN_STATUS1;
	bp->mii_adv = MII_ADVERTISE + 0x10;
	bp->mii_lpa = MII_LPA + 0x10;
	bp->mii_up1 = MII_BNX2_OVER1G_UP1;

	/* Select the autoneg MMD through the AER block. */
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_AER);
	bnx2_write_phy(bp, MII_BNX2_AER_AER, MII_BNX2_AER_AER_AN_MMD);

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
	if (reset_phy)
		bnx2_reset_phy(bp);

	/* Force fibre mode and disable auto media detection. */
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_SERDES_DIG);

	bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, &val);
	val &= ~MII_BNX2_SD_1000XCTL1_AUTODET;
	val |= MII_BNX2_SD_1000XCTL1_FIBER;
	bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, val);

	/* Enable or disable 2.5G capability to match the PHY flags. */
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
	bnx2_read_phy(bp, MII_BNX2_OVER1G_UP1, &val);
	if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE)
		val |= BCM5708S_UP1_2G5;
	else
		val &= ~BCM5708S_UP1_2G5;
	bnx2_write_phy(bp, MII_BNX2_OVER1G_UP1, val);

	/* Enable BAM next-page exchange. */
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_BAM_NXTPG);
	bnx2_read_phy(bp, MII_BNX2_BAM_NXTPG_CTL, &val);
	val |= MII_BNX2_NXTPG_CTL_T2 | MII_BNX2_NXTPG_CTL_BAM;
	bnx2_write_phy(bp, MII_BNX2_BAM_NXTPG_CTL, val);

	/* Enable CL73 BAM autoneg handling. */
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_CL73_USERB0);

	val = MII_BNX2_CL73_BAM_EN | MII_BNX2_CL73_BAM_STA_MGR_EN |
	      MII_BNX2_CL73_BAM_NP_AFT_BP_EN;
	bnx2_write_phy(bp, MII_BNX2_CL73_BAM_CTL1, val);

	/* Leave the block address pointing at the IEEE combo block. */
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);

	return 0;
}
2212
/* Initialize the 5708 serdes PHY: select IEEE register layout, enable
 * fibre auto-detect and PLL early-lock detect, optionally enable 2.5G,
 * and apply board/revision-specific TX amplitude tuning from shared
 * memory.  Returns 0.
 */
static int
bnx2_init_5708s_phy(struct bnx2 *bp, int reset_phy)
{
	u32 val;

	if (reset_phy)
		bnx2_reset_phy(bp);

	bp->mii_up1 = BCM5708S_UP1;

	/* Use the IEEE register set. */
	bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG3);
	bnx2_write_phy(bp, BCM5708S_DIG_3_0, BCM5708S_DIG_3_0_USE_IEEE);
	bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);

	/* Fibre mode with media auto-detect enabled. */
	bnx2_read_phy(bp, BCM5708S_1000X_CTL1, &val);
	val |= BCM5708S_1000X_CTL1_FIBER_MODE | BCM5708S_1000X_CTL1_AUTODET_EN;
	bnx2_write_phy(bp, BCM5708S_1000X_CTL1, val);

	bnx2_read_phy(bp, BCM5708S_1000X_CTL2, &val);
	val |= BCM5708S_1000X_CTL2_PLLEL_DET_EN;
	bnx2_write_phy(bp, BCM5708S_1000X_CTL2, val);

	if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) {
		bnx2_read_phy(bp, BCM5708S_UP1, &val);
		val |= BCM5708S_UP1_2G5;
		bnx2_write_phy(bp, BCM5708S_UP1, val);
	}

	/* Early 5708 revisions (A0/B0/B1) need a TX amplitude workaround. */
	if ((BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5708_A0) ||
	    (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5708_B0) ||
	    (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5708_B1)) {
		/* increase tx signal amplitude */
		bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
			       BCM5708S_BLK_ADDR_TX_MISC);
		bnx2_read_phy(bp, BCM5708S_TX_ACTL1, &val);
		val &= ~BCM5708S_TX_ACTL1_DRIVER_VCM;
		bnx2_write_phy(bp, BCM5708S_TX_ACTL1, val);
		bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);
	}

	/* Board-specific TX control value from the port HW config; applied
	 * only on backplane designs.
	 */
	val = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_CONFIG) &
	      BNX2_PORT_HW_CFG_CFG_TXCTL3_MASK;

	if (val) {
		u32 is_backplane;

		is_backplane = bnx2_shmem_rd(bp, BNX2_SHARED_HW_CFG_CONFIG);
		if (is_backplane & BNX2_SHARED_HW_CFG_PHY_BACKPLANE) {
			bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
				       BCM5708S_BLK_ADDR_TX_MISC);
			bnx2_write_phy(bp, BCM5708S_TX_ACTL3, val);
			bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
				       BCM5708S_BLK_ADDR_DIG);
		}
	}
	return 0;
}
2270
/* Initialize the 5706 serdes PHY.  The numbered registers (0x18, 0x1c) and
 * their values are vendor-specific shadow-register sequences; only the
 * intent noted in the comments is known from this file.  Returns 0.
 */
static int
bnx2_init_5706s_phy(struct bnx2 *bp, int reset_phy)
{
	if (reset_phy)
		bnx2_reset_phy(bp);

	bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;

	/* NOTE(review): 0x300 written to MISC_GP_HW_CTL0 on the 5706 only;
	 * meaning not documented in this file.
	 */
	if (BNX2_CHIP(bp) == BNX2_CHIP_5706)
		BNX2_WR(bp, BNX2_MISC_GP_HW_CTL0, 0x300);

	if (bp->dev->mtu > 1500) {
		u32 val;

		/* Set extended packet length bit */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, (val & 0xfff8) | 0x4000);

		bnx2_write_phy(bp, 0x1c, 0x6c00);
		bnx2_read_phy(bp, 0x1c, &val);
		bnx2_write_phy(bp, 0x1c, (val & 0x3ff) | 0xec02);
	}
	else {
		u32 val;

		/* Clear the extended packet length bit for standard MTU. */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, val & ~0x4007);

		bnx2_write_phy(bp, 0x1c, 0x6c00);
		bnx2_read_phy(bp, 0x1c, &val);
		bnx2_write_phy(bp, 0x1c, (val & 0x3fd) | 0xec00);
	}

	return 0;
}
2308
/* Initialize the copper PHY: apply the CRC workaround DSP sequence when
 * flagged, optionally disable early DAC, set/clear the extended packet
 * length bit to match the MTU, and enable ethernet@wirespeed (plus
 * auto-MDIX on the 5709).  Returns 0.
 */
static int
bnx2_init_copper_phy(struct bnx2 *bp, int reset_phy)
{
	u32 val;

	if (reset_phy)
		bnx2_reset_phy(bp);

	if (bp->phy_flags & BNX2_PHY_FLAG_CRC_FIX) {
		/* Vendor-supplied DSP write sequence for the CRC fix; the
		 * register/value pairs are opaque magic from Broadcom.
		 */
		bnx2_write_phy(bp, 0x18, 0x0c00);
		bnx2_write_phy(bp, 0x17, 0x000a);
		bnx2_write_phy(bp, 0x15, 0x310b);
		bnx2_write_phy(bp, 0x17, 0x201f);
		bnx2_write_phy(bp, 0x15, 0x9506);
		bnx2_write_phy(bp, 0x17, 0x401f);
		bnx2_write_phy(bp, 0x15, 0x14e2);
		bnx2_write_phy(bp, 0x18, 0x0400);
	}

	if (bp->phy_flags & BNX2_PHY_FLAG_DIS_EARLY_DAC) {
		/* Clear bit 8 of DSP expansion register 8 (early DAC). */
		bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS,
			       MII_BNX2_DSP_EXPAND_REG | 0x8);
		bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &val);
		val &= ~(1 << 8);
		bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val);
	}

	if (bp->dev->mtu > 1500) {
		/* Set extended packet length bit */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, val | 0x4000);

		bnx2_read_phy(bp, 0x10, &val);
		bnx2_write_phy(bp, 0x10, val | 0x1);
	}
	else {
		/* Clear the extended packet length bit for standard MTU. */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, val & ~0x4007);

		bnx2_read_phy(bp, 0x10, &val);
		bnx2_write_phy(bp, 0x10, val & ~0x1);
	}

	/* ethernet@wirespeed */
	bnx2_write_phy(bp, MII_BNX2_AUX_CTL, AUX_CTL_MISC_CTL);
	bnx2_read_phy(bp, MII_BNX2_AUX_CTL, &val);
	val |=  AUX_CTL_MISC_CTL_WR | AUX_CTL_MISC_CTL_WIRESPEED;

	/* auto-mdix */
	if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
		val |=  AUX_CTL_MISC_CTL_AUTOMDIX;

	bnx2_write_phy(bp, MII_BNX2_AUX_CTL, val);
	return 0;
}
2366
2367
/* Probe and initialize the PHY, then apply the current link settings.
 * Sets the default MII register map, enables link attentions, reads the
 * PHY ID, and dispatches to the chip-specific init routine.  Remote
 * (firmware-managed) PHYs skip local init entirely.  Called with
 * bp->phy_lock held; returns 0 or the chip-specific error.
 */
static int
bnx2_init_phy(struct bnx2 *bp, int reset_phy)
__releases(&bp->phy_lock)
__acquires(&bp->phy_lock)
{
	u32 val;
	int rc = 0;

	bp->phy_flags &= ~BNX2_PHY_FLAG_INT_MODE_MASK;
	bp->phy_flags |= BNX2_PHY_FLAG_INT_MODE_LINK_READY;

	/* Standard MII register layout; bnx2_init_5709s_phy() remaps these
	 * for the 5709 serdes.
	 */
	bp->mii_bmcr = MII_BMCR;
	bp->mii_bmsr = MII_BMSR;
	bp->mii_bmsr1 = MII_BMSR;
	bp->mii_adv = MII_ADVERTISE;
	bp->mii_lpa = MII_LPA;

	BNX2_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);

	/* Firmware owns the PHY: no local probing or init. */
	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
		goto setup_phy;

	/* Assemble the 32-bit PHY ID from the two MII ID registers. */
	bnx2_read_phy(bp, MII_PHYSID1, &val);
	bp->phy_id = val << 16;
	bnx2_read_phy(bp, MII_PHYSID2, &val);
	bp->phy_id |= val & 0xffff;

	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
		if (BNX2_CHIP(bp) == BNX2_CHIP_5706)
			rc = bnx2_init_5706s_phy(bp, reset_phy);
		else if (BNX2_CHIP(bp) == BNX2_CHIP_5708)
			rc = bnx2_init_5708s_phy(bp, reset_phy);
		else if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
			rc = bnx2_init_5709s_phy(bp, reset_phy);
	}
	else {
		rc = bnx2_init_copper_phy(bp, reset_phy);
	}

setup_phy:
	if (!rc)
		rc = bnx2_setup_phy(bp, bp->phy_port);

	return rc;
}
2413
2414 static int
2415 bnx2_set_mac_loopback(struct bnx2 *bp)
2416 {
2417         u32 mac_mode;
2418
2419         mac_mode = BNX2_RD(bp, BNX2_EMAC_MODE);
2420         mac_mode &= ~BNX2_EMAC_MODE_PORT;
2421         mac_mode |= BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK;
2422         BNX2_WR(bp, BNX2_EMAC_MODE, mac_mode);
2423         bp->link_up = 1;
2424         return 0;
2425 }
2426
2427 static int bnx2_test_link(struct bnx2 *);
2428
2429 static int
2430 bnx2_set_phy_loopback(struct bnx2 *bp)
2431 {
2432         u32 mac_mode;
2433         int rc, i;
2434
2435         spin_lock_bh(&bp->phy_lock);
2436         rc = bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK | BMCR_FULLDPLX |
2437                             BMCR_SPEED1000);
2438         spin_unlock_bh(&bp->phy_lock);
2439         if (rc)
2440                 return rc;
2441
2442         for (i = 0; i < 10; i++) {
2443                 if (bnx2_test_link(bp) == 0)
2444                         break;
2445                 msleep(100);
2446         }
2447
2448         mac_mode = BNX2_RD(bp, BNX2_EMAC_MODE);
2449         mac_mode &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
2450                       BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
2451                       BNX2_EMAC_MODE_25G_MODE);
2452
2453         mac_mode |= BNX2_EMAC_MODE_PORT_GMII;
2454         BNX2_WR(bp, BNX2_EMAC_MODE, mac_mode);
2455         bp->link_up = 1;
2456         return 0;
2457 }
2458
/* Dump MCP (management CPU) and shared-memory state to the kernel log for
 * firmware-hang diagnosis.  Read-only; safe to call from error paths.
 */
static void
bnx2_dump_mcp_state(struct bnx2 *bp)
{
	struct net_device *dev = bp->dev;
	u32 mcp_p0, mcp_p1;

	netdev_err(dev, "<--- start MCP states dump --->\n");
	/* The MCP state registers moved between chip generations. */
	if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
		mcp_p0 = BNX2_MCP_STATE_P0;
		mcp_p1 = BNX2_MCP_STATE_P1;
	} else {
		mcp_p0 = BNX2_MCP_STATE_P0_5708;
		mcp_p1 = BNX2_MCP_STATE_P1_5708;
	}
	netdev_err(dev, "DEBUG: MCP_STATE_P0[%08x] MCP_STATE_P1[%08x]\n",
		   bnx2_reg_rd_ind(bp, mcp_p0), bnx2_reg_rd_ind(bp, mcp_p1));
	netdev_err(dev, "DEBUG: MCP mode[%08x] state[%08x] evt_mask[%08x]\n",
		   bnx2_reg_rd_ind(bp, BNX2_MCP_CPU_MODE),
		   bnx2_reg_rd_ind(bp, BNX2_MCP_CPU_STATE),
		   bnx2_reg_rd_ind(bp, BNX2_MCP_CPU_EVENT_MASK));
	/* The program counter is read twice — presumably so two samples show
	 * whether the MCP is still executing or stuck; confirm intent.
	 */
	netdev_err(dev, "DEBUG: pc[%08x] pc[%08x] instr[%08x]\n",
		   bnx2_reg_rd_ind(bp, BNX2_MCP_CPU_PROGRAM_COUNTER),
		   bnx2_reg_rd_ind(bp, BNX2_MCP_CPU_PROGRAM_COUNTER),
		   bnx2_reg_rd_ind(bp, BNX2_MCP_CPU_INSTRUCTION));
	netdev_err(dev, "DEBUG: shmem states:\n");
	/* pr_cont() continues the preceding netdev_err() line. */
	netdev_err(dev, "DEBUG: drv_mb[%08x] fw_mb[%08x] link_status[%08x]",
		   bnx2_shmem_rd(bp, BNX2_DRV_MB),
		   bnx2_shmem_rd(bp, BNX2_FW_MB),
		   bnx2_shmem_rd(bp, BNX2_LINK_STATUS));
	pr_cont(" drv_pulse_mb[%08x]\n", bnx2_shmem_rd(bp, BNX2_DRV_PULSE_MB));
	netdev_err(dev, "DEBUG: dev_info_signature[%08x] reset_type[%08x]",
		   bnx2_shmem_rd(bp, BNX2_DEV_INFO_SIGNATURE),
		   bnx2_shmem_rd(bp, BNX2_BC_STATE_RESET_TYPE));
	pr_cont(" condition[%08x]\n",
		bnx2_shmem_rd(bp, BNX2_BC_STATE_CONDITION));
	DP_SHMEM_LINE(bp, BNX2_BC_RESET_TYPE);
	DP_SHMEM_LINE(bp, 0x3cc);
	DP_SHMEM_LINE(bp, 0x3dc);
	DP_SHMEM_LINE(bp, 0x3ec);
	netdev_err(dev, "DEBUG: 0x3fc[%08x]\n", bnx2_shmem_rd(bp, 0x3fc));
	netdev_err(dev, "<--- end MCP states dump --->\n");
}
2501
/* Post a command to the bootcode through the shared-memory driver
 * mailbox.  If @ack is set, wait up to BNX2_FW_ACK_TIME_OUT_MS for the
 * firmware to echo the sequence number in the firmware mailbox.
 * Returns 0 on success (or for un-acked / WAIT0 messages), -EBUSY on
 * ack timeout, -EIO if the firmware reported a non-OK status.
 * @silent suppresses the timeout log and MCP state dump.
 */
static int
bnx2_fw_sync(struct bnx2 *bp, u32 msg_data, int ack, int silent)
{
	int i;
	u32 val;

	/* Each message carries a fresh sequence number so the ack can be
	 * matched to this particular command.
	 */
	bp->fw_wr_seq++;
	msg_data |= bp->fw_wr_seq;
	bp->fw_last_msg = msg_data;

	bnx2_shmem_wr(bp, BNX2_DRV_MB, msg_data);

	if (!ack)
		return 0;

	/* wait for an acknowledgement. */
	for (i = 0; i < (BNX2_FW_ACK_TIME_OUT_MS / 10); i++) {
		msleep(10);

		val = bnx2_shmem_rd(bp, BNX2_FW_MB);

		if ((val & BNX2_FW_MSG_ACK) == (msg_data & BNX2_DRV_MSG_SEQ))
			break;
	}
	/* WAIT0 messages are treated as successful even without an ack. */
	if ((msg_data & BNX2_DRV_MSG_DATA) == BNX2_DRV_MSG_DATA_WAIT0)
		return 0;

	/* If we timed out, inform the firmware that this is the case. */
	if ((val & BNX2_FW_MSG_ACK) != (msg_data & BNX2_DRV_MSG_SEQ)) {
		msg_data &= ~BNX2_DRV_MSG_CODE;
		msg_data |= BNX2_DRV_MSG_CODE_FW_TIMEOUT;

		bnx2_shmem_wr(bp, BNX2_DRV_MB, msg_data);
		if (!silent) {
			pr_err("fw sync timeout, reset code = %x\n", msg_data);
			bnx2_dump_mcp_state(bp);
		}

		return -EBUSY;
	}

	/* Acked but firmware reported an error status. */
	if ((val & BNX2_FW_MSG_STATUS_MASK) != BNX2_FW_MSG_STATUS_OK)
		return -EIO;

	return 0;
}
2548
/* Initialize the 5709 context memory: trigger the hardware memory
 * init, then program the host page table with the DMA addresses of the
 * pre-allocated context pages.  Returns 0 on success, -EBUSY on a
 * hardware poll timeout, -ENOMEM if a context page was never allocated.
 */
static int
bnx2_init_5709_context(struct bnx2 *bp)
{
	int i, ret = 0;
	u32 val;

	/* NOTE(review): bit 12's meaning is not named in bnx2.h —
	 * confirm against the chip documentation before changing.
	 */
	val = BNX2_CTX_COMMAND_ENABLED | BNX2_CTX_COMMAND_MEM_INIT | (1 << 12);
	/* Encode the context page size in bits 16+. */
	val |= (BNX2_PAGE_BITS - 8) << 16;
	BNX2_WR(bp, BNX2_CTX_COMMAND, val);
	/* Poll for the hardware to clear MEM_INIT (up to ~20us). */
	for (i = 0; i < 10; i++) {
		val = BNX2_RD(bp, BNX2_CTX_COMMAND);
		if (!(val & BNX2_CTX_COMMAND_MEM_INIT))
			break;
		udelay(2);
	}
	if (val & BNX2_CTX_COMMAND_MEM_INIT)
		return -EBUSY;

	for (i = 0; i < bp->ctx_pages; i++) {
		int j;

		/* Pages are allocated elsewhere; a missing one is fatal. */
		if (bp->ctx_blk[i])
			memset(bp->ctx_blk[i], 0, BNX2_PAGE_SIZE);
		else
			return -ENOMEM;

		/* Write the 64-bit page address (low word with the VALID
		 * bit, then high word), then issue the write request.
		 */
		BNX2_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA0,
			(bp->ctx_blk_mapping[i] & 0xffffffff) |
			BNX2_CTX_HOST_PAGE_TBL_DATA0_VALID);
		BNX2_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA1,
			(u64) bp->ctx_blk_mapping[i] >> 32);
		BNX2_WR(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL, i |
			BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ);
		/* Poll until the hardware consumes the write request. */
		for (j = 0; j < 10; j++) {

			val = BNX2_RD(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL);
			if (!(val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ))
				break;
			udelay(5);
		}
		if (val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ) {
			ret = -EBUSY;
			break;
		}
	}
	return ret;
}
2596
2597 static void
2598 bnx2_init_context(struct bnx2 *bp)
2599 {
2600         u32 vcid;
2601
2602         vcid = 96;
2603         while (vcid) {
2604                 u32 vcid_addr, pcid_addr, offset;
2605                 int i;
2606
2607                 vcid--;
2608
2609                 if (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A0) {
2610                         u32 new_vcid;
2611
2612                         vcid_addr = GET_PCID_ADDR(vcid);
2613                         if (vcid & 0x8) {
2614                                 new_vcid = 0x60 + (vcid & 0xf0) + (vcid & 0x7);
2615                         }
2616                         else {
2617                                 new_vcid = vcid;
2618                         }
2619                         pcid_addr = GET_PCID_ADDR(new_vcid);
2620                 }
2621                 else {
2622                         vcid_addr = GET_CID_ADDR(vcid);
2623                         pcid_addr = vcid_addr;
2624                 }
2625
2626                 for (i = 0; i < (CTX_SIZE / PHY_CTX_SIZE); i++) {
2627                         vcid_addr += (i << PHY_CTX_SHIFT);
2628                         pcid_addr += (i << PHY_CTX_SHIFT);
2629
2630                         BNX2_WR(bp, BNX2_CTX_VIRT_ADDR, vcid_addr);
2631                         BNX2_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr);
2632
2633                         /* Zero out the context. */
2634                         for (offset = 0; offset < PHY_CTX_SIZE; offset += 4)
2635                                 bnx2_ctx_wr(bp, vcid_addr, offset, 0);
2636                 }
2637         }
2638 }
2639
2640 static int
2641 bnx2_alloc_bad_rbuf(struct bnx2 *bp)
2642 {
2643         u16 *good_mbuf;
2644         u32 good_mbuf_cnt;
2645         u32 val;
2646
2647         good_mbuf = kmalloc(512 * sizeof(u16), GFP_KERNEL);
2648         if (good_mbuf == NULL)
2649                 return -ENOMEM;
2650
2651         BNX2_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
2652                 BNX2_MISC_ENABLE_SET_BITS_RX_MBUF_ENABLE);
2653
2654         good_mbuf_cnt = 0;
2655
2656         /* Allocate a bunch of mbufs and save the good ones in an array. */
2657         val = bnx2_reg_rd_ind(bp, BNX2_RBUF_STATUS1);
2658         while (val & BNX2_RBUF_STATUS1_FREE_COUNT) {
2659                 bnx2_reg_wr_ind(bp, BNX2_RBUF_COMMAND,
2660                                 BNX2_RBUF_COMMAND_ALLOC_REQ);
2661
2662                 val = bnx2_reg_rd_ind(bp, BNX2_RBUF_FW_BUF_ALLOC);
2663
2664                 val &= BNX2_RBUF_FW_BUF_ALLOC_VALUE;
2665
2666                 /* The addresses with Bit 9 set are bad memory blocks. */
2667                 if (!(val & (1 << 9))) {
2668                         good_mbuf[good_mbuf_cnt] = (u16) val;
2669                         good_mbuf_cnt++;
2670                 }
2671
2672                 val = bnx2_reg_rd_ind(bp, BNX2_RBUF_STATUS1);
2673         }
2674
2675         /* Free the good ones back to the mbuf pool thus discarding
2676          * all the bad ones. */
2677         while (good_mbuf_cnt) {
2678                 good_mbuf_cnt--;
2679
2680                 val = good_mbuf[good_mbuf_cnt];
2681                 val = (val << 9) | val | 1;
2682
2683                 bnx2_reg_wr_ind(bp, BNX2_RBUF_FW_BUF_FREE, val);
2684         }
2685         kfree(good_mbuf);
2686         return 0;
2687 }
2688
2689 static void
2690 bnx2_set_mac_addr(struct bnx2 *bp, u8 *mac_addr, u32 pos)
2691 {
2692         u32 val;
2693
2694         val = (mac_addr[0] << 8) | mac_addr[1];
2695
2696         BNX2_WR(bp, BNX2_EMAC_MAC_MATCH0 + (pos * 8), val);
2697
2698         val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
2699                 (mac_addr[4] << 8) | mac_addr[5];
2700
2701         BNX2_WR(bp, BNX2_EMAC_MAC_MATCH1 + (pos * 8), val);
2702 }
2703
2704 static inline int
2705 bnx2_alloc_rx_page(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index, gfp_t gfp)
2706 {
2707         dma_addr_t mapping;
2708         struct bnx2_sw_pg *rx_pg = &rxr->rx_pg_ring[index];
2709         struct bnx2_rx_bd *rxbd =
2710                 &rxr->rx_pg_desc_ring[BNX2_RX_RING(index)][BNX2_RX_IDX(index)];
2711         struct page *page = alloc_page(gfp);
2712
2713         if (!page)
2714                 return -ENOMEM;
2715         mapping = dma_map_page(&bp->pdev->dev, page, 0, PAGE_SIZE,
2716                                PCI_DMA_FROMDEVICE);
2717         if (dma_mapping_error(&bp->pdev->dev, mapping)) {
2718                 __free_page(page);
2719                 return -EIO;
2720         }
2721
2722         rx_pg->page = page;
2723         dma_unmap_addr_set(rx_pg, mapping, mapping);
2724         rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
2725         rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;
2726         return 0;
2727 }
2728
2729 static void
2730 bnx2_free_rx_page(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index)
2731 {
2732         struct bnx2_sw_pg *rx_pg = &rxr->rx_pg_ring[index];
2733         struct page *page = rx_pg->page;
2734
2735         if (!page)
2736                 return;
2737
2738         dma_unmap_page(&bp->pdev->dev, dma_unmap_addr(rx_pg, mapping),
2739                        PAGE_SIZE, PCI_DMA_FROMDEVICE);
2740
2741         __free_page(page);
2742         rx_pg->page = NULL;
2743 }
2744
2745 static inline int
2746 bnx2_alloc_rx_data(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index, gfp_t gfp)
2747 {
2748         u8 *data;
2749         struct bnx2_sw_bd *rx_buf = &rxr->rx_buf_ring[index];
2750         dma_addr_t mapping;
2751         struct bnx2_rx_bd *rxbd =
2752                 &rxr->rx_desc_ring[BNX2_RX_RING(index)][BNX2_RX_IDX(index)];
2753
2754         data = kmalloc(bp->rx_buf_size, gfp);
2755         if (!data)
2756                 return -ENOMEM;
2757
2758         mapping = dma_map_single(&bp->pdev->dev,
2759                                  get_l2_fhdr(data),
2760                                  bp->rx_buf_use_size,
2761                                  PCI_DMA_FROMDEVICE);
2762         if (dma_mapping_error(&bp->pdev->dev, mapping)) {
2763                 kfree(data);
2764                 return -EIO;
2765         }
2766
2767         rx_buf->data = data;
2768         dma_unmap_addr_set(rx_buf, mapping, mapping);
2769
2770         rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
2771         rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;
2772
2773         rxr->rx_prod_bseq += bp->rx_buf_use_size;
2774
2775         return 0;
2776 }
2777
2778 static int
2779 bnx2_phy_event_is_set(struct bnx2 *bp, struct bnx2_napi *bnapi, u32 event)
2780 {
2781         struct status_block *sblk = bnapi->status_blk.msi;
2782         u32 new_link_state, old_link_state;
2783         int is_set = 1;
2784
2785         new_link_state = sblk->status_attn_bits & event;
2786         old_link_state = sblk->status_attn_bits_ack & event;
2787         if (new_link_state != old_link_state) {
2788                 if (new_link_state)
2789                         BNX2_WR(bp, BNX2_PCICFG_STATUS_BIT_SET_CMD, event);
2790                 else
2791                         BNX2_WR(bp, BNX2_PCICFG_STATUS_BIT_CLEAR_CMD, event);
2792         } else
2793                 is_set = 0;
2794
2795         return is_set;
2796 }
2797
2798 static void
2799 bnx2_phy_int(struct bnx2 *bp, struct bnx2_napi *bnapi)
2800 {
2801         spin_lock(&bp->phy_lock);
2802
2803         if (bnx2_phy_event_is_set(bp, bnapi, STATUS_ATTN_BITS_LINK_STATE))
2804                 bnx2_set_link(bp);
2805         if (bnx2_phy_event_is_set(bp, bnapi, STATUS_ATTN_BITS_TIMER_ABORT))
2806                 bnx2_set_remote_link(bp);
2807
2808         spin_unlock(&bp->phy_lock);
2809
2810 }
2811
2812 static inline u16
2813 bnx2_get_hw_tx_cons(struct bnx2_napi *bnapi)
2814 {
2815         u16 cons;
2816
2817         /* Tell compiler that status block fields can change. */
2818         barrier();
2819         cons = *bnapi->hw_tx_cons_ptr;
2820         barrier();
2821         if (unlikely((cons & BNX2_MAX_TX_DESC_CNT) == BNX2_MAX_TX_DESC_CNT))
2822                 cons++;
2823         return cons;
2824 }
2825
/* Reclaim completed TX descriptors for this napi's ring: unmap the DMA
 * buffers, free the skbs, report bytes/packets to BQL, and wake the
 * queue if it was stopped and enough descriptors are now free.
 * Returns the number of packets reclaimed (at most @budget).
 */
static int
bnx2_tx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
{
	struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
	u16 hw_cons, sw_cons, sw_ring_cons;
	int tx_pkt = 0, index;
	unsigned int tx_bytes = 0;
	struct netdev_queue *txq;

	/* TX queue index matches the napi vector index. */
	index = (bnapi - bp->bnx2_napi);
	txq = netdev_get_tx_queue(bp->dev, index);

	hw_cons = bnx2_get_hw_tx_cons(bnapi);
	sw_cons = txr->tx_cons;

	while (sw_cons != hw_cons) {
		struct bnx2_sw_tx_bd *tx_buf;
		struct sk_buff *skb;
		int i, last;

		sw_ring_cons = BNX2_TX_RING_IDX(sw_cons);

		tx_buf = &txr->tx_buf_ring[sw_ring_cons];
		skb = tx_buf->skb;

		/* prefetch skb_end_pointer() to speedup skb_shinfo(skb) */
		prefetch(&skb->end);

		/* partial BD completions possible with TSO packets */
		if (tx_buf->is_gso) {
			u16 last_idx, last_ring_idx;

			/* Don't free until every BD of this packet
			 * (head + nr_frags + 1) has completed; signed
			 * 16-bit math handles index wrap-around.
			 */
			last_idx = sw_cons + tx_buf->nr_frags + 1;
			last_ring_idx = sw_ring_cons + tx_buf->nr_frags + 1;
			if (unlikely(last_ring_idx >= BNX2_MAX_TX_DESC_CNT)) {
				last_idx++;
			}
			if (((s16) ((s16) last_idx - (s16) hw_cons)) > 0) {
				break;
			}
		}

		/* Unmap the linear part, then each fragment. */
		dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(tx_buf, mapping),
			skb_headlen(skb), PCI_DMA_TODEVICE);

		tx_buf->skb = NULL;
		last = tx_buf->nr_frags;

		for (i = 0; i < last; i++) {
			struct bnx2_sw_tx_bd *tx_buf;

			sw_cons = BNX2_NEXT_TX_BD(sw_cons);

			tx_buf = &txr->tx_buf_ring[BNX2_TX_RING_IDX(sw_cons)];
			dma_unmap_page(&bp->pdev->dev,
				dma_unmap_addr(tx_buf, mapping),
				skb_frag_size(&skb_shinfo(skb)->frags[i]),
				PCI_DMA_TODEVICE);
		}

		sw_cons = BNX2_NEXT_TX_BD(sw_cons);

		tx_bytes += skb->len;
		dev_kfree_skb(skb);
		tx_pkt++;
		if (tx_pkt == budget)
			break;

		/* Re-sample the hardware index for newly completed work. */
		if (hw_cons == sw_cons)
			hw_cons = bnx2_get_hw_tx_cons(bnapi);
	}

	/* Byte queue limits accounting. */
	netdev_tx_completed_queue(txq, tx_pkt, tx_bytes);
	txr->hw_tx_cons = hw_cons;
	txr->tx_cons = sw_cons;

	/* Need to make the tx_cons update visible to bnx2_start_xmit()
	 * before checking for netif_tx_queue_stopped().  Without the
	 * memory barrier, there is a small possibility that bnx2_start_xmit()
	 * will miss it and cause the queue to be stopped forever.
	 */
	smp_mb();

	/* Re-check under the tx lock to avoid racing with the xmit path. */
	if (unlikely(netif_tx_queue_stopped(txq)) &&
		     (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh)) {
		__netif_tx_lock(txq, smp_processor_id());
		if ((netif_tx_queue_stopped(txq)) &&
		    (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh))
			netif_tx_wake_queue(txq);
		__netif_tx_unlock(txq);
	}

	return tx_pkt;
}
2920
/* Recycle @count consumed slots of the RX page ring back to the
 * producer side without allocating new pages.  If @skb is non-NULL the
 * caller failed to refill the page backing the skb's last frag; that
 * page is reclaimed into the ring and the skb freed.
 */
static void
bnx2_reuse_rx_skb_pages(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr,
			struct sk_buff *skb, int count)
{
	struct bnx2_sw_pg *cons_rx_pg, *prod_rx_pg;
	struct bnx2_rx_bd *cons_bd, *prod_bd;
	int i;
	u16 hw_prod, prod;
	u16 cons = rxr->rx_pg_cons;

	cons_rx_pg = &rxr->rx_pg_ring[cons];

	/* The caller was unable to allocate a new page to replace the
	 * last one in the frags array, so we need to recycle that page
	 * and then free the skb.
	 */
	if (skb) {
		struct page *page;
		struct skb_shared_info *shinfo;

		/* Detach the last frag's page from the skb before the
		 * skb is freed, and park it in the current cons slot.
		 */
		shinfo = skb_shinfo(skb);
		shinfo->nr_frags--;
		page = skb_frag_page(&shinfo->frags[shinfo->nr_frags]);
		__skb_frag_set_page(&shinfo->frags[shinfo->nr_frags], NULL);

		cons_rx_pg->page = page;
		dev_kfree_skb(skb);
	}

	hw_prod = rxr->rx_pg_prod;

	for (i = 0; i < count; i++) {
		prod = BNX2_RX_PG_RING_IDX(hw_prod);

		prod_rx_pg = &rxr->rx_pg_ring[prod];
		cons_rx_pg = &rxr->rx_pg_ring[cons];
		cons_bd = &rxr->rx_pg_desc_ring[BNX2_RX_RING(cons)]
						[BNX2_RX_IDX(cons)];
		prod_bd = &rxr->rx_pg_desc_ring[BNX2_RX_RING(prod)]
						[BNX2_RX_IDX(prod)];

		/* Move the page and its DMA mapping from the cons slot
		 * to the prod slot; a same-slot move needs no work.
		 */
		if (prod != cons) {
			prod_rx_pg->page = cons_rx_pg->page;
			cons_rx_pg->page = NULL;
			dma_unmap_addr_set(prod_rx_pg, mapping,
				dma_unmap_addr(cons_rx_pg, mapping));

			prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
			prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;

		}
		cons = BNX2_RX_PG_RING_IDX(BNX2_NEXT_RX_BD(cons));
		hw_prod = BNX2_NEXT_RX_BD(hw_prod);
	}
	rxr->rx_pg_prod = hw_prod;
	rxr->rx_pg_cons = cons;
}
2978
2979 static inline void
2980 bnx2_reuse_rx_data(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr,
2981                    u8 *data, u16 cons, u16 prod)
2982 {
2983         struct bnx2_sw_bd *cons_rx_buf, *prod_rx_buf;
2984         struct bnx2_rx_bd *cons_bd, *prod_bd;
2985
2986         cons_rx_buf = &rxr->rx_buf_ring[cons];
2987         prod_rx_buf = &rxr->rx_buf_ring[prod];
2988
2989         dma_sync_single_for_device(&bp->pdev->dev,
2990                 dma_unmap_addr(cons_rx_buf, mapping),
2991                 BNX2_RX_OFFSET + BNX2_RX_COPY_THRESH, PCI_DMA_FROMDEVICE);
2992
2993         rxr->rx_prod_bseq += bp->rx_buf_use_size;
2994
2995         prod_rx_buf->data = data;
2996
2997         if (cons == prod)
2998                 return;
2999
3000         dma_unmap_addr_set(prod_rx_buf, mapping,
3001                         dma_unmap_addr(cons_rx_buf, mapping));
3002
3003         cons_bd = &rxr->rx_desc_ring[BNX2_RX_RING(cons)][BNX2_RX_IDX(cons)];
3004         prod_bd = &rxr->rx_desc_ring[BNX2_RX_RING(prod)][BNX2_RX_IDX(prod)];
3005         prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
3006         prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;
3007 }
3008
/* Build an skb around the received buffer @data.  For split/jumbo
 * frames (@hdr_len != 0) the first @hdr_len bytes come from @data and
 * the remainder is gathered from the page ring into frags.  @ring_idx
 * packs (cons << 16) | prod.  Returns NULL on allocation failure,
 * after recycling the buffer (and any pages) back to the rings.
 */
static struct sk_buff *
bnx2_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u8 *data,
	    unsigned int len, unsigned int hdr_len, dma_addr_t dma_addr,
	    u32 ring_idx)
{
	int err;
	u16 prod = ring_idx & 0xffff;
	struct sk_buff *skb;

	/* Refill the producer slot first; on failure, recycle @data. */
	err = bnx2_alloc_rx_data(bp, rxr, prod, GFP_ATOMIC);
	if (unlikely(err)) {
		bnx2_reuse_rx_data(bp, rxr, data, (u16) (ring_idx >> 16), prod);
error:
		/* Also give back the page-ring slots this frame used.
		 * The +4 restores the trailer (presumably the FCS —
		 * confirm) excluded from @len by the caller.
		 */
		if (hdr_len) {
			unsigned int raw_len = len + 4;
			int pages = PAGE_ALIGN(raw_len - hdr_len) >> PAGE_SHIFT;

			bnx2_reuse_rx_skb_pages(bp, rxr, NULL, pages);
		}
		return NULL;
	}

	dma_unmap_single(&bp->pdev->dev, dma_addr, bp->rx_buf_use_size,
			 PCI_DMA_FROMDEVICE);
	skb = build_skb(data, 0);
	if (!skb) {
		kfree(data);
		goto error;
	}
	/* Skip the l2_fhdr and padding in front of the packet data. */
	skb_reserve(skb, ((u8 *)get_l2_fhdr(data) - data) + BNX2_RX_OFFSET);
	if (hdr_len == 0) {
		/* Non-split frame: everything is in the linear buffer. */
		skb_put(skb, len);
		return skb;
	} else {
		unsigned int i, frag_len, frag_size, pages;
		struct bnx2_sw_pg *rx_pg;
		u16 pg_cons = rxr->rx_pg_cons;
		u16 pg_prod = rxr->rx_pg_prod;

		frag_size = len + 4 - hdr_len;
		pages = PAGE_ALIGN(frag_size) >> PAGE_SHIFT;
		skb_put(skb, hdr_len);

		for (i = 0; i < pages; i++) {
			dma_addr_t mapping_old;

			frag_len = min(frag_size, (unsigned int) PAGE_SIZE);
			/* If only the 4-byte trailer (or less) remains,
			 * trim it from the skb instead of attaching
			 * another frag, and recycle the unused pages.
			 */
			if (unlikely(frag_len <= 4)) {
				unsigned int tail = 4 - frag_len;

				rxr->rx_pg_cons = pg_cons;
				rxr->rx_pg_prod = pg_prod;
				bnx2_reuse_rx_skb_pages(bp, rxr, NULL,
							pages - i);
				skb->len -= tail;
				if (i == 0) {
					skb->tail -= tail;
				} else {
					skb_frag_t *frag =
						&skb_shinfo(skb)->frags[i - 1];
					skb_frag_size_sub(frag, tail);
					skb->data_len -= tail;
				}
				return skb;
			}
			rx_pg = &rxr->rx_pg_ring[pg_cons];

			/* Don't unmap yet.  If we're unable to allocate a new
			 * page, we need to recycle the page and the DMA addr.
			 */
			mapping_old = dma_unmap_addr(rx_pg, mapping);
			if (i == pages - 1)
				frag_len -= 4;

			skb_fill_page_desc(skb, i, rx_pg->page, 0, frag_len);
			rx_pg->page = NULL;

			err = bnx2_alloc_rx_page(bp, rxr,
						 BNX2_RX_PG_RING_IDX(pg_prod),
						 GFP_ATOMIC);
			/* On failure, roll back the ring indices and let
			 * bnx2_reuse_rx_skb_pages() reclaim the skb's pages.
			 */
			if (unlikely(err)) {
				rxr->rx_pg_cons = pg_cons;
				rxr->rx_pg_prod = pg_prod;
				bnx2_reuse_rx_skb_pages(bp, rxr, skb,
							pages - i);
				return NULL;
			}

			dma_unmap_page(&bp->pdev->dev, mapping_old,
				       PAGE_SIZE, PCI_DMA_FROMDEVICE);

			frag_size -= frag_len;
			skb->data_len += frag_len;
			skb->truesize += PAGE_SIZE;
			skb->len += frag_len;

			pg_prod = BNX2_NEXT_RX_BD(pg_prod);
			pg_cons = BNX2_RX_PG_RING_IDX(BNX2_NEXT_RX_BD(pg_cons));
		}
		rxr->rx_pg_prod = pg_prod;
		rxr->rx_pg_cons = pg_cons;
	}
	return skb;
}
3113
3114 static inline u16
3115 bnx2_get_hw_rx_cons(struct bnx2_napi *bnapi)
3116 {
3117         u16 cons;
3118
3119         /* Tell compiler that status block fields can change. */
3120         barrier();
3121         cons = *bnapi->hw_rx_cons_ptr;
3122         barrier();
3123         if (unlikely((cons & BNX2_MAX_RX_DESC_CNT) == BNX2_MAX_RX_DESC_CNT))
3124                 cons++;
3125         return cons;
3126 }
3127
/* NAPI RX handler: process up to @budget received packets from this
 * napi's RX ring, hand them to the stack, replenish/recycle buffers,
 * then tell the chip the new producer indices.  Returns the number of
 * packets processed.
 */
static int
bnx2_rx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
{
	struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
	u16 hw_cons, sw_cons, sw_ring_cons, sw_prod, sw_ring_prod;
	struct l2_fhdr *rx_hdr;
	int rx_pkt = 0, pg_ring_used = 0;

	hw_cons = bnx2_get_hw_rx_cons(bnapi);
	sw_cons = rxr->rx_cons;
	sw_prod = rxr->rx_prod;

	/* Memory barrier necessary as speculative reads of the rx
	 * buffer can be ahead of the index in the status block
	 */
	rmb();
	while (sw_cons != hw_cons) {
		unsigned int len, hdr_len;
		u32 status;
		struct bnx2_sw_bd *rx_buf, *next_rx_buf;
		struct sk_buff *skb;
		dma_addr_t dma_addr;
		u8 *data;
		u16 next_ring_idx;

		sw_ring_cons = BNX2_RX_RING_IDX(sw_cons);
		sw_ring_prod = BNX2_RX_RING_IDX(sw_prod);

		/* Take the buffer out of the ring; every path below must
		 * either recycle it or pass it up in an skb.
		 */
		rx_buf = &rxr->rx_buf_ring[sw_ring_cons];
		data = rx_buf->data;
		rx_buf->data = NULL;

		rx_hdr = get_l2_fhdr(data);
		prefetch(rx_hdr);

		dma_addr = dma_unmap_addr(rx_buf, mapping);

		/* Make the header area readable by the CPU. */
		dma_sync_single_for_cpu(&bp->pdev->dev, dma_addr,
			BNX2_RX_OFFSET + BNX2_RX_COPY_THRESH,
			PCI_DMA_FROMDEVICE);

		/* Prefetch the next frame's header for the next loop. */
		next_ring_idx = BNX2_RX_RING_IDX(BNX2_NEXT_RX_BD(sw_cons));
		next_rx_buf = &rxr->rx_buf_ring[next_ring_idx];
		prefetch(get_l2_fhdr(next_rx_buf->data));

		len = rx_hdr->l2_fhdr_pkt_len;
		status = rx_hdr->l2_fhdr_status;

		/* hdr_len != 0 means the rest of the frame lives in the
		 * page ring (chip-split frame or jumbo overflow).
		 */
		hdr_len = 0;
		if (status & L2_FHDR_STATUS_SPLIT) {
			hdr_len = rx_hdr->l2_fhdr_ip_xsum;
			pg_ring_used = 1;
		} else if (len > bp->rx_jumbo_thresh) {
			hdr_len = bp->rx_jumbo_thresh;
			pg_ring_used = 1;
		}

		/* Drop errored frames, recycling their buffers/pages. */
		if (unlikely(status & (L2_FHDR_ERRORS_BAD_CRC |
				       L2_FHDR_ERRORS_PHY_DECODE |
				       L2_FHDR_ERRORS_ALIGNMENT |
				       L2_FHDR_ERRORS_TOO_SHORT |
				       L2_FHDR_ERRORS_GIANT_FRAME))) {

			bnx2_reuse_rx_data(bp, rxr, data, sw_ring_cons,
					  sw_ring_prod);
			if (pg_ring_used) {
				int pages;

				pages = PAGE_ALIGN(len - hdr_len) >> PAGE_SHIFT;

				bnx2_reuse_rx_skb_pages(bp, rxr, NULL, pages);
			}
			goto next_rx;
		}

		/* Strip the 4-byte trailer (presumably the FCS — confirm). */
		len -= 4;

		/* Small frames: copy into a fresh skb and recycle the
		 * original buffer; larger frames take the buffer itself.
		 */
		if (len <= bp->rx_copy_thresh) {
			skb = netdev_alloc_skb(bp->dev, len + 6);
			if (skb == NULL) {
				bnx2_reuse_rx_data(bp, rxr, data, sw_ring_cons,
						  sw_ring_prod);
				goto next_rx;
			}

			/* aligned copy */
			memcpy(skb->data,
			       (u8 *)rx_hdr + BNX2_RX_OFFSET - 6,
			       len + 6);
			skb_reserve(skb, 6);
			skb_put(skb, len);

			bnx2_reuse_rx_data(bp, rxr, data,
				sw_ring_cons, sw_ring_prod);

		} else {
			skb = bnx2_rx_skb(bp, rxr, data, len, hdr_len, dma_addr,
					  (sw_ring_cons << 16) | sw_ring_prod);
			if (!skb)
				goto next_rx;
		}
		if ((status & L2_FHDR_STATUS_L2_VLAN_TAG) &&
		    !(bp->rx_mode & BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG))
			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rx_hdr->l2_fhdr_vlan_tag);

		skb->protocol = eth_type_trans(skb, bp->dev);

		/* Drop oversized frames unless VLAN-tagged (0x8100 is the
		 * 802.1Q ethertype; the tag accounts for the extra bytes).
		 */
		if ((len > (bp->dev->mtu + ETH_HLEN)) &&
			(ntohs(skb->protocol) != 0x8100)) {

			dev_kfree_skb(skb);
			goto next_rx;

		}

		/* Trust the hardware checksum only when it reported no
		 * TCP/UDP checksum errors.
		 */
		skb_checksum_none_assert(skb);
		if ((bp->dev->features & NETIF_F_RXCSUM) &&
			(status & (L2_FHDR_STATUS_TCP_SEGMENT |
			L2_FHDR_STATUS_UDP_DATAGRAM))) {

			if (likely((status & (L2_FHDR_ERRORS_TCP_XSUM |
					      L2_FHDR_ERRORS_UDP_XSUM)) == 0))
				skb->ip_summed = CHECKSUM_UNNECESSARY;
		}
		if ((bp->dev->features & NETIF_F_RXHASH) &&
		    ((status & L2_FHDR_STATUS_USE_RXHASH) ==
		     L2_FHDR_STATUS_USE_RXHASH))
			skb_set_hash(skb, rx_hdr->l2_fhdr_hash,
				     PKT_HASH_TYPE_L3);

		skb_record_rx_queue(skb, bnapi - &bp->bnx2_napi[0]);
		napi_gro_receive(&bnapi->napi, skb);
		rx_pkt++;

next_rx:
		sw_cons = BNX2_NEXT_RX_BD(sw_cons);
		sw_prod = BNX2_NEXT_RX_BD(sw_prod);

		if ((rx_pkt == budget))
			break;

		/* Refresh hw_cons to see if there is new work */
		if (sw_cons == hw_cons) {
			hw_cons = bnx2_get_hw_rx_cons(bnapi);
			rmb();
		}
	}
	rxr->rx_cons = sw_cons;
	rxr->rx_prod = sw_prod;

	/* Publish the new producer indices to the chip. */
	if (pg_ring_used)
		BNX2_WR16(bp, rxr->rx_pg_bidx_addr, rxr->rx_pg_prod);

	BNX2_WR16(bp, rxr->rx_bidx_addr, sw_prod);

	BNX2_WR(bp, rxr->rx_bseq_addr, rxr->rx_prod_bseq);

	/* Order the index writes before any later MMIO. */
	mmiowb();

	return rx_pkt;

}
3290
/* MSI ISR - The only difference between this and the INTx ISR
 * is that the MSI interrupt is always serviced.
 */
static irqreturn_t
bnx2_msi(int irq, void *dev_instance)
{
	struct bnx2_napi *bnapi = dev_instance;
	struct bnx2 *bp = bnapi->bp;

	prefetch(bnapi->status_blk.msi);
	/* Ack the interrupt and mask further HC interrupts until NAPI
	 * polling completes and re-enables them.
	 */
	BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
		BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
		BNX2_PCICFG_INT_ACK_CMD_MASK_INT);

	/* Return here if interrupt is disabled. */
	if (unlikely(atomic_read(&bp->intr_sem) != 0))
		return IRQ_HANDLED;

	napi_schedule(&bnapi->napi);

	return IRQ_HANDLED;
}
3313
/* MSI ISR for one-shot mode.  Unlike bnx2_msi(), no INT_ACK_CMD
 * mask write is performed here -- NOTE(review): presumably the
 * hardware auto-masks in one-shot mode; confirm against chip docs.
 */
static irqreturn_t
bnx2_msi_1shot(int irq, void *dev_instance)
{
	struct bnx2_napi *bnapi = dev_instance;
	struct bnx2 *bp = bnapi->bp;

	prefetch(bnapi->status_blk.msi);

	/* Return here if interrupt is disabled. */
	if (unlikely(atomic_read(&bp->intr_sem) != 0))
		return IRQ_HANDLED;

	napi_schedule(&bnapi->napi);

	return IRQ_HANDLED;
}
3330
/* INTx ISR.  Returns IRQ_NONE when the (possibly shared) line fired
 * for another device: the status index has not moved and INTA is
 * still deasserted from our point of view.
 */
static irqreturn_t
bnx2_interrupt(int irq, void *dev_instance)
{
	struct bnx2_napi *bnapi = dev_instance;
	struct bnx2 *bp = bnapi->bp;
	struct status_block *sblk = bnapi->status_blk.msi;

	/* When using INTx, it is possible for the interrupt to arrive
	 * at the CPU before the status block posted prior to the
	 * interrupt. Reading a register will flush the status block.
	 * When using MSI, the MSI message will always complete after
	 * the status block write.
	 */
	if ((sblk->status_idx == bnapi->last_status_idx) &&
	    (BNX2_RD(bp, BNX2_PCICFG_MISC_STATUS) &
	     BNX2_PCICFG_MISC_STATUS_INTA_VALUE))
		return IRQ_NONE;

	BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
		BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
		BNX2_PCICFG_INT_ACK_CMD_MASK_INT);

	/* Read back to deassert IRQ immediately to avoid too many
	 * spurious interrupts.
	 */
	BNX2_RD(bp, BNX2_PCICFG_INT_ACK_CMD);

	/* Return here if interrupt is shared and is disabled. */
	if (unlikely(atomic_read(&bp->intr_sem) != 0))
		return IRQ_HANDLED;

	/* Snapshot the status index only when we actually won the
	 * right to schedule NAPI polling.
	 */
	if (napi_schedule_prep(&bnapi->napi)) {
		bnapi->last_status_idx = sblk->status_idx;
		__napi_schedule(&bnapi->napi);
	}

	return IRQ_HANDLED;
}
3369
3370 static inline int
3371 bnx2_has_fast_work(struct bnx2_napi *bnapi)
3372 {
3373         struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
3374         struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
3375
3376         if ((bnx2_get_hw_rx_cons(bnapi) != rxr->rx_cons) ||
3377             (bnx2_get_hw_tx_cons(bnapi) != txr->hw_tx_cons))
3378                 return 1;
3379         return 0;
3380 }
3381
3382 #define STATUS_ATTN_EVENTS      (STATUS_ATTN_BITS_LINK_STATE | \
3383                                  STATUS_ATTN_BITS_TIMER_ABORT)
3384
3385 static inline int
3386 bnx2_has_work(struct bnx2_napi *bnapi)
3387 {
3388         struct status_block *sblk = bnapi->status_blk.msi;
3389
3390         if (bnx2_has_fast_work(bnapi))
3391                 return 1;
3392
3393 #ifdef BCM_CNIC
3394         if (bnapi->cnic_present && (bnapi->cnic_tag != sblk->status_idx))
3395                 return 1;
3396 #endif
3397
3398         if ((sblk->status_attn_bits & STATUS_ATTN_EVENTS) !=
3399             (sblk->status_attn_bits_ack & STATUS_ATTN_EVENTS))
3400                 return 1;
3401
3402         return 0;
3403 }
3404
/* Work around a missed MSI: if work is pending while MSI is enabled
 * and the status index has not moved since the previous check, pulse
 * the MSI enable bit off/on and service the interrupt by calling the
 * MSI handler directly.
 */
static void
bnx2_chk_missed_msi(struct bnx2 *bp)
{
	struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
	u32 msi_ctrl;

	if (bnx2_has_work(bnapi)) {
		msi_ctrl = BNX2_RD(bp, BNX2_PCICFG_MSI_CONTROL);
		if (!(msi_ctrl & BNX2_PCICFG_MSI_CONTROL_ENABLE))
			return;

		if (bnapi->last_status_idx == bp->idle_chk_status_idx) {
			BNX2_WR(bp, BNX2_PCICFG_MSI_CONTROL, msi_ctrl &
				~BNX2_PCICFG_MSI_CONTROL_ENABLE);
			BNX2_WR(bp, BNX2_PCICFG_MSI_CONTROL, msi_ctrl);
			bnx2_msi(bp->irq_tbl[0].vector, bnapi);
		}
	}

	/* Remember the index we saw for the next idle check. */
	bp->idle_chk_status_idx = bnapi->last_status_idx;
}
3426
#ifdef BCM_CNIC
/* Hand the status block to the registered CNIC driver, if any.  The
 * handler's return value is recorded in cnic_tag; bnx2_has_work()
 * compares it against the current status index to detect pending
 * CNIC work.
 */
static void bnx2_poll_cnic(struct bnx2 *bp, struct bnx2_napi *bnapi)
{
	struct cnic_ops *c_ops;

	if (!bnapi->cnic_present)
		return;

	/* cnic_ops may be torn down concurrently; the dereference is
	 * protected by RCU.
	 */
	rcu_read_lock();
	c_ops = rcu_dereference(bp->cnic_ops);
	if (c_ops)
		bnapi->cnic_tag = c_ops->cnic_handler(bp->cnic_data,
						      bnapi->status_blk.msi);
	rcu_read_unlock();
}
#endif
3443
/* Service link/attention events.  An event is pending when the
 * attention bits in the status block differ from their acknowledged
 * copies.
 */
static void bnx2_poll_link(struct bnx2 *bp, struct bnx2_napi *bnapi)
{
	struct status_block *sblk = bnapi->status_blk.msi;
	u32 status_attn_bits = sblk->status_attn_bits;
	u32 status_attn_bits_ack = sblk->status_attn_bits_ack;

	if ((status_attn_bits & STATUS_ATTN_EVENTS) !=
	    (status_attn_bits_ack & STATUS_ATTN_EVENTS)) {

		bnx2_phy_int(bp, bnapi);

		/* This is needed to take care of transient status
		 * during link changes.
		 */
		BNX2_WR(bp, BNX2_HC_COMMAND,
			bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
		BNX2_RD(bp, BNX2_HC_COMMAND);
	}
}
3463
/* Run one pass of fast-path work for this vector: reap TX completions
 * first, then process received packets against the remaining NAPI
 * budget.  Returns the updated work_done total (only RX packets are
 * counted against the budget).
 */
static int bnx2_poll_work(struct bnx2 *bp, struct bnx2_napi *bnapi,
			  int work_done, int budget)
{
	struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
	struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;

	if (bnx2_get_hw_tx_cons(bnapi) != txr->hw_tx_cons)
		bnx2_tx_int(bp, bnapi, 0);

	if (bnx2_get_hw_rx_cons(bnapi) != rxr->rx_cons)
		work_done += bnx2_rx_int(bp, bnapi, budget - work_done);

	return work_done;
}
3478
/* NAPI poll handler for an MSI-X vector.  Only fast-path TX/RX work
 * is processed here; link/attention and CNIC handling are done in
 * bnx2_poll().
 */
static int bnx2_poll_msix(struct napi_struct *napi, int budget)
{
	struct bnx2_napi *bnapi = container_of(napi, struct bnx2_napi, napi);
	struct bnx2 *bp = bnapi->bp;
	int work_done = 0;
	struct status_block_msix *sblk = bnapi->status_blk.msix;

	while (1) {
		work_done = bnx2_poll_work(bp, bnapi, work_done, budget);
		if (unlikely(work_done >= budget))
			break;

		bnapi->last_status_idx = sblk->status_idx;
		/* status idx must be read before checking for more work. */
		rmb();
		if (likely(!bnx2_has_fast_work(bnapi))) {

			/* All caught up: finish polling and re-arm this
			 * vector with the latest status index.
			 */
			napi_complete(napi);
			BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
				BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
				bnapi->last_status_idx);
			break;
		}
	}
	return work_done;
}
3505
/* NAPI poll handler for the INTx/MSI (non-MSI-X) case: handles link
 * events, fast-path TX/RX and CNIC work, then re-enables interrupts
 * when all work is done.
 */
static int bnx2_poll(struct napi_struct *napi, int budget)
{
	struct bnx2_napi *bnapi = container_of(napi, struct bnx2_napi, napi);
	struct bnx2 *bp = bnapi->bp;
	int work_done = 0;
	struct status_block *sblk = bnapi->status_blk.msi;

	while (1) {
		bnx2_poll_link(bp, bnapi);

		work_done = bnx2_poll_work(bp, bnapi, work_done, budget);

#ifdef BCM_CNIC
		bnx2_poll_cnic(bp, bnapi);
#endif

		/* bnapi->last_status_idx is used below to tell the hw how
		 * much work has been processed, so we must read it before
		 * checking for more work.
		 */
		bnapi->last_status_idx = sblk->status_idx;

		if (unlikely(work_done >= budget))
			break;

		rmb();
		if (likely(!bnx2_has_work(bnapi))) {
			napi_complete(napi);
			if (likely(bp->flags & BNX2_FLAG_USING_MSI_OR_MSIX)) {
				BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
					BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
					bnapi->last_status_idx);
				break;
			}
			/* INTx: write the index twice, first with the
			 * interrupt still masked and then unmasked --
			 * NOTE(review): inferred from the MASK_INT bit
			 * usage; confirm against chip documentation.
			 */
			BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
				BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
				BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
				bnapi->last_status_idx);

			BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
				BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
				bnapi->last_status_idx);
			break;
		}
	}

	return work_done;
}
3554
/* Called with rtnl_lock from vlan functions and also netif_tx_lock
 * from set_multicast.
 */
static void
bnx2_set_rx_mode(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	u32 rx_mode, sort_mode;
	struct netdev_hw_addr *ha;
	int i;

	if (!netif_running(dev))
		return;

	spin_lock_bh(&bp->phy_lock);

	/* Start from the current mode with promiscuous and keep-VLAN
	 * cleared; both are re-added below as needed.
	 */
	rx_mode = bp->rx_mode & ~(BNX2_EMAC_RX_MODE_PROMISCUOUS |
				  BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG);
	sort_mode = 1 | BNX2_RPM_SORT_USER0_BC_EN;
	/* Keep the VLAN tag in the rx buffer when hw tag stripping is
	 * disabled and the chip supports keeping it.
	 */
	if (!(dev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
	     (bp->flags & BNX2_FLAG_CAN_KEEP_VLAN))
		rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
	if (dev->flags & IFF_PROMISC) {
		/* Promiscuous mode. */
		rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
		sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN |
			     BNX2_RPM_SORT_USER0_PROM_VLAN;
	}
	else if (dev->flags & IFF_ALLMULTI) {
		/* Accept all multicast: fill the hash filter with 1s. */
		for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
			BNX2_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
				0xffffffff);
		}
		sort_mode |= BNX2_RPM_SORT_USER0_MC_EN;
	}
	else {
		/* Accept one or more multicast(s). */
		u32 mc_filter[NUM_MC_HASH_REGISTERS];
		u32 regidx;
		u32 bit;
		u32 crc;

		memset(mc_filter, 0, 4 * NUM_MC_HASH_REGISTERS);

		/* Hash each address to one bit of the filter: the low
		 * CRC byte's upper 3 bits pick the register, the lower
		 * 5 bits pick the bit within it.
		 */
		netdev_for_each_mc_addr(ha, dev) {
			crc = ether_crc_le(ETH_ALEN, ha->addr);
			bit = crc & 0xff;
			regidx = (bit & 0xe0) >> 5;
			bit &= 0x1f;
			mc_filter[regidx] |= (1 << bit);
		}

		for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
			BNX2_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
				mc_filter[i]);
		}

		sort_mode |= BNX2_RPM_SORT_USER0_MC_HSH_EN;
	}

	/* Fall back to promiscuous when there are more unicast
	 * addresses than perfect-match filter slots.
	 */
	if (netdev_uc_count(dev) > BNX2_MAX_UNICAST_ADDRESSES) {
		rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
		sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN |
			     BNX2_RPM_SORT_USER0_PROM_VLAN;
	} else if (!(dev->flags & IFF_PROMISC)) {
		/* Add all entries into to the match filter list */
		i = 0;
		netdev_for_each_uc_addr(ha, dev) {
			bnx2_set_mac_addr(bp, ha->addr,
					  i + BNX2_START_UNICAST_ADDRESS_INDEX);
			sort_mode |= (1 <<
				      (i + BNX2_START_UNICAST_ADDRESS_INDEX));
			i++;
		}

	}

	if (rx_mode != bp->rx_mode) {
		bp->rx_mode = rx_mode;
		BNX2_WR(bp, BNX2_EMAC_RX_MODE, rx_mode);
	}

	/* Program the sort rules: clear, load, then set the enable
	 * bit last.
	 */
	BNX2_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
	BNX2_WR(bp, BNX2_RPM_SORT_USER0, sort_mode);
	BNX2_WR(bp, BNX2_RPM_SORT_USER0, sort_mode | BNX2_RPM_SORT_USER0_ENA);

	spin_unlock_bh(&bp->phy_lock);
}
3643
3644 static int
3645 check_fw_section(const struct firmware *fw,
3646                  const struct bnx2_fw_file_section *section,
3647                  u32 alignment, bool non_empty)
3648 {
3649         u32 offset = be32_to_cpu(section->offset);
3650         u32 len = be32_to_cpu(section->len);
3651
3652         if ((offset == 0 && len != 0) || offset >= fw->size || offset & 3)
3653                 return -EINVAL;
3654         if ((non_empty && len == 0) || len > fw->size - offset ||
3655             len & (alignment - 1))
3656                 return -EINVAL;
3657         return 0;
3658 }
3659
3660 static int
3661 check_mips_fw_entry(const struct firmware *fw,
3662                     const struct bnx2_mips_fw_file_entry *entry)
3663 {
3664         if (check_fw_section(fw, &entry->text, 4, true) ||
3665             check_fw_section(fw, &entry->data, 4, false) ||
3666             check_fw_section(fw, &entry->rodata, 4, false))
3667                 return -EINVAL;
3668         return 0;
3669 }
3670
3671 static void bnx2_release_firmware(struct bnx2 *bp)
3672 {
3673         if (bp->rv2p_firmware) {
3674                 release_firmware(bp->mips_firmware);
3675                 release_firmware(bp->rv2p_firmware);
3676                 bp->rv2p_firmware = NULL;
3677         }
3678 }
3679
/* Load and validate the MIPS and RV2P firmware images from userspace.
 * File selection depends on the chip: 5709 parts use the "09" images
 * (with a dedicated RV2P build for the A0/A1 steppings); all other
 * chips use the "06" images.  On success both images remain cached in
 * bp->mips_firmware / bp->rv2p_firmware; on failure neither is held.
 */
static int bnx2_request_uncached_firmware(struct bnx2 *bp)
{
	const char *mips_fw_file, *rv2p_fw_file;
	const struct bnx2_mips_fw_file *mips_fw;
	const struct bnx2_rv2p_fw_file *rv2p_fw;
	int rc;

	if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
		mips_fw_file = FW_MIPS_FILE_09;
		if ((BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5709_A0) ||
		    (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5709_A1))
			rv2p_fw_file = FW_RV2P_FILE_09_Ax;
		else
			rv2p_fw_file = FW_RV2P_FILE_09;
	} else {
		mips_fw_file = FW_MIPS_FILE_06;
		rv2p_fw_file = FW_RV2P_FILE_06;
	}

	rc = request_firmware(&bp->mips_firmware, mips_fw_file, &bp->pdev->dev);
	if (rc) {
		pr_err("Can't load firmware file \"%s\"\n", mips_fw_file);
		goto out;
	}

	rc = request_firmware(&bp->rv2p_firmware, rv2p_fw_file, &bp->pdev->dev);
	if (rc) {
		pr_err("Can't load firmware file \"%s\"\n", rv2p_fw_file);
		goto err_release_mips_firmware;
	}
	/* Sanity-check every section of both images before anything is
	 * pushed to the on-chip processors.
	 */
	mips_fw = (const struct bnx2_mips_fw_file *) bp->mips_firmware->data;
	rv2p_fw = (const struct bnx2_rv2p_fw_file *) bp->rv2p_firmware->data;
	if (bp->mips_firmware->size < sizeof(*mips_fw) ||
	    check_mips_fw_entry(bp->mips_firmware, &mips_fw->com) ||
	    check_mips_fw_entry(bp->mips_firmware, &mips_fw->cp) ||
	    check_mips_fw_entry(bp->mips_firmware, &mips_fw->rxp) ||
	    check_mips_fw_entry(bp->mips_firmware, &mips_fw->tpat) ||
	    check_mips_fw_entry(bp->mips_firmware, &mips_fw->txp)) {
		pr_err("Firmware file \"%s\" is invalid\n", mips_fw_file);
		rc = -EINVAL;
		goto err_release_firmware;
	}
	if (bp->rv2p_firmware->size < sizeof(*rv2p_fw) ||
	    check_fw_section(bp->rv2p_firmware, &rv2p_fw->proc1.rv2p, 8, true) ||
	    check_fw_section(bp->rv2p_firmware, &rv2p_fw->proc2.rv2p, 8, true)) {
		pr_err("Firmware file \"%s\" is invalid\n", rv2p_fw_file);
		rc = -EINVAL;
		goto err_release_firmware;
	}
out:
	return rc;

err_release_firmware:
	release_firmware(bp->rv2p_firmware);
	bp->rv2p_firmware = NULL;
err_release_mips_firmware:
	release_firmware(bp->mips_firmware);
	goto out;
}
3739
3740 static int bnx2_request_firmware(struct bnx2 *bp)
3741 {
3742         return bp->rv2p_firmware ? 0 : bnx2_request_uncached_firmware(bp);
3743 }
3744
3745 static u32
3746 rv2p_fw_fixup(u32 rv2p_proc, int idx, u32 loc, u32 rv2p_code)
3747 {
3748         switch (idx) {
3749         case RV2P_P1_FIXUP_PAGE_SIZE_IDX:
3750                 rv2p_code &= ~RV2P_BD_PAGE_SIZE_MSK;
3751                 rv2p_code |= RV2P_BD_PAGE_SIZE;
3752                 break;
3753         }
3754         return rv2p_code;
3755 }
3756
/* Download one RV2P processor image.  Each 64-bit instruction is
 * written as an INSTR_HIGH/INSTR_LOW register pair and committed by
 * writing its index (OR'd with the write command) to the proc's
 * address/command register.  Always returns 0.
 */
static int
load_rv2p_fw(struct bnx2 *bp, u32 rv2p_proc,
	     const struct bnx2_rv2p_fw_file_entry *fw_entry)
{
	u32 rv2p_code_len, file_offset;
	__be32 *rv2p_code;
	int i;
	u32 val, cmd, addr;

	rv2p_code_len = be32_to_cpu(fw_entry->rv2p.len);
	file_offset = be32_to_cpu(fw_entry->rv2p.offset);

	rv2p_code = (__be32 *)(bp->rv2p_firmware->data + file_offset);

	/* Select the command value and address register for the
	 * target processor.
	 */
	if (rv2p_proc == RV2P_PROC1) {
		cmd = BNX2_RV2P_PROC1_ADDR_CMD_RDWR;
		addr = BNX2_RV2P_PROC1_ADDR_CMD;
	} else {
		cmd = BNX2_RV2P_PROC2_ADDR_CMD_RDWR;
		addr = BNX2_RV2P_PROC2_ADDR_CMD;
	}

	for (i = 0; i < rv2p_code_len; i += 8) {
		BNX2_WR(bp, BNX2_RV2P_INSTR_HIGH, be32_to_cpu(*rv2p_code));
		rv2p_code++;
		BNX2_WR(bp, BNX2_RV2P_INSTR_LOW, be32_to_cpu(*rv2p_code));
		rv2p_code++;

		/* Commit the 64-bit instruction at index i / 8. */
		val = (i / 8) | cmd;
		BNX2_WR(bp, addr, val);
	}

	/* Apply up to 8 fixups: each entry names a 32-bit word index
	 * whose instruction is patched via rv2p_fw_fixup() and
	 * rewritten in place.
	 */
	rv2p_code = (__be32 *)(bp->rv2p_firmware->data + file_offset);
	for (i = 0; i < 8; i++) {
		u32 loc, code;

		loc = be32_to_cpu(fw_entry->fixup[i]);
		if (loc && ((loc * 4) < rv2p_code_len)) {
			code = be32_to_cpu(*(rv2p_code + loc - 1));
			BNX2_WR(bp, BNX2_RV2P_INSTR_HIGH, code);
			code = be32_to_cpu(*(rv2p_code + loc));
			code = rv2p_fw_fixup(rv2p_proc, i, loc, code);
			BNX2_WR(bp, BNX2_RV2P_INSTR_LOW, code);

			val = (loc / 2) | cmd;
			BNX2_WR(bp, addr, val);
		}
	}

	/* Reset the processor, un-stall is done later. */
	if (rv2p_proc == RV2P_PROC1) {
		BNX2_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC1_RESET);
	}
	else {
		BNX2_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC2_RESET);
	}

	return 0;
}
3816
3817 static int
3818 load_cpu_fw(struct bnx2 *bp, const struct cpu_reg *cpu_reg,
3819             const struct bnx2_mips_fw_file_entry *fw_entry)
3820 {
3821         u32 addr, len, file_offset;
3822         __be32 *data;
3823         u32 offset;
3824         u32 val;
3825
3826         /* Halt the CPU. */
3827         val = bnx2_reg_rd_ind(bp, cpu_reg->mode);
3828         val |= cpu_reg->mode_value_halt;
3829         bnx2_reg_wr_ind(bp, cpu_reg->mode, val);
3830         bnx2_reg_wr_ind(bp, cpu_reg->state, cpu_reg->state_value_clear);
3831
3832         /* Load the Text area. */
3833         addr = be32_to_cpu(fw_entry->text.addr);
3834         len = be32_to_cpu(fw_entry->text.len);
3835         file_offset = be32_to_cpu(fw_entry->text.offset);
3836         data = (__be32 *)(bp->mips_firmware->data + file_offset);
3837
3838         offset = cpu_reg->spad_base + (addr - cpu_reg->mips_view_base);
3839         if (len) {
3840                 int j;
3841
3842                 for (j = 0; j < (len / 4); j++, offset += 4)
3843                         bnx2_reg_wr_ind(bp, offset, be32_to_cpu(data[j]));
3844         }
3845
3846         /* Load the Data area. */
3847         addr = be32_to_cpu(fw_entry->data.addr);
3848         len = be32_to_cpu(fw_entry->data.len);
3849         file_offset = be32_to_cpu(fw_entry->data.offset);
3850         data = (__be32 *)(bp->mips_firmware->data + file_offset);
3851
3852         offset = cpu_reg->spad_base + (addr - cpu_reg->mips_view_base);
3853         if (len) {
3854                 int j;
3855
3856                 for (j = 0; j < (len / 4); j++, offset += 4)
3857                         bnx2_reg_wr_ind(bp, offset, be32_to_cpu(data[j]));
3858         }
3859
3860         /* Load the Read-Only area. */
3861         addr = be32_to_cpu(fw_entry->rodata.addr);
3862         len = be32_to_cpu(fw_entry->rodata.len);
3863         file_offset = be32_to_cpu(fw_entry->rodata.offset);
3864         data = (__be32 *)(bp->mips_firmware->data + file_offset);
3865
3866         offset = cpu_reg->spad_base + (addr - cpu_reg->mips_view_base);
3867         if (len) {
3868                 int j;
3869
3870                 for (j = 0; j < (len / 4); j++, offset += 4)
3871                         bnx2_reg_wr_ind(bp, offset, be32_to_cpu(data[j]));
3872         }
3873
3874         /* Clear the pre-fetch instruction. */
3875         bnx2_reg_wr_ind(bp, cpu_reg->inst, 0);
3876
3877         val = be32_to_cpu(fw_entry->start_addr);
3878         bnx2_reg_wr_ind(bp, cpu_reg->pc, val);
3879
3880         /* Start the CPU. */
3881         val = bnx2_reg_rd_ind(bp, cpu_reg->mode);
3882         val &= ~cpu_reg->mode_value_halt;
3883         bnx2_reg_wr_ind(bp, cpu_reg->state, cpu_reg->state_value_clear);
3884         bnx2_reg_wr_ind(bp, cpu_reg->mode, val);
3885
3886         return 0;
3887 }
3888
/* Download firmware into the two RV2P processors and the five on-chip
 * MIPS processors.  load_rv2p_fw() always returns 0 (see its
 * definition), so only the load_cpu_fw() calls are error-checked.
 */
static int
bnx2_init_cpus(struct bnx2 *bp)
{
	const struct bnx2_mips_fw_file *mips_fw =
		(const struct bnx2_mips_fw_file *) bp->mips_firmware->data;
	const struct bnx2_rv2p_fw_file *rv2p_fw =
		(const struct bnx2_rv2p_fw_file *) bp->rv2p_firmware->data;
	int rc;

	/* Initialize the RV2P processor. */
	load_rv2p_fw(bp, RV2P_PROC1, &rv2p_fw->proc1);
	load_rv2p_fw(bp, RV2P_PROC2, &rv2p_fw->proc2);

	/* Initialize the RX Processor. */
	rc = load_cpu_fw(bp, &cpu_reg_rxp, &mips_fw->rxp);
	if (rc)
		goto init_cpu_err;

	/* Initialize the TX Processor. */
	rc = load_cpu_fw(bp, &cpu_reg_txp, &mips_fw->txp);
	if (rc)
		goto init_cpu_err;

	/* Initialize the TX Patch-up Processor. */
	rc = load_cpu_fw(bp, &cpu_reg_tpat, &mips_fw->tpat);
	if (rc)
		goto init_cpu_err;

	/* Initialize the Completion Processor. */
	rc = load_cpu_fw(bp, &cpu_reg_com, &mips_fw->com);
	if (rc)
		goto init_cpu_err;

	/* Initialize the Command Processor. */
	rc = load_cpu_fw(bp, &cpu_reg_cp, &mips_fw->cp);

init_cpu_err:
	return rc;
}
3928
/* Configure the MAC and PHY for Wake-on-LAN and notify the bootcode
 * that the device is suspending (with or without WOL armed).
 */
static void
bnx2_setup_wol(struct bnx2 *bp)
{
	int i;
	u32 val, wol_msg;

	if (bp->wol) {
		u32 advertising;
		u8 autoneg;

		autoneg = bp->autoneg;
		advertising = bp->advertising;

		/* For copper, renegotiate the link advertising only
		 * 10/100 -- NOTE(review): presumably to keep the PHY
		 * at a WOL-compatible low-power speed; confirm.
		 */
		if (bp->phy_port == PORT_TP) {
			bp->autoneg = AUTONEG_SPEED;
			bp->advertising = ADVERTISED_10baseT_Half |
				ADVERTISED_10baseT_Full |
				ADVERTISED_100baseT_Half |
				ADVERTISED_100baseT_Full |
				ADVERTISED_Autoneg;
		}

		spin_lock_bh(&bp->phy_lock);
		bnx2_setup_phy(bp, bp->phy_port);
		spin_unlock_bh(&bp->phy_lock);

		/* Restore the user's settings for the next bring-up. */
		bp->autoneg = autoneg;
		bp->advertising = advertising;

		bnx2_set_mac_addr(bp, bp->dev->dev_addr, 0);

		val = BNX2_RD(bp, BNX2_EMAC_MODE);

		/* Enable port mode. */
		val &= ~BNX2_EMAC_MODE_PORT;
		val |= BNX2_EMAC_MODE_MPKT_RCVD |
		       BNX2_EMAC_MODE_ACPI_RCVD |
		       BNX2_EMAC_MODE_MPKT;
		if (bp->phy_port == PORT_TP) {
			val |= BNX2_EMAC_MODE_PORT_MII;
		} else {
			val |= BNX2_EMAC_MODE_PORT_GMII;
			if (bp->line_speed == SPEED_2500)
				val |= BNX2_EMAC_MODE_25G_MODE;
		}

		BNX2_WR(bp, BNX2_EMAC_MODE, val);

		/* receive all multicast */
		for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
			BNX2_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
				0xffffffff);
		}
		BNX2_WR(bp, BNX2_EMAC_RX_MODE, BNX2_EMAC_RX_MODE_SORT_MODE);

		/* Sort broadcast and multicast; write the enable bit
		 * last, after the rules are programmed.
		 */
		val = 1 | BNX2_RPM_SORT_USER0_BC_EN | BNX2_RPM_SORT_USER0_MC_EN;
		BNX2_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
		BNX2_WR(bp, BNX2_RPM_SORT_USER0, val);
		BNX2_WR(bp, BNX2_RPM_SORT_USER0, val | BNX2_RPM_SORT_USER0_ENA);

		/* Need to enable EMAC and RPM for WOL. */
		BNX2_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
			BNX2_MISC_ENABLE_SET_BITS_RX_PARSER_MAC_ENABLE |
			BNX2_MISC_ENABLE_SET_BITS_TX_HEADER_Q_ENABLE |
			BNX2_MISC_ENABLE_SET_BITS_EMAC_ENABLE);

		val = BNX2_RD(bp, BNX2_RPM_CONFIG);
		val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
		BNX2_WR(bp, BNX2_RPM_CONFIG, val);

		wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
	} else {
			wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
	}

	if (!(bp->flags & BNX2_FLAG_NO_WOL)) {
		u32 val;

		wol_msg |= BNX2_DRV_MSG_DATA_WAIT3;
		if (bp->fw_last_msg || BNX2_CHIP(bp) != BNX2_CHIP_5709) {
			bnx2_fw_sync(bp, wol_msg, 1, 0);
			return;
		}
		/* Tell firmware not to power down the PHY yet, otherwise
		 * the chip will take a long time to respond to MMIO reads.
		 */
		val = bnx2_shmem_rd(bp, BNX2_PORT_FEATURE);
		bnx2_shmem_wr(bp, BNX2_PORT_FEATURE,
			      val | BNX2_PORT_FEATURE_ASF_ENABLED);
		bnx2_fw_sync(bp, wol_msg, 1, 0);
		bnx2_shmem_wr(bp, BNX2_PORT_FEATURE, val);
	}

}
4023
/* Transition the device between power states.  Only D0 and D3hot are
 * supported; any other state returns -EINVAL.
 */
static int
bnx2_set_power_state(struct bnx2 *bp, pci_power_t state)
{
	switch (state) {
	case PCI_D0: {
		u32 val;

		pci_enable_wake(bp->pdev, PCI_D0, false);
		pci_set_power_state(bp->pdev, PCI_D0);

		/* Keep magic/ACPI packet reception bits but drop the
		 * MPKT (WOL) mode bit now that we are awake.
		 */
		val = BNX2_RD(bp, BNX2_EMAC_MODE);
		val |= BNX2_EMAC_MODE_MPKT_RCVD | BNX2_EMAC_MODE_ACPI_RCVD;
		val &= ~BNX2_EMAC_MODE_MPKT;
		BNX2_WR(bp, BNX2_EMAC_MODE, val);

		val = BNX2_RD(bp, BNX2_RPM_CONFIG);
		val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
		BNX2_WR(bp, BNX2_RPM_CONFIG, val);
		break;
	}
	case PCI_D3hot: {
		bnx2_setup_wol(bp);
		pci_wake_from_d3(bp->pdev, bp->wol);
		/* 5706 A0/A1: only enter D3hot when WOL is armed --
		 * NOTE(review): presumably a chip erratum; confirm
		 * against the errata sheet.
		 */
		if ((BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A0) ||
		    (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A1)) {

			if (bp->wol)
				pci_set_power_state(bp->pdev, PCI_D3hot);
			break;

		}
		if (!bp->fw_last_msg && BNX2_CHIP(bp) == BNX2_CHIP_5709) {
			u32 val;

			/* Tell firmware not to power down the PHY yet,
			 * otherwise the other port may not respond to
			 * MMIO reads.
			 */
			val = bnx2_shmem_rd(bp, BNX2_BC_STATE_CONDITION);
			val &= ~BNX2_CONDITION_PM_STATE_MASK;
			val |= BNX2_CONDITION_PM_STATE_UNPREP;
			bnx2_shmem_wr(bp, BNX2_BC_STATE_CONDITION, val);
		}
		pci_set_power_state(bp->pdev, PCI_D3hot);

		/* No more memory access after this point until
		 * device is brought back to D0.
		 */
		break;
	}
	default:
		return -EINVAL;
	}
	return 0;
}
4079
4080 static int
4081 bnx2_acquire_nvram_lock(struct bnx2 *bp)
4082 {
4083         u32 val;
4084         int j;
4085
4086         /* Request access to the flash interface. */
4087         BNX2_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_SET2);
4088         for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4089                 val = BNX2_RD(bp, BNX2_NVM_SW_ARB);
4090                 if (val & BNX2_NVM_SW_ARB_ARB_ARB2)
4091                         break;
4092
4093                 udelay(5);
4094         }
4095
4096         if (j >= NVRAM_TIMEOUT_COUNT)
4097                 return -EBUSY;
4098
4099         return 0;
4100 }
4101
4102 static int
4103 bnx2_release_nvram_lock(struct bnx2 *bp)
4104 {
4105         int j;
4106         u32 val;
4107
4108         /* Relinquish nvram interface. */
4109         BNX2_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_CLR2);
4110
4111         for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4112                 val = BNX2_RD(bp, BNX2_NVM_SW_ARB);
4113                 if (!(val & BNX2_NVM_SW_ARB_ARB_ARB2))
4114                         break;
4115
4116                 udelay(5);
4117         }
4118
4119         if (j >= NVRAM_TIMEOUT_COUNT)
4120                 return -EBUSY;
4121
4122         return 0;
4123 }
4124
4125
4126 static int
4127 bnx2_enable_nvram_write(struct bnx2 *bp)
4128 {
4129         u32 val;
4130
4131         val = BNX2_RD(bp, BNX2_MISC_CFG);
4132         BNX2_WR(bp, BNX2_MISC_CFG, val | BNX2_MISC_CFG_NVM_WR_EN_PCI);
4133
4134         if (bp->flash_info->flags & BNX2_NV_WREN) {
4135                 int j;
4136
4137                 BNX2_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
4138                 BNX2_WR(bp, BNX2_NVM_COMMAND,
4139                         BNX2_NVM_COMMAND_WREN | BNX2_NVM_COMMAND_DOIT);
4140
4141                 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4142                         udelay(5);
4143
4144                         val = BNX2_RD(bp, BNX2_NVM_COMMAND);
4145                         if (val & BNX2_NVM_COMMAND_DONE)
4146                                 break;
4147                 }
4148
4149                 if (j >= NVRAM_TIMEOUT_COUNT)
4150                         return -EBUSY;
4151         }
4152         return 0;
4153 }
4154
4155 static void
4156 bnx2_disable_nvram_write(struct bnx2 *bp)
4157 {
4158         u32 val;
4159
4160         val = BNX2_RD(bp, BNX2_MISC_CFG);
4161         BNX2_WR(bp, BNX2_MISC_CFG, val & ~BNX2_MISC_CFG_NVM_WR_EN);
4162 }
4163
4164
4165 static void
4166 bnx2_enable_nvram_access(struct bnx2 *bp)
4167 {
4168         u32 val;
4169
4170         val = BNX2_RD(bp, BNX2_NVM_ACCESS_ENABLE);
4171         /* Enable both bits, even on read. */
4172         BNX2_WR(bp, BNX2_NVM_ACCESS_ENABLE,
4173                 val | BNX2_NVM_ACCESS_ENABLE_EN | BNX2_NVM_ACCESS_ENABLE_WR_EN);
4174 }
4175
4176 static void
4177 bnx2_disable_nvram_access(struct bnx2 *bp)
4178 {
4179         u32 val;
4180
4181         val = BNX2_RD(bp, BNX2_NVM_ACCESS_ENABLE);
4182         /* Disable both bits, even after read. */
4183         BNX2_WR(bp, BNX2_NVM_ACCESS_ENABLE,
4184                 val & ~(BNX2_NVM_ACCESS_ENABLE_EN |
4185                         BNX2_NVM_ACCESS_ENABLE_WR_EN));
4186 }
4187
4188 static int
4189 bnx2_nvram_erase_page(struct bnx2 *bp, u32 offset)
4190 {
4191         u32 cmd;
4192         int j;
4193
4194         if (bp->flash_info->flags & BNX2_NV_BUFFERED)
4195                 /* Buffered flash, no erase needed */
4196                 return 0;
4197
4198         /* Build an erase command */
4199         cmd = BNX2_NVM_COMMAND_ERASE | BNX2_NVM_COMMAND_WR |
4200               BNX2_NVM_COMMAND_DOIT;
4201
4202         /* Need to clear DONE bit separately. */
4203         BNX2_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
4204
4205         /* Address of the NVRAM to read from. */
4206         BNX2_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
4207
4208         /* Issue an erase command. */
4209         BNX2_WR(bp, BNX2_NVM_COMMAND, cmd);
4210
4211         /* Wait for completion. */
4212         for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4213                 u32 val;
4214
4215                 udelay(5);
4216
4217                 val = BNX2_RD(bp, BNX2_NVM_COMMAND);
4218                 if (val & BNX2_NVM_COMMAND_DONE)
4219                         break;
4220         }
4221
4222         if (j >= NVRAM_TIMEOUT_COUNT)
4223                 return -EBUSY;
4224
4225         return 0;
4226 }
4227
4228 static int
4229 bnx2_nvram_read_dword(struct bnx2 *bp, u32 offset, u8 *ret_val, u32 cmd_flags)
4230 {
4231         u32 cmd;
4232         int j;
4233
4234         /* Build the command word. */
4235         cmd = BNX2_NVM_COMMAND_DOIT | cmd_flags;
4236
4237         /* Calculate an offset of a buffered flash, not needed for 5709. */
4238         if (bp->flash_info->flags & BNX2_NV_TRANSLATE) {
4239                 offset = ((offset / bp->flash_info->page_size) <<
4240                            bp->flash_info->page_bits) +
4241                           (offset % bp->flash_info->page_size);
4242         }
4243
4244         /* Need to clear DONE bit separately. */
4245         BNX2_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
4246
4247         /* Address of the NVRAM to read from. */
4248         BNX2_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
4249
4250         /* Issue a read command. */
4251         BNX2_WR(bp, BNX2_NVM_COMMAND, cmd);
4252
4253         /* Wait for completion. */
4254         for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4255                 u32 val;
4256
4257                 udelay(5);
4258
4259                 val = BNX2_RD(bp, BNX2_NVM_COMMAND);
4260                 if (val & BNX2_NVM_COMMAND_DONE) {
4261                         __be32 v = cpu_to_be32(BNX2_RD(bp, BNX2_NVM_READ));
4262                         memcpy(ret_val, &v, 4);
4263                         break;
4264                 }
4265         }
4266         if (j >= NVRAM_TIMEOUT_COUNT)
4267                 return -EBUSY;
4268
4269         return 0;
4270 }
4271
4272
4273 static int
4274 bnx2_nvram_write_dword(struct bnx2 *bp, u32 offset, u8 *val, u32 cmd_flags)
4275 {
4276         u32 cmd;
4277         __be32 val32;
4278         int j;
4279
4280         /* Build the command word. */
4281         cmd = BNX2_NVM_COMMAND_DOIT | BNX2_NVM_COMMAND_WR | cmd_flags;
4282
4283         /* Calculate an offset of a buffered flash, not needed for 5709. */
4284         if (bp->flash_info->flags & BNX2_NV_TRANSLATE) {
4285                 offset = ((offset / bp->flash_info->page_size) <<
4286                           bp->flash_info->page_bits) +
4287                          (offset % bp->flash_info->page_size);
4288         }
4289
4290         /* Need to clear DONE bit separately. */
4291         BNX2_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
4292
4293         memcpy(&val32, val, 4);
4294
4295         /* Write the data. */
4296         BNX2_WR(bp, BNX2_NVM_WRITE, be32_to_cpu(val32));
4297
4298         /* Address of the NVRAM to write to. */
4299         BNX2_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
4300
4301         /* Issue the write command. */
4302         BNX2_WR(bp, BNX2_NVM_COMMAND, cmd);
4303
4304         /* Wait for completion. */
4305         for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4306                 udelay(5);
4307
4308                 if (BNX2_RD(bp, BNX2_NVM_COMMAND) & BNX2_NVM_COMMAND_DONE)
4309                         break;
4310         }
4311         if (j >= NVRAM_TIMEOUT_COUNT)
4312                 return -EBUSY;
4313
4314         return 0;
4315 }
4316
/* Identify the flash/EEPROM part behind the NVRAM interface, cache its
 * descriptor in bp->flash_info, and record the device size in
 * bp->flash_size.
 *
 * The 5709 has a single fixed flash spec.  Older chips are matched
 * against flash_table[] using the strapping bits in NVM_CFG1; if the
 * interface has not been reconfigured yet, the matching entry's config
 * registers are programmed under the NVRAM arbitration lock.
 *
 * Returns 0 on success, -ENODEV for an unknown part, or the error from
 * bnx2_acquire_nvram_lock().
 */
static int
bnx2_init_nvram(struct bnx2 *bp)
{
	u32 val;
	int j, entry_count, rc = 0;
	const struct flash_spec *flash;

	/* 5709: one known flash spec, nothing to probe. */
	if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
		bp->flash_info = &flash_5709;
		goto get_flash_size;
	}

	/* Determine the selected interface. */
	val = BNX2_RD(bp, BNX2_NVM_CFG1);

	entry_count = ARRAY_SIZE(flash_table);

	/* Bit 30 set means the flash interface was already reconfigured
	 * (e.g. by the boot code); match on the backup strap bits of each
	 * entry's config1 value.
	 */
	if (val & 0x40000000) {

		/* Flash interface has been reconfigured */
		for (j = 0, flash = &flash_table[0]; j < entry_count;
		     j++, flash++) {
			if ((val & FLASH_BACKUP_STRAP_MASK) ==
			    (flash->config1 & FLASH_BACKUP_STRAP_MASK)) {
				bp->flash_info = flash;
				break;
			}
		}
	}
	else {
		u32 mask;
		/* Not yet been reconfigured */

		/* Bit 23 selects which strap field to compare against. */
		if (val & (1 << 23))
			mask = FLASH_BACKUP_STRAP_MASK;
		else
			mask = FLASH_STRAP_MASK;

		for (j = 0, flash = &flash_table[0]; j < entry_count;
			j++, flash++) {

			if ((val & mask) == (flash->strapping & mask)) {
				bp->flash_info = flash;

				/* Request access to the flash interface. */
				if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
					return rc;

				/* Enable access to flash interface */
				bnx2_enable_nvram_access(bp);

				/* Reconfigure the flash interface */
				BNX2_WR(bp, BNX2_NVM_CFG1, flash->config1);
				BNX2_WR(bp, BNX2_NVM_CFG2, flash->config2);
				BNX2_WR(bp, BNX2_NVM_CFG3, flash->config3);
				BNX2_WR(bp, BNX2_NVM_WRITE1, flash->write1);

				/* Disable access to flash interface */
				bnx2_disable_nvram_access(bp);
				bnx2_release_nvram_lock(bp);

				break;
			}
		}
	} /* if (val & 0x40000000) */

	/* Neither loop found a table entry matching the straps. */
	if (j == entry_count) {
		bp->flash_info = NULL;
		pr_alert("Unknown flash/EEPROM type\n");
		return -ENODEV;
	}

get_flash_size:
	/* Prefer the size advertised in shared hw config; fall back to
	 * the flash descriptor's total_size.
	 */
	val = bnx2_shmem_rd(bp, BNX2_SHARED_HW_CFG_CONFIG2);
	val &= BNX2_SHARED_HW_CFG2_NVM_SIZE_MASK;
	if (val)
		bp->flash_size = val;
	else
		bp->flash_size = bp->flash_info->total_size;

	return rc;
}
4399
4400 static int
4401 bnx2_nvram_read(struct bnx2 *bp, u32 offset, u8 *ret_buf,
4402                 int buf_size)
4403 {
4404         int rc = 0;
4405         u32 cmd_flags, offset32, len32, extra;
4406
4407         if (buf_size == 0)
4408                 return 0;
4409
4410         /* Request access to the flash interface. */
4411         if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
4412                 return rc;
4413
4414         /* Enable access to flash interface */
4415         bnx2_enable_nvram_access(bp);
4416
4417         len32 = buf_size;
4418         offset32 = offset;
4419         extra = 0;
4420
4421         cmd_flags = 0;
4422
4423         if (offset32 & 3) {
4424                 u8 buf[4];
4425                 u32 pre_len;
4426
4427                 offset32 &= ~3;
4428                 pre_len = 4 - (offset & 3);
4429
4430                 if (pre_len >= len32) {
4431                         pre_len = len32;
4432                         cmd_flags = BNX2_NVM_COMMAND_FIRST |
4433                                     BNX2_NVM_COMMAND_LAST;
4434                 }
4435                 else {
4436                         cmd_flags = BNX2_NVM_COMMAND_FIRST;
4437                 }
4438
4439                 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
4440
4441                 if (rc)
4442                         return rc;
4443
4444                 memcpy(ret_buf, buf + (offset & 3), pre_len);
4445
4446                 offset32 += 4;
4447                 ret_buf += pre_len;
4448                 len32 -= pre_len;
4449         }
4450         if (len32 & 3) {
4451                 extra = 4 - (len32 & 3);
4452                 len32 = (len32 + 4) & ~3;
4453         }
4454
4455         if (len32 == 4) {
4456                 u8 buf[4];
4457
4458                 if (cmd_flags)
4459                         cmd_flags = BNX2_NVM_COMMAND_LAST;
4460                 else
4461                         cmd_flags = BNX2_NVM_COMMAND_FIRST |
4462                                     BNX2_NVM_COMMAND_LAST;
4463
4464                 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
4465
4466                 memcpy(ret_buf, buf, 4 - extra);
4467         }
4468         else if (len32 > 0) {
4469                 u8 buf[4];
4470
4471                 /* Read the first word. */
4472                 if (cmd_flags)
4473                         cmd_flags = 0;
4474                 else
4475                         cmd_flags = BNX2_NVM_COMMAND_FIRST;
4476
4477                 rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, cmd_flags);
4478
4479                 /* Advance to the next dword. */
4480                 offset32 += 4;
4481                 ret_buf += 4;
4482                 len32 -= 4;
4483
4484                 while (len32 > 4 && rc == 0) {
4485                         rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, 0);
4486
4487                         /* Advance to the next dword. */
4488                         offset32 += 4;
4489                         ret_buf += 4;
4490                         len32 -= 4;
4491                 }
4492
4493                 if (rc)
4494                         return rc;
4495
4496                 cmd_flags = BNX2_NVM_COMMAND_LAST;
4497                 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
4498
4499                 memcpy(ret_buf, buf, 4 - extra);
4500         }
4501
4502         /* Disable access to flash interface */
4503         bnx2_disable_nvram_access(bp);
4504
4505         bnx2_release_nvram_lock(bp);
4506
4507         return rc;
4508 }
4509
/* Write an arbitrary byte range to NVRAM.
 *
 * @offset:   starting byte offset in flash
 * @data_buf: source data
 * @buf_size: number of bytes to write
 *
 * Unaligned head/tail bytes are merged with the existing flash
 * contents (read via bnx2_nvram_read()) into a word-aligned bounce
 * buffer.  The write then proceeds page by page: for non-buffered
 * flash each page is read into @flash_buffer, erased, and rewritten
 * with the old data around the new range preserved.  The NVRAM lock is
 * taken and dropped once per page.
 *
 * Returns 0 on success or a negative errno.
 */
static int
bnx2_nvram_write(struct bnx2 *bp, u32 offset, u8 *data_buf,
		int buf_size)
{
	u32 written, offset32, len32;
	u8 *buf, start[4], end[4], *align_buf = NULL, *flash_buffer = NULL;
	int rc = 0;
	int align_start, align_end;

	buf = data_buf;
	offset32 = offset;
	len32 = buf_size;
	align_start = align_end = 0;

	/* Unaligned start: fetch the existing word so its leading bytes
	 * can be preserved.
	 */
	if ((align_start = (offset32 & 3))) {
		offset32 &= ~3;
		len32 += align_start;
		if (len32 < 4)
			len32 = 4;
		if ((rc = bnx2_nvram_read(bp, offset32, start, 4)))
			return rc;
	}

	/* Unaligned end: fetch the existing final word likewise. */
	if (len32 & 3) {
		align_end = 4 - (len32 & 3);
		len32 += align_end;
		if ((rc = bnx2_nvram_read(bp, offset32 + len32 - 4, end, 4)))
			return rc;
	}

	/* Merge old head/tail bytes and the new data into one aligned
	 * bounce buffer.
	 */
	if (align_start || align_end) {
		align_buf = kmalloc(len32, GFP_KERNEL);
		if (align_buf == NULL)
			return -ENOMEM;
		if (align_start) {
			memcpy(align_buf, start, 4);
		}
		if (align_end) {
			memcpy(align_buf + len32 - 4, end, 4);
		}
		memcpy(align_buf + align_start, data_buf, buf_size);
		buf = align_buf;
	}

	/* Non-buffered flash needs a scratch page for read-modify-write.
	 * 264 bytes covers the largest page_size in flash_table.
	 */
	if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
		flash_buffer = kmalloc(264, GFP_KERNEL);
		if (flash_buffer == NULL) {
			rc = -ENOMEM;
			goto nvram_write_end;
		}
	}

	written = 0;
	while ((written < len32) && (rc == 0)) {
		u32 page_start, page_end, data_start, data_end;
		u32 addr, cmd_flags;
		int i;

		/* Find the page_start addr */
		page_start = offset32 + written;
		page_start -= (page_start % bp->flash_info->page_size);
		/* Find the page_end addr */
		page_end = page_start + bp->flash_info->page_size;
		/* Find the data_start addr */
		data_start = (written == 0) ? offset32 : page_start;
		/* Find the data_end addr */
		data_end = (page_end > offset32 + len32) ?
			(offset32 + len32) : page_end;

		/* Request access to the flash interface. */
		if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
			goto nvram_write_end;

		/* Enable access to flash interface */
		bnx2_enable_nvram_access(bp);

		/* NOTE(review): from here on, error gotos jump to
		 * nvram_write_end without disabling flash access or
		 * releasing the NVRAM lock — same leak pattern fixed in
		 * bnx2_nvram_read(); confirm and fix separately.
		 */
		cmd_flags = BNX2_NVM_COMMAND_FIRST;
		if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
			int j;

			/* Read the whole page into the buffer
			 * (non-buffer flash only) */
			for (j = 0; j < bp->flash_info->page_size; j += 4) {
				if (j == (bp->flash_info->page_size - 4)) {
					cmd_flags |= BNX2_NVM_COMMAND_LAST;
				}
				rc = bnx2_nvram_read_dword(bp,
					page_start + j,
					&flash_buffer[j],
					cmd_flags);

				if (rc)
					goto nvram_write_end;

				cmd_flags = 0;
			}
		}

		/* Enable writes to flash interface (unlock write-protect) */
		if ((rc = bnx2_enable_nvram_write(bp)) != 0)
			goto nvram_write_end;

		/* Loop to write back the buffer data from page_start to
		 * data_start */
		i = 0;
		if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
			/* Erase the page */
			if ((rc = bnx2_nvram_erase_page(bp, page_start)) != 0)
				goto nvram_write_end;

			/* Re-enable the write again for the actual write */
			bnx2_enable_nvram_write(bp);

			for (addr = page_start; addr < data_start;
				addr += 4, i += 4) {

				rc = bnx2_nvram_write_dword(bp, addr,
					&flash_buffer[i], cmd_flags);

				if (rc != 0)
					goto nvram_write_end;

				cmd_flags = 0;
			}
		}

		/* Loop to write the new data from data_start to data_end */
		for (addr = data_start; addr < data_end; addr += 4, i += 4) {
			if ((addr == page_end - 4) ||
				((bp->flash_info->flags & BNX2_NV_BUFFERED) &&
				 (addr == data_end - 4))) {

				cmd_flags |= BNX2_NVM_COMMAND_LAST;
			}
			rc = bnx2_nvram_write_dword(bp, addr, buf,
				cmd_flags);

			if (rc != 0)
				goto nvram_write_end;

			cmd_flags = 0;
			buf += 4;
		}

		/* Loop to write back the buffer data from data_end
		 * to page_end */
		if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
			for (addr = data_end; addr < page_end;
				addr += 4, i += 4) {

				if (addr == page_end-4) {
					cmd_flags = BNX2_NVM_COMMAND_LAST;
				}
				rc = bnx2_nvram_write_dword(bp, addr,
					&flash_buffer[i], cmd_flags);

				if (rc != 0)
					goto nvram_write_end;

				cmd_flags = 0;
			}
		}

		/* Disable writes to flash interface (lock write-protect) */
		bnx2_disable_nvram_write(bp);

		/* Disable access to flash interface */
		bnx2_disable_nvram_access(bp);
		bnx2_release_nvram_lock(bp);

		/* Increment written */
		written += data_end - data_start;
	}

nvram_write_end:
	kfree(flash_buffer);
	kfree(align_buf);
	return rc;
}
4689
4690 static void
4691 bnx2_init_fw_cap(struct bnx2 *bp)
4692 {
4693         u32 val, sig = 0;
4694
4695         bp->phy_flags &= ~BNX2_PHY_FLAG_REMOTE_PHY_CAP;
4696         bp->flags &= ~BNX2_FLAG_CAN_KEEP_VLAN;
4697
4698         if (!(bp->flags & BNX2_FLAG_ASF_ENABLE))
4699                 bp->flags |= BNX2_FLAG_CAN_KEEP_VLAN;
4700
4701         val = bnx2_shmem_rd(bp, BNX2_FW_CAP_MB);
4702         if ((val & BNX2_FW_CAP_SIGNATURE_MASK) != BNX2_FW_CAP_SIGNATURE)
4703                 return;
4704
4705         if ((val & BNX2_FW_CAP_CAN_KEEP_VLAN) == BNX2_FW_CAP_CAN_KEEP_VLAN) {
4706                 bp->flags |= BNX2_FLAG_CAN_KEEP_VLAN;
4707                 sig |= BNX2_DRV_ACK_CAP_SIGNATURE | BNX2_FW_CAP_CAN_KEEP_VLAN;
4708         }
4709
4710         if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
4711             (val & BNX2_FW_CAP_REMOTE_PHY_CAPABLE)) {
4712                 u32 link;
4713
4714                 bp->phy_flags |= BNX2_PHY_FLAG_REMOTE_PHY_CAP;
4715
4716                 link = bnx2_shmem_rd(bp, BNX2_LINK_STATUS);
4717                 if (link & BNX2_LINK_STATUS_SERDES_LINK)
4718                         bp->phy_port = PORT_FIBRE;
4719                 else
4720                         bp->phy_port = PORT_TP;
4721
4722                 sig |= BNX2_DRV_ACK_CAP_SIGNATURE |
4723                        BNX2_FW_CAP_REMOTE_PHY_CAPABLE;
4724         }
4725
4726         if (netif_running(bp->dev) && sig)
4727                 bnx2_shmem_wr(bp, BNX2_DRV_ACK_CAP_MB, sig);
4728 }
4729
/* Point the PCI GRC windows at the chip's MSI-X table and PBA so they
 * are reachable through the register BAR.  Called from
 * bnx2_reset_chip() when MSI-X is in use, since a chip reset reverts
 * the window configuration.
 */
static void
bnx2_setup_msix_tbl(struct bnx2 *bp)
{
	/* Select separate-window mode before programming the windows. */
	BNX2_WR(bp, BNX2_PCI_GRC_WINDOW_ADDR, BNX2_PCI_GRC_WINDOW_ADDR_SEP_WIN);

	BNX2_WR(bp, BNX2_PCI_GRC_WINDOW2_ADDR, BNX2_MSIX_TABLE_ADDR);
	BNX2_WR(bp, BNX2_PCI_GRC_WINDOW3_ADDR, BNX2_MSIX_PBA_ADDR);
}
4738
/* Soft-reset the chip and bring it back to a usable state.
 *
 * @reset_code: BNX2_DRV_MSG_CODE_* value passed to the bootcode so it
 *              knows why the reset is happening.
 *
 * Sequence: quiesce DMA (chip-dependent), handshake with the bootcode,
 * deposit the soft-reset signature, issue the reset (5709 via
 * MISC_COMMAND, older chips via PCICFG_MISC_CONFIG), verify byte-swap
 * configuration, wait for firmware init, then re-apply per-revision
 * workarounds and the MSI-X window setup lost across reset.
 *
 * Returns 0 on success or a negative errno.
 */
static int
bnx2_reset_chip(struct bnx2 *bp, u32 reset_code)
{
	u32 val;
	int i, rc = 0;
	u8 old_port;

	/* Wait for the current PCI transaction to complete before
	 * issuing a reset. */
	if ((BNX2_CHIP(bp) == BNX2_CHIP_5706) ||
	    (BNX2_CHIP(bp) == BNX2_CHIP_5708)) {
		/* 5706/5708: stop the DMA engines and host coalescing,
		 * then read back to flush the posted write.
		 */
		BNX2_WR(bp, BNX2_MISC_ENABLE_CLR_BITS,
			BNX2_MISC_ENABLE_CLR_BITS_TX_DMA_ENABLE |
			BNX2_MISC_ENABLE_CLR_BITS_DMA_ENGINE_ENABLE |
			BNX2_MISC_ENABLE_CLR_BITS_RX_DMA_ENABLE |
			BNX2_MISC_ENABLE_CLR_BITS_HOST_COALESCE_ENABLE);
		val = BNX2_RD(bp, BNX2_MISC_ENABLE_CLR_BITS);
		udelay(5);
	} else {  /* 5709 */
		/* Disable new DMA activity, then poll (up to ~100 ms)
		 * until the NO_PEND status bit reads clear.
		 */
		val = BNX2_RD(bp, BNX2_MISC_NEW_CORE_CTL);
		val &= ~BNX2_MISC_NEW_CORE_CTL_DMA_ENABLE;
		BNX2_WR(bp, BNX2_MISC_NEW_CORE_CTL, val);
		val = BNX2_RD(bp, BNX2_MISC_NEW_CORE_CTL);

		for (i = 0; i < 100; i++) {
			msleep(1);
			val = BNX2_RD(bp, BNX2_PCICFG_DEVICE_CONTROL);
			if (!(val & BNX2_PCICFG_DEVICE_STATUS_NO_PEND))
				break;
		}
	}

	/* Wait for the firmware to tell us it is ok to issue a reset. */
	bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT0 | reset_code, 1, 1);

	/* Deposit a driver reset signature so the firmware knows that
	 * this is a soft reset. */
	bnx2_shmem_wr(bp, BNX2_DRV_RESET_SIGNATURE,
		      BNX2_DRV_RESET_SIGNATURE_MAGIC);

	/* Do a dummy read to force the chip to complete all current transaction
	 * before we issue a reset. */
	val = BNX2_RD(bp, BNX2_MISC_ID);

	if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
		/* 5709 resets through MISC_COMMAND; the read-back flushes
		 * the write, and PCICFG_MISC_CONFIG is then reprogrammed
		 * (register window + mailbox word swap) without the
		 * CORE_RST_REQ bit.
		 */
		BNX2_WR(bp, BNX2_MISC_COMMAND, BNX2_MISC_COMMAND_SW_RESET);
		BNX2_RD(bp, BNX2_MISC_COMMAND);
		udelay(5);

		val = BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
		      BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;

		BNX2_WR(bp, BNX2_PCICFG_MISC_CONFIG, val);

	} else {
		val = BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
		      BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
		      BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;

		/* Chip reset. */
		BNX2_WR(bp, BNX2_PCICFG_MISC_CONFIG, val);

		/* Reading back any register after chip reset will hang the
		 * bus on 5706 A0 and A1.  The msleep below provides plenty
		 * of margin for write posting.
		 */
		if ((BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A0) ||
		    (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A1))
			msleep(20);

		/* Reset takes approximate 30 usec */
		for (i = 0; i < 10; i++) {
			val = BNX2_RD(bp, BNX2_PCICFG_MISC_CONFIG);
			if ((val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
				    BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) == 0)
				break;
			udelay(10);
		}

		if (val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
			   BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) {
			pr_err("Chip reset did not complete\n");
			return -EBUSY;
		}
	}

	/* Make sure byte swapping is properly configured. */
	val = BNX2_RD(bp, BNX2_PCI_SWAP_DIAG0);
	if (val != 0x01020304) {
		pr_err("Chip not in correct endian mode\n");
		return -ENODEV;
	}

	/* Wait for the firmware to finish its initialization. */
	rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT1 | reset_code, 1, 0);
	if (rc)
		return rc;

	/* Re-read firmware capabilities; the reset may change the
	 * reported PHY port for remote-PHY configurations.
	 */
	spin_lock_bh(&bp->phy_lock);
	old_port = bp->phy_port;
	bnx2_init_fw_cap(bp);
	if ((bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) &&
	    old_port != bp->phy_port)
		bnx2_set_default_remote_link(bp);
	spin_unlock_bh(&bp->phy_lock);

	if (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A0) {
		/* Adjust the voltage regular to two steps lower.  The default
		 * of this register is 0x0000000e. */
		BNX2_WR(bp, BNX2_MISC_VREG_CONTROL, 0x000000fa);

		/* Remove bad rbuf memory from the free pool. */
		rc = bnx2_alloc_bad_rbuf(bp);
	}

	if (bp->flags & BNX2_FLAG_USING_MSIX) {
		/* Reset cleared the GRC windows; restore MSI-X mapping. */
		bnx2_setup_msix_tbl(bp);
		/* Prevent MSIX table reads and write from timing out */
		BNX2_WR(bp, BNX2_MISC_ECO_HW_CTL,
			BNX2_MISC_ECO_HW_CTL_LARGE_GRC_TMOUT_EN);
	}

	return rc;
}
4863
4864 static int
4865 bnx2_init_chip(struct bnx2 *bp)
4866 {
4867         u32 val, mtu;
4868         int rc, i;
4869
4870         /* Make sure the interrupt is not active. */
4871         BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD, BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
4872
4873         val = BNX2_DMA_CONFIG_DATA_BYTE_SWAP |
4874               BNX2_DMA_CONFIG_DATA_WORD_SWAP |
4875 #ifdef __BIG_ENDIAN
4876               BNX2_DMA_CONFIG_CNTL_BYTE_SWAP |
4877 #endif
4878               BNX2_DMA_CONFIG_CNTL_WORD_SWAP |
4879               DMA_READ_CHANS << 12 |
4880               DMA_WRITE_CHANS << 16;
4881
4882         val |= (0x2 << 20) | (1 << 11);
4883
4884         if ((bp->flags & BNX2_FLAG_PCIX) && (bp->bus_speed_mhz == 133))
4885                 val |= (1 << 23);
4886
4887         if ((BNX2_CHIP(bp) == BNX2_CHIP_5706) &&
4888             (BNX2_CHIP_ID(bp) != BNX2_CHIP_ID_5706_A0) &&
4889             !(bp->flags & BNX2_FLAG_PCIX))
4890                 val |= BNX2_DMA_CONFIG_CNTL_PING_PONG_DMA;
4891
4892         BNX2_WR(bp, BNX2_DMA_CONFIG, val);
4893
4894         if (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A0) {
4895                 val = BNX2_RD(bp, BNX2_TDMA_CONFIG);
4896                 val |= BNX2_TDMA_CONFIG_ONE_DMA;
4897                 BNX2_WR(bp, BNX2_TDMA_CONFIG, val);
4898         }
4899
4900         if (bp->flags & BNX2_FLAG_PCIX) {
4901                 u16 val16;
4902
4903                 pci_read_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
4904                                      &val16);
4905                 pci_write_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
4906                                       val16 & ~PCI_X_CMD_ERO);
4907         }
4908
4909         BNX2_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
4910                 BNX2_MISC_ENABLE_SET_BITS_HOST_COALESCE_ENABLE |
4911                 BNX2_MISC_ENABLE_STATUS_BITS_RX_V2P_ENABLE |
4912                 BNX2_MISC_ENABLE_STATUS_BITS_CONTEXT_ENABLE);
4913
4914         /* Initialize context mapping and zero out the quick contexts.  The
4915          * context block must have already been enabled. */
4916         if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
4917                 rc = bnx2_init_5709_context(bp);
4918                 if (rc)
4919                         return rc;
4920         } else
4921                 bnx2_init_context(bp);
4922
4923         if ((rc = bnx2_init_cpus(bp)) != 0)
4924                 return rc;
4925
4926         bnx2_init_nvram(bp);
4927
4928         bnx2_set_mac_addr(bp, bp->dev->dev_addr, 0);
4929
4930         val = BNX2_RD(bp, BNX2_MQ_CONFIG);
4931         val &= ~BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE;
4932         val |= BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE_256;
4933         if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
4934                 val |= BNX2_MQ_CONFIG_BIN_MQ_MODE;
4935                 if (BNX2_CHIP_REV(bp) == BNX2_CHIP_REV_Ax)
4936                         val |= BNX2_MQ_CONFIG_HALT_DIS;
4937         }
4938
4939         BNX2_WR(bp, BNX2_MQ_CONFIG, val);
4940
4941         val = 0x10000 + (MAX_CID_CNT * MB_KERNEL_CTX_SIZE);
4942         BNX2_WR(bp, BNX2_MQ_KNL_BYP_WIND_START, val);
4943         BNX2_WR(bp, BNX2_MQ_KNL_WIND_END, val);
4944
4945         val = (BNX2_PAGE_BITS - 8) << 24;
4946         BNX2_WR(bp, BNX2_RV2P_CONFIG, val);
4947
4948         /* Configure page size. */
4949         val = BNX2_RD(bp, BNX2_TBDR_CONFIG);
4950         val &= ~BNX2_TBDR_CONFIG_PAGE_SIZE;
4951         val |= (BNX2_PAGE_BITS - 8) << 24 | 0x40;
4952         BNX2_WR(bp, BNX2_TBDR_CONFIG, val);
4953
4954         val = bp->mac_addr[0] +
4955               (bp->mac_addr[1] << 8) +
4956               (bp->mac_addr[2] << 16) +
4957               bp->mac_addr[3] +
4958               (bp->mac_addr[4] << 8) +
4959               (bp->mac_addr[5] << 16);
4960         BNX2_WR(bp, BNX2_EMAC_BACKOFF_SEED, val);
4961
4962         /* Program the MTU.  Also include 4 bytes for CRC32. */
4963         mtu = bp->dev->mtu;
4964         val = mtu + ETH_HLEN + ETH_FCS_LEN;
4965         if (val > (MAX_ETHERNET_PACKET_SIZE + 4))
4966                 val |= BNX2_EMAC_RX_MTU_SIZE_JUMBO_ENA;
4967         BNX2_WR(bp, BNX2_EMAC_RX_MTU_SIZE, val);
4968
4969         if (mtu < 1500)
4970                 mtu = 1500;
4971
4972         bnx2_reg_wr_ind(bp, BNX2_RBUF_CONFIG, BNX2_RBUF_CONFIG_VAL(mtu));
4973         bnx2_reg_wr_ind(bp, BNX2_RBUF_CONFIG2, BNX2_RBUF_CONFIG2_VAL(mtu));
4974         bnx2_reg_wr_ind(bp, BNX2_RBUF_CONFIG3, BNX2_RBUF_CONFIG3_VAL(mtu));
4975
4976         memset(bp->bnx2_napi[0].status_blk.msi, 0, bp->status_stats_size);
4977         for (i = 0; i < BNX2_MAX_MSIX_VEC; i++)
4978                 bp->bnx2_napi[i].last_status_idx = 0;
4979
4980         bp->idle_chk_status_idx = 0xffff;
4981
4982         bp->rx_mode = BNX2_EMAC_RX_MODE_SORT_MODE;
4983
4984         /* Set up how to generate a link change interrupt. */
4985         BNX2_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);
4986
4987         BNX2_WR(bp, BNX2_HC_STATUS_ADDR_L,
4988                 (u64) bp->status_blk_mapping & 0xffffffff);
4989         BNX2_WR(bp, BNX2_HC_STATUS_ADDR_H, (u64) bp->status_blk_mapping >> 32);
4990
4991         BNX2_WR(bp, BNX2_HC_STATISTICS_ADDR_L,
4992                 (u64) bp->stats_blk_mapping & 0xffffffff);
4993         BNX2_WR(bp, BNX2_HC_STATISTICS_ADDR_H,
4994                 (u64) bp->stats_blk_mapping >> 32);
4995
4996         BNX2_WR(bp, BNX2_HC_TX_QUICK_CONS_TRIP,
4997                 (bp->tx_quick_cons_trip_int << 16) | bp->tx_quick_cons_trip);
4998
4999         BNX2_WR(bp, BNX2_HC_RX_QUICK_CONS_TRIP,
5000                 (bp->rx_quick_cons_trip_int << 16) | bp->rx_quick_cons_trip);
5001
5002         BNX2_WR(bp, BNX2_HC_COMP_PROD_TRIP,
5003                 (bp->comp_prod_trip_int << 16) | bp->comp_prod_trip);
5004
5005         BNX2_WR(bp, BNX2_HC_TX_TICKS, (bp->tx_ticks_int << 16) | bp->tx_ticks);
5006
5007         BNX2_WR(bp, BNX2_HC_RX_TICKS, (bp->rx_ticks_int << 16) | bp->rx_ticks);
5008
5009         BNX2_WR(bp, BNX2_HC_COM_TICKS,
5010                 (bp->com_ticks_int << 16) | bp->com_ticks);
5011
5012         BNX2_WR(bp, BNX2_HC_CMD_TICKS,
5013                 (bp->cmd_ticks_int << 16) | bp->cmd_ticks);
5014
5015         if (bp->flags & BNX2_FLAG_BROKEN_STATS)
5016                 BNX2_WR(bp, BNX2_HC_STATS_TICKS, 0);
5017         else
5018                 BNX2_WR(bp, BNX2_HC_STATS_TICKS, bp->stats_ticks);
5019         BNX2_WR(bp, BNX2_HC_STAT_COLLECT_TICKS, 0xbb8);  /* 3ms */
5020
5021         if (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A1)
5022                 val = BNX2_HC_CONFIG_COLLECT_STATS;
5023         else {
5024                 val = BNX2_HC_CONFIG_RX_TMR_MODE | BNX2_HC_CONFIG_TX_TMR_MODE |
5025                       BNX2_HC_CONFIG_COLLECT_STATS;
5026         }
5027
5028         if (bp->flags & BNX2_FLAG_USING_MSIX) {
5029                 BNX2_WR(bp, BNX2_HC_MSIX_BIT_VECTOR,
5030                         BNX2_HC_MSIX_BIT_VECTOR_VAL);
5031
5032                 val |= BNX2_HC_CONFIG_SB_ADDR_INC_128B;
5033         }
5034
5035         if (bp->flags & BNX2_FLAG_ONE_SHOT_MSI)
5036                 val |= BNX2_HC_CONFIG_ONE_SHOT | BNX2_HC_CONFIG_USE_INT_PARAM;
5037
5038         BNX2_WR(bp, BNX2_HC_CONFIG, val);
5039
5040         if (bp->rx_ticks < 25)
5041                 bnx2_reg_wr_ind(bp, BNX2_FW_RX_LOW_LATENCY, 1);
5042         else
5043                 bnx2_reg_wr_ind(bp, BNX2_FW_RX_LOW_LATENCY, 0);
5044
5045         for (i = 1; i < bp->irq_nvecs; i++) {
5046                 u32 base = ((i - 1) * BNX2_HC_SB_CONFIG_SIZE) +
5047                            BNX2_HC_SB_CONFIG_1;
5048
5049                 BNX2_WR(bp, base,
5050                         BNX2_HC_SB_CONFIG_1_TX_TMR_MODE |
5051                         BNX2_HC_SB_CONFIG_1_RX_TMR_MODE |
5052                         BNX2_HC_SB_CONFIG_1_ONE_SHOT);
5053
5054                 BNX2_WR(bp, base + BNX2_HC_TX_QUICK_CONS_TRIP_OFF,
5055                         (bp->tx_quick_cons_trip_int << 16) |
5056                          bp->tx_quick_cons_trip);
5057
5058                 BNX2_WR(bp, base + BNX2_HC_TX_TICKS_OFF,
5059                         (bp->tx_ticks_int << 16) | bp->tx_ticks);
5060
5061                 BNX2_WR(bp, base + BNX2_HC_RX_QUICK_CONS_TRIP_OFF,
5062                         (bp->rx_quick_cons_trip_int << 16) |
5063                         bp->rx_quick_cons_trip);
5064
5065                 BNX2_WR(bp, base + BNX2_HC_RX_TICKS_OFF,
5066                         (bp->rx_ticks_int << 16) | bp->rx_ticks);
5067         }
5068
5069         /* Clear internal stats counters. */
5070         BNX2_WR(bp, BNX2_HC_COMMAND, BNX2_HC_COMMAND_CLR_STAT_NOW);
5071
5072         BNX2_WR(bp, BNX2_HC_ATTN_BITS_ENABLE, STATUS_ATTN_EVENTS);
5073
5074         /* Initialize the receive filter. */
5075         bnx2_set_rx_mode(bp->dev);
5076
5077         if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
5078                 val = BNX2_RD(bp, BNX2_MISC_NEW_CORE_CTL);
5079                 val |= BNX2_MISC_NEW_CORE_CTL_DMA_ENABLE;
5080                 BNX2_WR(bp, BNX2_MISC_NEW_CORE_CTL, val);
5081         }
5082         rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT2 | BNX2_DRV_MSG_CODE_RESET,
5083                           1, 0);
5084
5085         BNX2_WR(bp, BNX2_MISC_ENABLE_SET_BITS, BNX2_MISC_ENABLE_DEFAULT);
5086         BNX2_RD(bp, BNX2_MISC_ENABLE_SET_BITS);
5087
5088         udelay(20);
5089
5090         bp->hc_cmd = BNX2_RD(bp, BNX2_HC_COMMAND);
5091
5092         return rc;
5093 }
5094
5095 static void
5096 bnx2_clear_ring_states(struct bnx2 *bp)
5097 {
5098         struct bnx2_napi *bnapi;
5099         struct bnx2_tx_ring_info *txr;
5100         struct bnx2_rx_ring_info *rxr;
5101         int i;
5102
5103         for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) {
5104                 bnapi = &bp->bnx2_napi[i];
5105                 txr = &bnapi->tx_ring;
5106                 rxr = &bnapi->rx_ring;
5107
5108                 txr->tx_cons = 0;
5109                 txr->hw_tx_cons = 0;
5110                 rxr->rx_prod_bseq = 0;
5111                 rxr->rx_prod = 0;
5112                 rxr->rx_cons = 0;
5113                 rxr->rx_pg_prod = 0;
5114                 rxr->rx_pg_cons = 0;
5115         }
5116 }
5117
/* Program the chip's L2 TX context for CID @cid: the context type,
 * the command type, and the 64-bit host DMA address of the TX BD
 * chain.  The 5709 (Xinan) uses a different set of context offsets
 * (the _XI variants) than the 5706/5708.
 */
static void
bnx2_init_tx_context(struct bnx2 *bp, u32 cid, struct bnx2_tx_ring_info *txr)
{
	u32 val, offset0, offset1, offset2, offset3;
	u32 cid_addr = GET_CID_ADDR(cid);

	if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
		offset0 = BNX2_L2CTX_TYPE_XI;
		offset1 = BNX2_L2CTX_CMD_TYPE_XI;
		offset2 = BNX2_L2CTX_TBDR_BHADDR_HI_XI;
		offset3 = BNX2_L2CTX_TBDR_BHADDR_LO_XI;
	} else {
		offset0 = BNX2_L2CTX_TYPE;
		offset1 = BNX2_L2CTX_CMD_TYPE;
		offset2 = BNX2_L2CTX_TBDR_BHADDR_HI;
		offset3 = BNX2_L2CTX_TBDR_BHADDR_LO;
	}
	val = BNX2_L2CTX_TYPE_TYPE_L2 | BNX2_L2CTX_TYPE_SIZE_L2;
	bnx2_ctx_wr(bp, cid_addr, offset0, val);

	/* NOTE(review): the (8 << 16) field value follows the original
	 * programming sequence; exact semantics per Broadcom chip docs.
	 */
	val = BNX2_L2CTX_CMD_TYPE_TYPE_L2 | (8 << 16);
	bnx2_ctx_wr(bp, cid_addr, offset1, val);

	/* Split the 64-bit TX descriptor ring address into hi/lo words. */
	val = (u64) txr->tx_desc_mapping >> 32;
	bnx2_ctx_wr(bp, cid_addr, offset2, val);

	val = (u64) txr->tx_desc_mapping & 0xffffffff;
	bnx2_ctx_wr(bp, cid_addr, offset3, val);
}
5147
5148 static void
5149 bnx2_init_tx_ring(struct bnx2 *bp, int ring_num)
5150 {
5151         struct bnx2_tx_bd *txbd;
5152         u32 cid = TX_CID;
5153         struct bnx2_napi *bnapi;
5154         struct bnx2_tx_ring_info *txr;
5155
5156         bnapi = &bp->bnx2_napi[ring_num];
5157         txr = &bnapi->tx_ring;
5158
5159         if (ring_num == 0)
5160                 cid = TX_CID;
5161         else
5162                 cid = TX_TSS_CID + ring_num - 1;
5163
5164         bp->tx_wake_thresh = bp->tx_ring_size / 2;
5165
5166         txbd = &txr->tx_desc_ring[BNX2_MAX_TX_DESC_CNT];
5167
5168         txbd->tx_bd_haddr_hi = (u64) txr->tx_desc_mapping >> 32;
5169         txbd->tx_bd_haddr_lo = (u64) txr->tx_desc_mapping & 0xffffffff;
5170
5171         txr->tx_prod = 0;
5172         txr->tx_prod_bseq = 0;
5173
5174         txr->tx_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BIDX;
5175         txr->tx_bseq_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BSEQ;
5176
5177         bnx2_init_tx_context(bp, cid, txr);
5178 }
5179
5180 static void
5181 bnx2_init_rxbd_rings(struct bnx2_rx_bd *rx_ring[], dma_addr_t dma[],
5182                      u32 buf_size, int num_rings)
5183 {
5184         int i;
5185         struct bnx2_rx_bd *rxbd;
5186
5187         for (i = 0; i < num_rings; i++) {
5188                 int j;
5189
5190                 rxbd = &rx_ring[i][0];
5191                 for (j = 0; j < BNX2_MAX_RX_DESC_CNT; j++, rxbd++) {
5192                         rxbd->rx_bd_len = buf_size;
5193                         rxbd->rx_bd_flags = RX_BD_FLAGS_START | RX_BD_FLAGS_END;
5194                 }
5195                 if (i == (num_rings - 1))
5196                         j = 0;
5197                 else
5198                         j = i + 1;
5199                 rxbd->rx_bd_haddr_hi = (u64) dma[j] >> 32;
5200                 rxbd->rx_bd_haddr_lo = (u64) dma[j] & 0xffffffff;
5201         }
5202 }
5203
/* Initialize RX ring @ring_num: build the BD chains, program the
 * chip's RX context with the ring addresses, pre-fill the ring (and
 * the page ring, when jumbo pages are in use) with receive buffers,
 * and finally publish the initial producer indices via the ring's
 * mailbox registers.
 */
static void
bnx2_init_rx_ring(struct bnx2 *bp, int ring_num)
{
	int i;
	u16 prod, ring_prod;
	u32 cid, rx_cid_addr, val;
	struct bnx2_napi *bnapi = &bp->bnx2_napi[ring_num];
	struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;

	/* Ring 0 uses the base RX CID; RSS rings get consecutive CIDs. */
	if (ring_num == 0)
		cid = RX_CID;
	else
		cid = RX_RSS_CID + ring_num - 1;

	rx_cid_addr = GET_CID_ADDR(cid);

	bnx2_init_rxbd_rings(rxr->rx_desc_ring, rxr->rx_desc_mapping,
			     bp->rx_buf_use_size, bp->rx_max_ring);

	bnx2_init_rx_context(bp, cid);

	if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
		val = BNX2_RD(bp, BNX2_MQ_MAP_L2_5);
		BNX2_WR(bp, BNX2_MQ_MAP_L2_5, val | BNX2_MQ_MAP_L2_5_ARM);
	}

	/* Page buffering off by default; enabled below when configured. */
	bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_PG_BUF_SIZE, 0);
	if (bp->rx_pg_ring_size) {
		/* Jumbo mode: set up the separate page BD chain. */
		bnx2_init_rxbd_rings(rxr->rx_pg_desc_ring,
				     rxr->rx_pg_desc_mapping,
				     PAGE_SIZE, bp->rx_max_pg_ring);
		val = (bp->rx_buf_use_size << 16) | PAGE_SIZE;
		bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_PG_BUF_SIZE, val);
		bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_RBDC_KEY,
		       BNX2_L2CTX_RBDC_JUMBO_KEY - ring_num);

		val = (u64) rxr->rx_pg_desc_mapping[0] >> 32;
		bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_PG_BDHADDR_HI, val);

		val = (u64) rxr->rx_pg_desc_mapping[0] & 0xffffffff;
		bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_PG_BDHADDR_LO, val);

		if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
			BNX2_WR(bp, BNX2_MQ_MAP_L2_3, BNX2_MQ_MAP_L2_3_DEFAULT);
	}

	/* Program the 64-bit address of the first RX BD page. */
	val = (u64) rxr->rx_desc_mapping[0] >> 32;
	bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_BDHADDR_HI, val);

	val = (u64) rxr->rx_desc_mapping[0] & 0xffffffff;
	bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_BDHADDR_LO, val);

	/* Pre-allocate pages for the page ring; a partial fill is only
	 * warned about, not treated as fatal.
	 */
	ring_prod = prod = rxr->rx_pg_prod;
	for (i = 0; i < bp->rx_pg_ring_size; i++) {
		if (bnx2_alloc_rx_page(bp, rxr, ring_prod, GFP_KERNEL) < 0) {
			netdev_warn(bp->dev, "init'ed rx page ring %d with %d/%d pages only\n",
				    ring_num, i, bp->rx_pg_ring_size);
			break;
		}
		prod = BNX2_NEXT_RX_BD(prod);
		ring_prod = BNX2_RX_PG_RING_IDX(prod);
	}
	rxr->rx_pg_prod = prod;

	/* Pre-allocate the data buffers for the main RX ring. */
	ring_prod = prod = rxr->rx_prod;
	for (i = 0; i < bp->rx_ring_size; i++) {
		if (bnx2_alloc_rx_data(bp, rxr, ring_prod, GFP_KERNEL) < 0) {
			netdev_warn(bp->dev, "init'ed rx ring %d with %d/%d skbs only\n",
				    ring_num, i, bp->rx_ring_size);
			break;
		}
		prod = BNX2_NEXT_RX_BD(prod);
		ring_prod = BNX2_RX_RING_IDX(prod);
	}
	rxr->rx_prod = prod;

	rxr->rx_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_HOST_BDIDX;
	rxr->rx_bseq_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_HOST_BSEQ;
	rxr->rx_pg_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_HOST_PG_BDIDX;

	/* Tell the hardware where the producers start. */
	BNX2_WR16(bp, rxr->rx_pg_bidx_addr, rxr->rx_pg_prod);
	BNX2_WR16(bp, rxr->rx_bidx_addr, prod);

	BNX2_WR(bp, rxr->rx_bseq_addr, rxr->rx_prod_bseq);
}
5289
/* (Re)initialize every TX and RX ring and, when multiple RX rings are
 * enabled, program the RSS indirection table and hash configuration.
 */
static void
bnx2_init_all_rings(struct bnx2 *bp)
{
	int i;
	u32 val;

	bnx2_clear_ring_states(bp);

	/* Disable TSS while the TX rings are being set up. */
	BNX2_WR(bp, BNX2_TSCH_TSS_CFG, 0);
	for (i = 0; i < bp->num_tx_rings; i++)
		bnx2_init_tx_ring(bp, i);

	if (bp->num_tx_rings > 1)
		BNX2_WR(bp, BNX2_TSCH_TSS_CFG, ((bp->num_tx_rings - 1) << 24) |
			(TX_TSS_CID << 7));

	/* Disable RSS while the RX rings are being set up. */
	BNX2_WR(bp, BNX2_RLUP_RSS_CONFIG, 0);
	bnx2_reg_wr_ind(bp, BNX2_RXP_SCRATCH_RSS_TBL_SZ, 0);

	for (i = 0; i < bp->num_rx_rings; i++)
		bnx2_init_rx_ring(bp, i);

	if (bp->num_rx_rings > 1) {
		u32 tbl_32 = 0;

		/* Build the RSS indirection table: 8 entries of 4 bits
		 * are packed per 32-bit write, spreading table slots
		 * round-robin over the non-default RX rings.
		 */
		for (i = 0; i < BNX2_RXP_SCRATCH_RSS_TBL_MAX_ENTRIES; i++) {
			int shift = (i % 8) << 2;

			tbl_32 |= (i % (bp->num_rx_rings - 1)) << shift;
			if ((i % 8) == 7) {
				BNX2_WR(bp, BNX2_RLUP_RSS_DATA, tbl_32);
				BNX2_WR(bp, BNX2_RLUP_RSS_COMMAND, (i >> 3) |
					BNX2_RLUP_RSS_COMMAND_RSS_WRITE_MASK |
					BNX2_RLUP_RSS_COMMAND_WRITE |
					BNX2_RLUP_RSS_COMMAND_HASH_MASK);
				tbl_32 = 0;
			}
		}

		/* Enable RSS hashing for IPv4 and IPv6 traffic. */
		val = BNX2_RLUP_RSS_CONFIG_IPV4_RSS_TYPE_ALL_XI |
		      BNX2_RLUP_RSS_CONFIG_IPV6_RSS_TYPE_ALL_XI;

		BNX2_WR(bp, BNX2_RLUP_RSS_CONFIG, val);

	}
}
5336
5337 static u32 bnx2_find_max_ring(u32 ring_size, u32 max_size)
5338 {
5339         u32 max, num_rings = 1;
5340
5341         while (ring_size > BNX2_MAX_RX_DESC_CNT) {
5342                 ring_size -= BNX2_MAX_RX_DESC_CNT;
5343                 num_rings++;
5344         }
5345         /* round to next power of 2 */
5346         max = max_size;
5347         while ((max & num_rings) == 0)
5348                 max >>= 1;
5349
5350         if (num_rings != max)
5351                 max <<= 1;
5352
5353         return max;
5354 }
5355
5356 static void
5357 bnx2_set_rx_ring_size(struct bnx2 *bp, u32 size)
5358 {
5359         u32 rx_size, rx_space, jumbo_size;
5360
5361         /* 8 for CRC and VLAN */
5362         rx_size = bp->dev->mtu + ETH_HLEN + BNX2_RX_OFFSET + 8;
5363
5364         rx_space = SKB_DATA_ALIGN(rx_size + BNX2_RX_ALIGN) + NET_SKB_PAD +
5365                 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
5366
5367         bp->rx_copy_thresh = BNX2_RX_COPY_THRESH;
5368         bp->rx_pg_ring_size = 0;
5369         bp->rx_max_pg_ring = 0;
5370         bp->rx_max_pg_ring_idx = 0;
5371         if ((rx_space > PAGE_SIZE) && !(bp->flags & BNX2_FLAG_JUMBO_BROKEN)) {
5372                 int pages = PAGE_ALIGN(bp->dev->mtu - 40) >> PAGE_SHIFT;
5373
5374                 jumbo_size = size * pages;
5375                 if (jumbo_size > BNX2_MAX_TOTAL_RX_PG_DESC_CNT)
5376                         jumbo_size = BNX2_MAX_TOTAL_RX_PG_DESC_CNT;
5377
5378                 bp->rx_pg_ring_size = jumbo_size;
5379                 bp->rx_max_pg_ring = bnx2_find_max_ring(jumbo_size,
5380                                                         BNX2_MAX_RX_PG_RINGS);
5381                 bp->rx_max_pg_ring_idx =
5382                         (bp->rx_max_pg_ring * BNX2_RX_DESC_CNT) - 1;
5383                 rx_size = BNX2_RX_COPY_THRESH + BNX2_RX_OFFSET;
5384                 bp->rx_copy_thresh = 0;
5385         }
5386
5387         bp->rx_buf_use_size = rx_size;
5388         /* hw alignment + build_skb() overhead*/
5389         bp->rx_buf_size = SKB_DATA_ALIGN(bp->rx_buf_use_size + BNX2_RX_ALIGN) +
5390                 NET_SKB_PAD + SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
5391         bp->rx_jumbo_thresh = rx_size - BNX2_RX_OFFSET;
5392         bp->rx_ring_size = size;
5393         bp->rx_max_ring = bnx2_find_max_ring(size, BNX2_MAX_RX_RINGS);
5394         bp->rx_max_ring_idx = (bp->rx_max_ring * BNX2_RX_DESC_CNT) - 1;
5395 }
5396
/* Release every skb still queued on the TX rings: unmap the head and
 * all fragment DMA mappings, free the skb, and reset the BQL queue
 * state for each ring.
 */
static void
bnx2_free_tx_skbs(struct bnx2 *bp)
{
	int i;

	for (i = 0; i < bp->num_tx_rings; i++) {
		struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
		struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
		int j;

		/* Ring buffer array never allocated (partial setup). */
		if (txr->tx_buf_ring == NULL)
			continue;

		for (j = 0; j < BNX2_TX_DESC_CNT; ) {
			struct bnx2_sw_tx_bd *tx_buf = &txr->tx_buf_ring[j];
			struct sk_buff *skb = tx_buf->skb;
			int k, last;

			if (skb == NULL) {
				j = BNX2_NEXT_TX_BD(j);
				continue;
			}

			/* Unmap the skb's linear head first ... */
			dma_unmap_single(&bp->pdev->dev,
					 dma_unmap_addr(tx_buf, mapping),
					 skb_headlen(skb),
					 PCI_DMA_TODEVICE);

			tx_buf->skb = NULL;

			/* ... then each paged fragment, which occupy
			 * the BDs that follow the head BD.
			 */
			last = tx_buf->nr_frags;
			j = BNX2_NEXT_TX_BD(j);
			for (k = 0; k < last; k++, j = BNX2_NEXT_TX_BD(j)) {
				tx_buf = &txr->tx_buf_ring[BNX2_TX_RING_IDX(j)];
				dma_unmap_page(&bp->pdev->dev,
					dma_unmap_addr(tx_buf, mapping),
					skb_frag_size(&skb_shinfo(skb)->frags[k]),
					PCI_DMA_TODEVICE);
			}
			dev_kfree_skb(skb);
		}
		netdev_tx_reset_queue(netdev_get_tx_queue(bp->dev, i));
	}
}
5441
5442 static void
5443 bnx2_free_rx_skbs(struct bnx2 *bp)
5444 {
5445         int i;
5446
5447         for (i = 0; i < bp->num_rx_rings; i++) {
5448                 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
5449                 struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
5450                 int j;
5451
5452                 if (rxr->rx_buf_ring == NULL)
5453                         return;
5454
5455                 for (j = 0; j < bp->rx_max_ring_idx; j++) {
5456                         struct bnx2_sw_bd *rx_buf = &rxr->rx_buf_ring[j];
5457                         u8 *data = rx_buf->data;
5458
5459                         if (data == NULL)
5460                                 continue;
5461
5462                         dma_unmap_single(&bp->pdev->dev,
5463                                          dma_unmap_addr(rx_buf, mapping),
5464                                          bp->rx_buf_use_size,
5465                                          PCI_DMA_FROMDEVICE);
5466
5467                         rx_buf->data = NULL;
5468
5469                         kfree(data);
5470                 }
5471                 for (j = 0; j < bp->rx_max_pg_ring_idx; j++)
5472                         bnx2_free_rx_page(bp, rxr, j);
5473         }
5474 }
5475
/* Drop all buffers still attached to the TX and RX rings; used when
 * resetting or shutting down the device.
 */
static void
bnx2_free_skbs(struct bnx2 *bp)
{
	bnx2_free_tx_skbs(bp);
	bnx2_free_rx_skbs(bp);
}
5482
5483 static int
5484 bnx2_reset_nic(struct bnx2 *bp, u32 reset_code)
5485 {
5486         int rc;
5487
5488         rc = bnx2_reset_chip(bp, reset_code);
5489         bnx2_free_skbs(bp);
5490         if (rc)
5491                 return rc;
5492
5493         if ((rc = bnx2_init_chip(bp)) != 0)
5494                 return rc;
5495
5496         bnx2_init_all_rings(bp);
5497         return 0;
5498 }
5499
5500 static int
5501 bnx2_init_nic(struct bnx2 *bp, int reset_phy)
5502 {
5503         int rc;
5504
5505         if ((rc = bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET)) != 0)
5506                 return rc;
5507
5508         spin_lock_bh(&bp->phy_lock);
5509         bnx2_init_phy(bp, reset_phy);
5510         bnx2_set_link(bp);
5511         if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
5512                 bnx2_remote_phy_event(bp);
5513         spin_unlock_bh(&bp->phy_lock);
5514         return 0;
5515 }
5516
5517 static int
5518 bnx2_shutdown_chip(struct bnx2 *bp)
5519 {
5520         u32 reset_code;
5521
5522         if (bp->flags & BNX2_FLAG_NO_WOL)
5523                 reset_code = BNX2_DRV_MSG_CODE_UNLOAD_LNK_DN;
5524         else if (bp->wol)
5525                 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
5526         else
5527                 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
5528
5529         return bnx2_reset_chip(bp, reset_code);
5530 }
5531
/* Ethtool self-test: verify that a table of chip registers behave as
 * documented.  For each register, write all-zeros and then all-ones,
 * checking that read/write bits (rw_mask) take the written value and
 * that read-only bits (ro_mask) keep their original value.  The
 * original register contents are restored in every case.  Returns 0
 * on success, -ENODEV on the first mismatch.
 */
static int
bnx2_test_registers(struct bnx2 *bp)
{
	int ret;
	int i, is_5709;
	/* { offset, flags, rw_mask, ro_mask } */
	static const struct {
		u16   offset;
		u16   flags;
#define BNX2_FL_NOT_5709	1
		u32   rw_mask;
		u32   ro_mask;
	} reg_tbl[] = {
		{ 0x006c, 0, 0x00000000, 0x0000003f },
		{ 0x0090, 0, 0xffffffff, 0x00000000 },
		{ 0x0094, 0, 0x00000000, 0x00000000 },

		{ 0x0404, BNX2_FL_NOT_5709, 0x00003f00, 0x00000000 },
		{ 0x0418, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
		{ 0x041c, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
		{ 0x0420, BNX2_FL_NOT_5709, 0x00000000, 0x80ffffff },
		{ 0x0424, BNX2_FL_NOT_5709, 0x00000000, 0x00000000 },
		{ 0x0428, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
		{ 0x0450, BNX2_FL_NOT_5709, 0x00000000, 0x0000ffff },
		{ 0x0454, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
		{ 0x0458, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },

		{ 0x0808, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
		{ 0x0854, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
		{ 0x0868, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
		{ 0x086c, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
		{ 0x0870, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
		{ 0x0874, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },

		{ 0x0c00, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
		{ 0x0c04, BNX2_FL_NOT_5709, 0x00000000, 0x03ff0001 },
		{ 0x0c08, BNX2_FL_NOT_5709,  0x0f0ff073, 0x00000000 },

		{ 0x1000, 0, 0x00000000, 0x00000001 },
		{ 0x1004, BNX2_FL_NOT_5709, 0x00000000, 0x000f0001 },

		{ 0x1408, 0, 0x01c00800, 0x00000000 },
		{ 0x149c, 0, 0x8000ffff, 0x00000000 },
		{ 0x14a8, 0, 0x00000000, 0x000001ff },
		{ 0x14ac, 0, 0x0fffffff, 0x10000000 },
		{ 0x14b0, 0, 0x00000002, 0x00000001 },
		{ 0x14b8, 0, 0x00000000, 0x00000000 },
		{ 0x14c0, 0, 0x00000000, 0x00000009 },
		{ 0x14c4, 0, 0x00003fff, 0x00000000 },
		{ 0x14cc, 0, 0x00000000, 0x00000001 },
		{ 0x14d0, 0, 0xffffffff, 0x00000000 },

		{ 0x1800, 0, 0x00000000, 0x00000001 },
		{ 0x1804, 0, 0x00000000, 0x00000003 },

		{ 0x2800, 0, 0x00000000, 0x00000001 },
		{ 0x2804, 0, 0x00000000, 0x00003f01 },
		{ 0x2808, 0, 0x0f3f3f03, 0x00000000 },
		{ 0x2810, 0, 0xffff0000, 0x00000000 },
		{ 0x2814, 0, 0xffff0000, 0x00000000 },
		{ 0x2818, 0, 0xffff0000, 0x00000000 },
		{ 0x281c, 0, 0xffff0000, 0x00000000 },
		{ 0x2834, 0, 0xffffffff, 0x00000000 },
		{ 0x2840, 0, 0x00000000, 0xffffffff },
		{ 0x2844, 0, 0x00000000, 0xffffffff },
		{ 0x2848, 0, 0xffffffff, 0x00000000 },
		{ 0x284c, 0, 0xf800f800, 0x07ff07ff },

		{ 0x2c00, 0, 0x00000000, 0x00000011 },
		{ 0x2c04, 0, 0x00000000, 0x00030007 },

		{ 0x3c00, 0, 0x00000000, 0x00000001 },
		{ 0x3c04, 0, 0x00000000, 0x00070000 },
		{ 0x3c08, 0, 0x00007f71, 0x07f00000 },
		{ 0x3c0c, 0, 0x1f3ffffc, 0x00000000 },
		{ 0x3c10, 0, 0xffffffff, 0x00000000 },
		{ 0x3c14, 0, 0x00000000, 0xffffffff },
		{ 0x3c18, 0, 0x00000000, 0xffffffff },
		{ 0x3c1c, 0, 0xfffff000, 0x00000000 },
		{ 0x3c20, 0, 0xffffff00, 0x00000000 },

		{ 0x5004, 0, 0x00000000, 0x0000007f },
		{ 0x5008, 0, 0x0f0007ff, 0x00000000 },

		{ 0x5c00, 0, 0x00000000, 0x00000001 },
		{ 0x5c04, 0, 0x00000000, 0x0003000f },
		{ 0x5c08, 0, 0x00000003, 0x00000000 },
		{ 0x5c0c, 0, 0x0000fff8, 0x00000000 },
		{ 0x5c10, 0, 0x00000000, 0xffffffff },
		{ 0x5c80, 0, 0x00000000, 0x0f7113f1 },
		{ 0x5c84, 0, 0x00000000, 0x0000f333 },
		{ 0x5c88, 0, 0x00000000, 0x00077373 },
		{ 0x5c8c, 0, 0x00000000, 0x0007f737 },

		{ 0x6808, 0, 0x0000ff7f, 0x00000000 },
		{ 0x680c, 0, 0xffffffff, 0x00000000 },
		{ 0x6810, 0, 0xffffffff, 0x00000000 },
		{ 0x6814, 0, 0xffffffff, 0x00000000 },
		{ 0x6818, 0, 0xffffffff, 0x00000000 },
		{ 0x681c, 0, 0xffffffff, 0x00000000 },
		{ 0x6820, 0, 0x00ff00ff, 0x00000000 },
		{ 0x6824, 0, 0x00ff00ff, 0x00000000 },
		{ 0x6828, 0, 0x00ff00ff, 0x00000000 },
		{ 0x682c, 0, 0x03ff03ff, 0x00000000 },
		{ 0x6830, 0, 0x03ff03ff, 0x00000000 },
		{ 0x6834, 0, 0x03ff03ff, 0x00000000 },
		{ 0x6838, 0, 0x03ff03ff, 0x00000000 },
		{ 0x683c, 0, 0x0000ffff, 0x00000000 },
		{ 0x6840, 0, 0x00000ff0, 0x00000000 },
		{ 0x6844, 0, 0x00ffff00, 0x00000000 },
		{ 0x684c, 0, 0xffffffff, 0x00000000 },
		{ 0x6850, 0, 0x7f7f7f7f, 0x00000000 },
		{ 0x6854, 0, 0x7f7f7f7f, 0x00000000 },
		{ 0x6858, 0, 0x7f7f7f7f, 0x00000000 },
		{ 0x685c, 0, 0x7f7f7f7f, 0x00000000 },
		{ 0x6908, 0, 0x00000000, 0x0001ff0f },
		{ 0x690c, 0, 0x00000000, 0x0ffe00f0 },

		/* sentinel */
		{ 0xffff, 0, 0x00000000, 0x00000000 },
	};

	ret = 0;
	is_5709 = 0;
	if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
		is_5709 = 1;

	for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
		u32 offset, rw_mask, ro_mask, save_val, val;
		u16 flags = reg_tbl[i].flags;

		/* Entries flagged NOT_5709 don't exist on the 5709. */
		if (is_5709 && (flags & BNX2_FL_NOT_5709))
			continue;

		offset = (u32) reg_tbl[i].offset;
		rw_mask = reg_tbl[i].rw_mask;
		ro_mask = reg_tbl[i].ro_mask;

		save_val = readl(bp->regview + offset);

		/* Pass 1: write zeros; rw bits must read back as 0,
		 * ro bits must be unchanged.
		 */
		writel(0, bp->regview + offset);

		val = readl(bp->regview + offset);
		if ((val & rw_mask) != 0) {
			goto reg_test_err;
		}

		if ((val & ro_mask) != (save_val & ro_mask)) {
			goto reg_test_err;
		}

		/* Pass 2: write ones; rw bits must read back as 1,
		 * ro bits must still be unchanged.
		 */
		writel(0xffffffff, bp->regview + offset);

		val = readl(bp->regview + offset);
		if ((val & rw_mask) != rw_mask) {
			goto reg_test_err;
		}

		if ((val & ro_mask) != (save_val & ro_mask)) {
			goto reg_test_err;
		}

		writel(save_val, bp->regview + offset);
		continue;

reg_test_err:
		writel(save_val, bp->regview + offset);
		ret = -ENODEV;
		break;
	}
	return ret;
}
5702
5703 static int
5704 bnx2_do_mem_test(struct bnx2 *bp, u32 start, u32 size)
5705 {
5706         static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0x55555555,
5707                 0xaaaaaaaa , 0xaa55aa55, 0x55aa55aa };
5708         int i;
5709
5710         for (i = 0; i < sizeof(test_pattern) / 4; i++) {
5711                 u32 offset;
5712
5713                 for (offset = 0; offset < size; offset += 4) {
5714
5715                         bnx2_reg_wr_ind(bp, start + offset, test_pattern[i]);
5716
5717                         if (bnx2_reg_rd_ind(bp, start + offset) !=
5718                                 test_pattern[i]) {
5719                                 return -ENODEV;
5720                         }
5721                 }
5722         }
5723         return 0;
5724 }
5725
5726 static int
5727 bnx2_test_memory(struct bnx2 *bp)
5728 {
5729         int ret = 0;
5730         int i;
5731         static struct mem_entry {
5732                 u32   offset;
5733                 u32   len;
5734         } mem_tbl_5706[] = {
5735                 { 0x60000,  0x4000 },
5736                 { 0xa0000,  0x3000 },
5737                 { 0xe0000,  0x4000 },
5738                 { 0x120000, 0x4000 },
5739                 { 0x1a0000, 0x4000 },
5740                 { 0x160000, 0x4000 },
5741                 { 0xffffffff, 0    },
5742         },
5743         mem_tbl_5709[] = {
5744                 { 0x60000,  0x4000 },
5745                 { 0xa0000,  0x3000 },
5746                 { 0xe0000,  0x4000 },
5747                 { 0x120000, 0x4000 },
5748                 { 0x1a0000, 0x4000 },
5749                 { 0xffffffff, 0    },
5750         };
5751         struct mem_entry *mem_tbl;
5752
5753         if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
5754                 mem_tbl = mem_tbl_5709;
5755         else
5756                 mem_tbl = mem_tbl_5706;
5757
5758         for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
5759                 if ((ret = bnx2_do_mem_test(bp, mem_tbl[i].offset,
5760                         mem_tbl[i].len)) != 0) {
5761                         return ret;
5762                 }
5763         }
5764
5765         return ret;
5766 }
5767
5768 #define BNX2_MAC_LOOPBACK       0
5769 #define BNX2_PHY_LOOPBACK       1
5770
/* Send one self-addressed test frame through the selected loopback path
 * (MAC-internal or PHY-internal) and verify that it comes back on the
 * first RX ring intact: no hardware frame errors, correct length, and a
 * payload matching the transmitted byte pattern.
 *
 * @bp:            device context
 * @loopback_mode: BNX2_MAC_LOOPBACK or BNX2_PHY_LOOPBACK
 *
 * Returns 0 on success, -EINVAL for an unknown mode, -ENOMEM/-EIO on
 * allocation/DMA-mapping failure, -ENODEV when the frame does not make
 * the round trip correctly.
 */
static int
bnx2_run_loopback(struct bnx2 *bp, int loopback_mode)
{
	unsigned int pkt_size, num_pkts, i;
	struct sk_buff *skb;
	u8 *data;
	unsigned char *packet;
	u16 rx_start_idx, rx_idx;
	dma_addr_t map;
	struct bnx2_tx_bd *txbd;
	struct bnx2_sw_bd *rx_buf;
	struct l2_fhdr *rx_hdr;
	int ret = -ENODEV;
	struct bnx2_napi *bnapi = &bp->bnx2_napi[0], *tx_napi;
	/* NOTE(review): txr/rxr are re-assigned to the same values just
	 * below; these initializers are redundant. */
	struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
	struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;

	tx_napi = bnapi;

	txr = &tx_napi->tx_ring;
	rxr = &bnapi->rx_ring;
	if (loopback_mode == BNX2_MAC_LOOPBACK) {
		bp->loopback = MAC_LOOPBACK;
		bnx2_set_mac_loopback(bp);
	}
	else if (loopback_mode == BNX2_PHY_LOOPBACK) {
		/* PHY is firmware-managed; skip the test and report OK. */
		if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
			return 0;

		bp->loopback = PHY_LOOPBACK;
		bnx2_set_phy_loopback(bp);
	}
	else
		return -EINVAL;

	/* Frame must fit in one RX buffer (jumbo threshold minus CRC). */
	pkt_size = min(bp->dev->mtu + ETH_HLEN, bp->rx_jumbo_thresh - 4);
	skb = netdev_alloc_skb(bp->dev, pkt_size);
	if (!skb)
		return -ENOMEM;
	packet = skb_put(skb, pkt_size);
	/* DA = our own MAC, SA zeroed, payload = ascending byte pattern
	 * that the receive side checks below. */
	memcpy(packet, bp->dev->dev_addr, ETH_ALEN);
	memset(packet + ETH_ALEN, 0x0, 8);
	for (i = 14; i < pkt_size; i++)
		packet[i] = (unsigned char) (i & 0xff);

	map = dma_map_single(&bp->pdev->dev, skb->data, pkt_size,
			     PCI_DMA_TODEVICE);
	if (dma_mapping_error(&bp->pdev->dev, map)) {
		dev_kfree_skb(skb);
		return -EIO;
	}

	/* Kick a coalescing pass (no interrupt) so the status block's RX
	 * consumer index is current before snapshotting it. */
	BNX2_WR(bp, BNX2_HC_COMMAND,
		bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);

	BNX2_RD(bp, BNX2_HC_COMMAND);

	udelay(5);
	rx_start_idx = bnx2_get_hw_rx_cons(bnapi);

	num_pkts = 0;

	/* Hand-build a single TX descriptor for the test frame. */
	txbd = &txr->tx_desc_ring[BNX2_TX_RING_IDX(txr->tx_prod)];

	txbd->tx_bd_haddr_hi = (u64) map >> 32;
	txbd->tx_bd_haddr_lo = (u64) map & 0xffffffff;
	txbd->tx_bd_mss_nbytes = pkt_size;
	txbd->tx_bd_vlan_tag_flags = TX_BD_FLAGS_START | TX_BD_FLAGS_END;

	num_pkts++;
	txr->tx_prod = BNX2_NEXT_TX_BD(txr->tx_prod);
	txr->tx_prod_bseq += pkt_size;

	/* Ring the TX doorbell. */
	BNX2_WR16(bp, txr->tx_bidx_addr, txr->tx_prod);
	BNX2_WR(bp, txr->tx_bseq_addr, txr->tx_prod_bseq);

	/* Give the chip time to loop the frame back. */
	udelay(100);

	/* Second coalescing kick so the TX/RX indices below are fresh. */
	BNX2_WR(bp, BNX2_HC_COMMAND,
		bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);

	BNX2_RD(bp, BNX2_HC_COMMAND);

	udelay(5);

	dma_unmap_single(&bp->pdev->dev, map, pkt_size, PCI_DMA_TODEVICE);
	dev_kfree_skb(skb);

	/* The frame must be fully transmitted... */
	if (bnx2_get_hw_tx_cons(tx_napi) != txr->tx_prod)
		goto loopback_test_done;

	/* ...and exactly num_pkts frames received. */
	rx_idx = bnx2_get_hw_rx_cons(bnapi);
	if (rx_idx != rx_start_idx + num_pkts) {
		goto loopback_test_done;
	}

	rx_buf = &rxr->rx_buf_ring[rx_start_idx];
	data = rx_buf->data;

	rx_hdr = get_l2_fhdr(data);
	data = (u8 *)rx_hdr + BNX2_RX_OFFSET;

	dma_sync_single_for_cpu(&bp->pdev->dev,
		dma_unmap_addr(rx_buf, mapping),
		bp->rx_buf_use_size, PCI_DMA_FROMDEVICE);

	/* Reject the frame if the hardware flagged any receive error. */
	if (rx_hdr->l2_fhdr_status &
		(L2_FHDR_ERRORS_BAD_CRC |
		L2_FHDR_ERRORS_PHY_DECODE |
		L2_FHDR_ERRORS_ALIGNMENT |
		L2_FHDR_ERRORS_TOO_SHORT |
		L2_FHDR_ERRORS_GIANT_FRAME)) {

		goto loopback_test_done;
	}

	/* Received length includes the 4-byte CRC. */
	if ((rx_hdr->l2_fhdr_pkt_len - 4) != pkt_size) {
		goto loopback_test_done;
	}

	/* Verify the payload pattern survived the round trip. */
	for (i = 14; i < pkt_size; i++) {
		if (*(data + i) != (unsigned char) (i & 0xff)) {
			goto loopback_test_done;
		}
	}

	ret = 0;

loopback_test_done:
	bp->loopback = 0;
	return ret;
}
5903
5904 #define BNX2_MAC_LOOPBACK_FAILED        1
5905 #define BNX2_PHY_LOOPBACK_FAILED        2
5906 #define BNX2_LOOPBACK_FAILED            (BNX2_MAC_LOOPBACK_FAILED |     \
5907                                          BNX2_PHY_LOOPBACK_FAILED)
5908
5909 static int
5910 bnx2_test_loopback(struct bnx2 *bp)
5911 {
5912         int rc = 0;
5913
5914         if (!netif_running(bp->dev))
5915                 return BNX2_LOOPBACK_FAILED;
5916
5917         bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET);
5918         spin_lock_bh(&bp->phy_lock);
5919         bnx2_init_phy(bp, 1);
5920         spin_unlock_bh(&bp->phy_lock);
5921         if (bnx2_run_loopback(bp, BNX2_MAC_LOOPBACK))
5922                 rc |= BNX2_MAC_LOOPBACK_FAILED;
5923         if (bnx2_run_loopback(bp, BNX2_PHY_LOOPBACK))
5924                 rc |= BNX2_PHY_LOOPBACK_FAILED;
5925         return rc;
5926 }
5927
5928 #define NVRAM_SIZE 0x200
5929 #define CRC32_RESIDUAL 0xdebb20e3
5930
5931 static int
5932 bnx2_test_nvram(struct bnx2 *bp)
5933 {
5934         __be32 buf[NVRAM_SIZE / 4];
5935         u8 *data = (u8 *) buf;
5936         int rc = 0;
5937         u32 magic, csum;
5938
5939         if ((rc = bnx2_nvram_read(bp, 0, data, 4)) != 0)
5940                 goto test_nvram_done;
5941
5942         magic = be32_to_cpu(buf[0]);
5943         if (magic != 0x669955aa) {
5944                 rc = -ENODEV;
5945                 goto test_nvram_done;
5946         }
5947
5948         if ((rc = bnx2_nvram_read(bp, 0x100, data, NVRAM_SIZE)) != 0)
5949                 goto test_nvram_done;
5950
5951         csum = ether_crc_le(0x100, data);
5952         if (csum != CRC32_RESIDUAL) {
5953                 rc = -ENODEV;
5954                 goto test_nvram_done;
5955         }
5956
5957         csum = ether_crc_le(0x100, data + 0x100);
5958         if (csum != CRC32_RESIDUAL) {
5959                 rc = -ENODEV;
5960         }
5961
5962 test_nvram_done:
5963         return rc;
5964 }
5965
5966 static int
5967 bnx2_test_link(struct bnx2 *bp)
5968 {
5969         u32 bmsr;
5970
5971         if (!netif_running(bp->dev))
5972                 return -ENODEV;
5973
5974         if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
5975                 if (bp->link_up)
5976                         return 0;
5977                 return -ENODEV;
5978         }
5979         spin_lock_bh(&bp->phy_lock);
5980         bnx2_enable_bmsr1(bp);
5981         bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
5982         bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
5983         bnx2_disable_bmsr1(bp);
5984         spin_unlock_bh(&bp->phy_lock);
5985
5986         if (bmsr & BMSR_LSTATUS) {
5987                 return 0;
5988         }
5989         return -ENODEV;
5990 }
5991
5992 static int
5993 bnx2_test_intr(struct bnx2 *bp)
5994 {
5995         int i;
5996         u16 status_idx;
5997
5998         if (!netif_running(bp->dev))
5999                 return -ENODEV;
6000
6001         status_idx = BNX2_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff;
6002
6003         /* This register is not touched during run-time. */
6004         BNX2_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
6005         BNX2_RD(bp, BNX2_HC_COMMAND);
6006
6007         for (i = 0; i < 10; i++) {
6008                 if ((BNX2_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff) !=
6009                         status_idx) {
6010
6011                         break;
6012                 }
6013
6014                 msleep_interruptible(10);
6015         }
6016         if (i < 10)
6017                 return 0;
6018
6019         return -ENODEV;
6020 }
6021
/* Determining link for parallel detection.
 *
 * Returns 1 when the 5706 SerDes appears to have a usable link from a
 * non-autonegotiating partner (signal detected, sync acquired, not
 * receiving CONFIG code words); 0 otherwise, or when parallel
 * detection is disabled for this board.
 */
static int
bnx2_5706_serdes_has_link(struct bnx2 *bp)
{
	u32 mode_ctl, an_dbg, exp;

	if (bp->phy_flags & BNX2_PHY_FLAG_NO_PARALLEL)
		return 0;

	/* Select the MODE_CTL shadow register, then read it back. */
	bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_MODE_CTL);
	bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &mode_ctl);

	/* No signal on the SerDes -> no link. */
	if (!(mode_ctl & MISC_SHDW_MODE_CTL_SIG_DET))
		return 0;

	bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
	/* Read twice — presumably to clear latched status and get the
	 * current state; the same pattern is used throughout this driver. */
	bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);
	bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);

	if (an_dbg & (MISC_SHDW_AN_DBG_NOSYNC | MISC_SHDW_AN_DBG_RUDI_INVALID))
		return 0;

	bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS, MII_EXPAND_REG1);
	bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &exp);
	bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &exp);

	if (exp & MII_EXPAND_REG1_RUDI_C)	/* receiving CONFIG */
		return 0;

	return 1;
}
6053
/* Timer-context link maintenance for the 5706 SerDes.
 *
 * When autoneg is enabled but the link stays down, falls back to
 * forcing 1G full duplex if the partner looks like a non-negotiating
 * device (parallel detection).  When a parallel-detected link later
 * shows an autoneg-capable partner, re-enables autoneg.  Also forces
 * the link down, then re-evaluates it, when sync is lost.
 */
static void
bnx2_5706_serdes_timer(struct bnx2 *bp)
{
	int check_link = 1;

	spin_lock(&bp->phy_lock);
	if (bp->serdes_an_pending) {
		/* Autoneg was (re)started recently; give it a few more
		 * timer ticks before interfering. */
		bp->serdes_an_pending--;
		check_link = 0;
	} else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
		u32 bmcr;

		bp->current_interval = BNX2_TIMER_INTERVAL;

		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

		if (bmcr & BMCR_ANENABLE) {
			/* Partner may not autoneg: if the wire shows a
			 * valid signal, force 1G/FD (parallel detect). */
			if (bnx2_5706_serdes_has_link(bp)) {
				bmcr &= ~BMCR_ANENABLE;
				bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
				bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
				bp->phy_flags |= BNX2_PHY_FLAG_PARALLEL_DETECT;
			}
		}
	}
	else if ((bp->link_up) && (bp->autoneg & AUTONEG_SPEED) &&
		 (bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT)) {
		u32 phy2;

		/* Link is up via parallel detect; if the partner now
		 * signals autoneg capability (bit 0x20 of shadow reg 0x15
		 * after selecting 0x0f01 — TODO confirm against the PHY
		 * documentation), switch back to autoneg. */
		bnx2_write_phy(bp, 0x17, 0x0f01);
		bnx2_read_phy(bp, 0x15, &phy2);
		if (phy2 & 0x20) {
			u32 bmcr;

			bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
			bmcr |= BMCR_ANENABLE;
			bnx2_write_phy(bp, bp->mii_bmcr, bmcr);

			bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;
		}
	} else
		bp->current_interval = BNX2_TIMER_INTERVAL;

	if (check_link) {
		u32 val;

		/* Double read of the AN debug shadow register (second
		 * read returns the current, non-latched state). */
		bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
		bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &val);
		bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &val);

		if (bp->link_up && (val & MISC_SHDW_AN_DBG_NOSYNC)) {
			/* Lost sync: force the link down first; on a
			 * subsequent tick re-run the link state machine. */
			if (!(bp->phy_flags & BNX2_PHY_FLAG_FORCED_DOWN)) {
				bnx2_5706s_force_link_dn(bp, 1);
				bp->phy_flags |= BNX2_PHY_FLAG_FORCED_DOWN;
			} else
				bnx2_set_link(bp);
		} else if (!bp->link_up && !(val & MISC_SHDW_AN_DBG_NOSYNC))
			bnx2_set_link(bp);
	}
	spin_unlock(&bp->phy_lock);
}
6115
/* Timer-context link maintenance for the 5708 SerDes.
 *
 * When autoneg is enabled but the link stays down on a 2.5G-capable
 * part, alternate between forced 2.5G and autoneg so a link can come
 * up against non-negotiating partners.  No-op for firmware-managed
 * (remote) PHYs.
 */
static void
bnx2_5708_serdes_timer(struct bnx2 *bp)
{
	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
		return;

	if ((bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) == 0) {
		bp->serdes_an_pending = 0;
		return;
	}

	spin_lock(&bp->phy_lock);
	if (bp->serdes_an_pending)
		/* Autoneg restarted recently; wait it out. */
		bp->serdes_an_pending--;
	else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
		u32 bmcr;

		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
		if (bmcr & BMCR_ANENABLE) {
			/* Try forced 2.5G for one (shorter) interval. */
			bnx2_enable_forced_2g5(bp);
			bp->current_interval = BNX2_SERDES_FORCED_TIMEOUT;
		} else {
			/* Forced mode did not link either; return to
			 * autoneg and hold off for two timer ticks. */
			bnx2_disable_forced_2g5(bp);
			bp->serdes_an_pending = 2;
			bp->current_interval = BNX2_TIMER_INTERVAL;
		}

	} else
		bp->current_interval = BNX2_TIMER_INTERVAL;

	spin_unlock(&bp->phy_lock);
}
6148
/* Periodic device timer (re-armed every bp->current_interval jiffies).
 *
 * Sends the firmware heartbeat, refreshes the firmware RX-drop
 * counter, applies the broken-statistics workaround, checks for missed
 * MSIs, and drives the SerDes link state machines.  Skips all work
 * while interrupts are gated (intr_sem != 0, e.g. during reset).
 */
static void
bnx2_timer(unsigned long data)
{
	struct bnx2 *bp = (struct bnx2 *) data;

	if (!netif_running(bp->dev))
		return;

	if (atomic_read(&bp->intr_sem) != 0)
		goto bnx2_restart_timer;

	/* Only plain MSI (not one-shot) needs the missed-MSI check. */
	if ((bp->flags & (BNX2_FLAG_USING_MSI | BNX2_FLAG_ONE_SHOT_MSI)) ==
	     BNX2_FLAG_USING_MSI)
		bnx2_chk_missed_msi(bp);

	bnx2_send_heart_beat(bp);

	bp->stats_blk->stat_FwRxDrop =
		bnx2_reg_rd_ind(bp, BNX2_FW_RX_DROP_COUNT);

	/* workaround occasional corrupted counters */
	if ((bp->flags & BNX2_FLAG_BROKEN_STATS) && bp->stats_ticks)
		BNX2_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd |
			BNX2_HC_COMMAND_STATS_NOW);

	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
		if (BNX2_CHIP(bp) == BNX2_CHIP_5706)
			bnx2_5706_serdes_timer(bp);
		else
			bnx2_5708_serdes_timer(bp);
	}

bnx2_restart_timer:
	mod_timer(&bp->timer, jiffies + bp->current_interval);
}
6184
6185 static int
6186 bnx2_request_irq(struct bnx2 *bp)
6187 {
6188         unsigned long flags;
6189         struct bnx2_irq *irq;
6190         int rc = 0, i;
6191
6192         if (bp->flags & BNX2_FLAG_USING_MSI_OR_MSIX)
6193                 flags = 0;
6194         else
6195                 flags = IRQF_SHARED;
6196
6197         for (i = 0; i < bp->irq_nvecs; i++) {
6198                 irq = &bp->irq_tbl[i];
6199                 rc = request_irq(irq->vector, irq->handler, flags, irq->name,
6200                                  &bp->bnx2_napi[i]);
6201                 if (rc)
6202                         break;
6203                 irq->requested = 1;
6204         }
6205         return rc;
6206 }
6207
6208 static void
6209 __bnx2_free_irq(struct bnx2 *bp)
6210 {
6211         struct bnx2_irq *irq;
6212         int i;
6213
6214         for (i = 0; i < bp->irq_nvecs; i++) {
6215                 irq = &bp->irq_tbl[i];
6216                 if (irq->requested)
6217                         free_irq(irq->vector, &bp->bnx2_napi[i]);
6218                 irq->requested = 0;
6219         }
6220 }
6221
6222 static void
6223 bnx2_free_irq(struct bnx2 *bp)
6224 {
6225
6226         __bnx2_free_irq(bp);
6227         if (bp->flags & BNX2_FLAG_USING_MSI)
6228                 pci_disable_msi(bp->pdev);
6229         else if (bp->flags & BNX2_FLAG_USING_MSIX)
6230                 pci_disable_msix(bp->pdev);
6231
6232         bp->flags &= ~(BNX2_FLAG_USING_MSI_OR_MSIX | BNX2_FLAG_ONE_SHOT_MSI);
6233 }
6234
/* Try to put the device into MSI-X mode with up to @msix_vecs vectors
 * (plus one extra for CNIC when built in).  On success, sets
 * BNX2_FLAG_USING_MSIX / BNX2_FLAG_ONE_SHOT_MSI and fills bp->irq_tbl;
 * on any failure returns silently, leaving the caller in INTx/MSI mode.
 */
static void
bnx2_enable_msix(struct bnx2 *bp, int msix_vecs)
{
	int i, total_vecs, rc;
	struct msix_entry msix_ent[BNX2_MAX_MSIX_VEC];
	struct net_device *dev = bp->dev;
	const int len = sizeof(bp->irq_tbl[0].name);

	/* Program the chip's MSI-X table/PBA window registers. */
	bnx2_setup_msix_tbl(bp);
	BNX2_WR(bp, BNX2_PCI_MSIX_CONTROL, BNX2_MAX_MSIX_HW_VEC - 1);
	BNX2_WR(bp, BNX2_PCI_MSIX_TBL_OFF_BIR, BNX2_PCI_GRC_WINDOW2_BASE);
	BNX2_WR(bp, BNX2_PCI_MSIX_PBA_OFF_BIT, BNX2_PCI_GRC_WINDOW3_BASE);

	/*  Need to flush the previous three writes to ensure MSI-X
	 *  is setup properly */
	BNX2_RD(bp, BNX2_PCI_MSIX_CONTROL);

	for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) {
		msix_ent[i].entry = i;
		msix_ent[i].vector = 0;
	}

	total_vecs = msix_vecs;
#ifdef BCM_CNIC
	/* One extra vector is reserved for the CNIC driver. */
	total_vecs++;
#endif
	rc = -ENOSPC;
	/* pci_enable_msix() returns a positive count of vectors that
	 * could be allocated; retry with that smaller count until it
	 * succeeds (0), fails hard (<0), or drops below the minimum. */
	while (total_vecs >= BNX2_MIN_MSIX_VEC) {
		rc = pci_enable_msix(bp->pdev, msix_ent, total_vecs);
		if (rc <= 0)
			break;
		if (rc > 0)
			total_vecs = rc;
	}

	if (rc != 0)
		return;

	msix_vecs = total_vecs;
#ifdef BCM_CNIC
	/* Give the CNIC vector back; irq_nvecs counts only our own. */
	msix_vecs--;
#endif
	bp->irq_nvecs = msix_vecs;
	bp->flags |= BNX2_FLAG_USING_MSIX | BNX2_FLAG_ONE_SHOT_MSI;
	for (i = 0; i < total_vecs; i++) {
		bp->irq_tbl[i].vector = msix_ent[i].vector;
		snprintf(bp->irq_tbl[i].name, len, "%s-%d", dev->name, i);
		bp->irq_tbl[i].handler = bnx2_msi_1shot;
	}
}
6285
/* Choose the interrupt mode (MSI-X, MSI, or legacy INTx) and derive
 * the number of TX/RX rings from the vectors obtained.  @dis_msi
 * forces INTx.  Returns the result of netif_set_real_num_rx_queues().
 */
static int
bnx2_setup_int_mode(struct bnx2 *bp, int dis_msi)
{
	int cpus = netif_get_num_default_rss_queues();
	int msix_vecs;

	/* Size the vector request from the user-requested ring counts,
	 * defaulting to one per CPU (+1) when unconstrained. */
	if (!bp->num_req_rx_rings)
		msix_vecs = max(cpus + 1, bp->num_req_tx_rings);
	else if (!bp->num_req_tx_rings)
		msix_vecs = max(cpus, bp->num_req_rx_rings);
	else
		msix_vecs = max(bp->num_req_rx_rings, bp->num_req_tx_rings);

	msix_vecs = min(msix_vecs, RX_MAX_RINGS);

	/* Default to INTx; the MSI-X/MSI setup below overrides these
	 * fields only on success. */
	bp->irq_tbl[0].handler = bnx2_interrupt;
	strcpy(bp->irq_tbl[0].name, bp->dev->name);
	bp->irq_nvecs = 1;
	bp->irq_tbl[0].vector = bp->pdev->irq;

	if ((bp->flags & BNX2_FLAG_MSIX_CAP) && !dis_msi)
		bnx2_enable_msix(bp, msix_vecs);

	/* Fall back to single-vector MSI if MSI-X was not enabled. */
	if ((bp->flags & BNX2_FLAG_MSI_CAP) && !dis_msi &&
	    !(bp->flags & BNX2_FLAG_USING_MSIX)) {
		if (pci_enable_msi(bp->pdev) == 0) {
			bp->flags |= BNX2_FLAG_USING_MSI;
			if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
				/* 5709 gets the one-shot MSI handler. */
				bp->flags |= BNX2_FLAG_ONE_SHOT_MSI;
				bp->irq_tbl[0].handler = bnx2_msi_1shot;
			} else
				bp->irq_tbl[0].handler = bnx2_msi;

			bp->irq_tbl[0].vector = bp->pdev->irq;
		}
	}

	/* TX ring count must be a power of two when unconstrained. */
	if (!bp->num_req_tx_rings)
		bp->num_tx_rings = rounddown_pow_of_two(bp->irq_nvecs);
	else
		bp->num_tx_rings = min(bp->irq_nvecs, bp->num_req_tx_rings);

	if (!bp->num_req_rx_rings)
		bp->num_rx_rings = bp->irq_nvecs;
	else
		bp->num_rx_rings = min(bp->irq_nvecs, bp->num_req_rx_rings);

	netif_set_real_num_tx_queues(bp->dev, bp->num_tx_rings);

	return netif_set_real_num_rx_queues(bp->dev, bp->num_rx_rings);
}
6337
/* Called with rtnl_lock.
 *
 * ndo_open: bring the interface up — load firmware, pick an interrupt
 * mode, allocate rings/NAPI, request IRQs, initialize the NIC, and
 * start the TX queues.  When MSI is in use, verify that an interrupt
 * actually arrives and fall back to INTx if it does not.  On error,
 * unwinds everything via the open_err path and returns the error.
 */
static int
bnx2_open(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	int rc;

	rc = bnx2_request_firmware(bp);
	if (rc < 0)
		goto out;

	netif_carrier_off(dev);

	bnx2_disable_int(bp);

	rc = bnx2_setup_int_mode(bp, disable_msi);
	if (rc)
		goto open_err;
	bnx2_init_napi(bp);
	bnx2_napi_enable(bp);
	rc = bnx2_alloc_mem(bp);
	if (rc)
		goto open_err;

	rc = bnx2_request_irq(bp);
	if (rc)
		goto open_err;

	rc = bnx2_init_nic(bp, 1);
	if (rc)
		goto open_err;

	mod_timer(&bp->timer, jiffies + bp->current_interval);

	atomic_set(&bp->intr_sem, 0);

	memset(bp->temp_stats_blk, 0, sizeof(struct statistics_block));

	bnx2_enable_int(bp);

	if (bp->flags & BNX2_FLAG_USING_MSI) {
		/* Test MSI to make sure it is working
		 * If MSI test fails, go back to INTx mode
		 */
		if (bnx2_test_intr(bp) != 0) {
			netdev_warn(bp->dev, "No interrupt was generated using MSI, switching to INTx mode. Please report this failure to the PCI maintainer and include system chipset information.\n");

			bnx2_disable_int(bp);
			bnx2_free_irq(bp);

			/* Force INTx and reinitialize from scratch. */
			bnx2_setup_int_mode(bp, 1);

			rc = bnx2_init_nic(bp, 0);

			if (!rc)
				rc = bnx2_request_irq(bp);

			if (rc) {
				del_timer_sync(&bp->timer);
				goto open_err;
			}
			bnx2_enable_int(bp);
		}
	}
	if (bp->flags & BNX2_FLAG_USING_MSI)
		netdev_info(dev, "using MSI\n");
	else if (bp->flags & BNX2_FLAG_USING_MSIX)
		netdev_info(dev, "using MSIX\n");

	netif_tx_start_all_queues(dev);
out:
	return rc;

open_err:
	/* Unwind everything acquired above; the free/disable helpers
	 * tolerate resources that were never allocated. */
	bnx2_napi_disable(bp);
	bnx2_free_skbs(bp);
	bnx2_free_irq(bp);
	bnx2_free_mem(bp);
	bnx2_del_napi(bp);
	bnx2_release_firmware(bp);
	goto out;
}
6420
/* Workqueue handler (scheduled from bnx2_tx_timeout()): fully
 * reinitialize the NIC under rtnl_lock, restoring PCI config state
 * first if the PCI block itself appears to have been reset.  Closes
 * the device if reinitialization fails.
 */
static void
bnx2_reset_task(struct work_struct *work)
{
	struct bnx2 *bp = container_of(work, struct bnx2, reset_task);
	int rc;
	u16 pcicmd;

	rtnl_lock();
	/* Device may have been closed between scheduling and running. */
	if (!netif_running(bp->dev)) {
		rtnl_unlock();
		return;
	}

	bnx2_netif_stop(bp, true);

	pci_read_config_word(bp->pdev, PCI_COMMAND, &pcicmd);
	if (!(pcicmd & PCI_COMMAND_MEMORY)) {
		/* in case PCI block has reset */
		pci_restore_state(bp->pdev);
		pci_save_state(bp->pdev);
	}
	rc = bnx2_init_nic(bp, 1);
	if (rc) {
		netdev_err(bp->dev, "failed to reset NIC, closing\n");
		bnx2_napi_enable(bp);
		dev_close(bp->dev);
		rtnl_unlock();
		return;
	}

	/* Gate interrupts until bnx2_netif_start() re-enables them. */
	atomic_set(&bp->intr_sem, 1);
	bnx2_netif_start(bp, true);
	rtnl_unlock();
}
6455
/* Expands to { "<prefix>FTQ_CTL", BNX2_<prefix>FTQ_CTL } — builds the
 * name/offset pairs used by the ftq_arr table in bnx2_dump_ftq(). */
#define BNX2_FTQ_ENTRY(ftq) { __stringify(ftq##FTQ_CTL), BNX2_##ftq##FTQ_CTL }
6457
/* Debug dump (to the kernel log) of the flow-through queue (FTQ)
 * control registers, the internal CPU states, and the TX BD cache
 * (TBDC).  Called from bnx2_tx_timeout() for post-mortem analysis.
 */
static void
bnx2_dump_ftq(struct bnx2 *bp)
{
	int i;
	u32 reg, bdidx, cid, valid;
	struct net_device *dev = bp->dev;
	static const struct ftq_reg {
		char *name;
		u32 off;
	} ftq_arr[] = {
		BNX2_FTQ_ENTRY(RV2P_P),
		BNX2_FTQ_ENTRY(RV2P_T),
		BNX2_FTQ_ENTRY(RV2P_M),
		BNX2_FTQ_ENTRY(TBDR_),
		BNX2_FTQ_ENTRY(TDMA_),
		BNX2_FTQ_ENTRY(TXP_),
		/* NOTE(review): TXP_ appears twice — possibly intentional
		 * (sampling the register a second time), possibly a
		 * duplicate entry; confirm against the chip register list. */
		BNX2_FTQ_ENTRY(TXP_),
		BNX2_FTQ_ENTRY(TPAT_),
		BNX2_FTQ_ENTRY(RXP_C),
		BNX2_FTQ_ENTRY(RXP_),
		BNX2_FTQ_ENTRY(COM_COMXQ_),
		BNX2_FTQ_ENTRY(COM_COMTQ_),
		BNX2_FTQ_ENTRY(COM_COMQ_),
		BNX2_FTQ_ENTRY(CP_CPQ_),
	};

	netdev_err(dev, "<--- start FTQ dump --->\n");
	for (i = 0; i < ARRAY_SIZE(ftq_arr); i++)
		netdev_err(dev, "%s %08x\n", ftq_arr[i].name,
			   bnx2_reg_rd_ind(bp, ftq_arr[i].off));

	/* Walk the on-chip CPUs (register blocks 0x40000 apart) printing
	 * mode, state, event mask, two PC samples, and the instruction.
	 * The PC (reg + 0x1c) is read twice so the log shows whether the
	 * CPU is advancing. */
	netdev_err(dev, "CPU states:\n");
	for (reg = BNX2_TXP_CPU_MODE; reg <= BNX2_CP_CPU_MODE; reg += 0x40000)
		netdev_err(dev, "%06x mode %x state %x evt_mask %x pc %x pc %x instr %x\n",
			   reg, bnx2_reg_rd_ind(bp, reg),
			   bnx2_reg_rd_ind(bp, reg + 4),
			   bnx2_reg_rd_ind(bp, reg + 8),
			   bnx2_reg_rd_ind(bp, reg + 0x1c),
			   bnx2_reg_rd_ind(bp, reg + 0x1c),
			   bnx2_reg_rd_ind(bp, reg + 0x20));

	netdev_err(dev, "<--- end FTQ dump --->\n");
	netdev_err(dev, "<--- start TBDC dump --->\n");
	netdev_err(dev, "TBDC free cnt: %ld\n",
		   BNX2_RD(bp, BNX2_TBDC_STATUS) & BNX2_TBDC_STATUS_FREE_CNT);
	netdev_err(dev, "LINE     CID  BIDX   CMD  VALIDS\n");
	for (i = 0; i < 0x20; i++) {
		int j = 0;

		/* Issue a CAM read for line i and poll (bounded at 100
		 * iterations) for the arbitration bit to clear. */
		BNX2_WR(bp, BNX2_TBDC_BD_ADDR, i);
		BNX2_WR(bp, BNX2_TBDC_CAM_OPCODE,
			BNX2_TBDC_CAM_OPCODE_OPCODE_CAM_READ);
		BNX2_WR(bp, BNX2_TBDC_COMMAND, BNX2_TBDC_COMMAND_CMD_REG_ARB);
		while ((BNX2_RD(bp, BNX2_TBDC_COMMAND) &
			BNX2_TBDC_COMMAND_CMD_REG_ARB) && j < 100)
			j++;

		cid = BNX2_RD(bp, BNX2_TBDC_CID);
		bdidx = BNX2_RD(bp, BNX2_TBDC_BIDX);
		valid = BNX2_RD(bp, BNX2_TBDC_CAM_OPCODE);
		netdev_err(dev, "%02x    %06x  %04lx   %02x    [%x]\n",
			   i, cid, bdidx & BNX2_TBDC_BDIDX_BDIDX,
			   bdidx >> 24, (valid >> 8) & 0x0ff);
	}
	netdev_err(dev, "<--- end TBDC dump --->\n");
}
6524
/* Debug dump of PCI command/power-management state and key MAC/host-
 * coalescing status registers; called from the TX timeout path.
 */
static void
bnx2_dump_state(struct bnx2 *bp)
{
	struct net_device *dev = bp->dev;
	u32 val1, val2;

	pci_read_config_dword(bp->pdev, PCI_COMMAND, &val1);
	netdev_err(dev, "DEBUG: intr_sem[%x] PCI_CMD[%08x]\n",
		   atomic_read(&bp->intr_sem), val1);
	pci_read_config_dword(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &val1);
	pci_read_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG, &val2);
	netdev_err(dev, "DEBUG: PCI_PM[%08x] PCI_MISC_CFG[%08x]\n", val1, val2);
	netdev_err(dev, "DEBUG: EMAC_TX_STATUS[%08x] EMAC_RX_STATUS[%08x]\n",
		   BNX2_RD(bp, BNX2_EMAC_TX_STATUS),
		   BNX2_RD(bp, BNX2_EMAC_RX_STATUS));
	netdev_err(dev, "DEBUG: RPM_MGMT_PKT_CTRL[%08x]\n",
		   BNX2_RD(bp, BNX2_RPM_MGMT_PKT_CTRL));
	netdev_err(dev, "DEBUG: HC_STATS_INTERRUPT_STATUS[%08x]\n",
		   BNX2_RD(bp, BNX2_HC_STATS_INTERRUPT_STATUS));
	/* The MSI-X pending-bit array window is only relevant when
	 * MSI-X is in use. */
	if (bp->flags & BNX2_FLAG_USING_MSIX)
		netdev_err(dev, "DEBUG: PBA[%08x]\n",
			   BNX2_RD(bp, BNX2_PCI_GRC_WINDOW3_BASE));
}
6548
/* ndo_tx_timeout: the stack detected a stuck TX queue.  Dump FTQ,
 * device, and management-CPU state to the log, then schedule a full
 * reset via the reset_task workqueue item.
 */
static void
bnx2_tx_timeout(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);

	bnx2_dump_ftq(bp);
	bnx2_dump_state(bp);
	bnx2_dump_mcp_state(bp);

	/* This allows the netif to be shutdown gracefully before resetting */
	schedule_work(&bp->reset_task);
}
6561
/* ndo_start_xmit: queue one skb on the selected TX ring.
 *
 * Called with netif_tx_lock.
 * bnx2_tx_int() runs without netif_tx_lock unless it needs to call
 * netif_wake_queue().
 *
 * Returns NETDEV_TX_OK (also when the packet is dropped on a DMA
 * mapping failure) or NETDEV_TX_BUSY if the ring is unexpectedly full.
 */
static netdev_tx_t
bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
        struct bnx2 *bp = netdev_priv(dev);
        dma_addr_t mapping;
        struct bnx2_tx_bd *txbd;
        struct bnx2_sw_tx_bd *tx_buf;
        u32 len, vlan_tag_flags, last_frag, mss;
        u16 prod, ring_prod;
        int i;
        struct bnx2_napi *bnapi;
        struct bnx2_tx_ring_info *txr;
        struct netdev_queue *txq;

        /*  Determine which tx ring we will be placed on */
        i = skb_get_queue_mapping(skb);
        bnapi = &bp->bnx2_napi[i];
        txr = &bnapi->tx_ring;
        txq = netdev_get_tx_queue(dev, i);

        /* The queue is stopped before the ring can fill (see the wake
         * threshold below), so running out of descriptors here
         * indicates a driver bug.
         */
        if (unlikely(bnx2_tx_avail(bp, txr) <
            (skb_shinfo(skb)->nr_frags + 1))) {
                netif_tx_stop_queue(txq);
                netdev_err(dev, "BUG! Tx ring full when queue awake!\n");

                return NETDEV_TX_BUSY;
        }
        len = skb_headlen(skb);
        prod = txr->tx_prod;
        ring_prod = BNX2_TX_RING_IDX(prod);

        /* Accumulate per-packet flags to be placed in the first BD. */
        vlan_tag_flags = 0;
        if (skb->ip_summed == CHECKSUM_PARTIAL) {
                vlan_tag_flags |= TX_BD_FLAGS_TCP_UDP_CKSUM;
        }

        /* Hardware VLAN tag insertion: the tag rides in the upper 16
         * bits of the flags word.
         */
        if (vlan_tx_tag_present(skb)) {
                vlan_tag_flags |=
                        (TX_BD_FLAGS_VLAN_TAG | (vlan_tx_tag_get(skb) << 16));
        }

        /* TSO setup: encode the TCP option length (in 32-bit words) and,
         * for IPv6, the extension-header offset into the flags/mss
         * fields for the chip's LSO engine.
         */
        if ((mss = skb_shinfo(skb)->gso_size)) {
                u32 tcp_opt_len;
                struct iphdr *iph;

                vlan_tag_flags |= TX_BD_FLAGS_SW_LSO;

                tcp_opt_len = tcp_optlen(skb);

                if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) {
                        /* Offset of the TCP header beyond the basic
                         * IPv6 header, i.e. the extension-header bytes.
                         */
                        u32 tcp_off = skb_transport_offset(skb) -
                                      sizeof(struct ipv6hdr) - ETH_HLEN;

                        vlan_tag_flags |= ((tcp_opt_len >> 2) << 8) |
                                          TX_BD_FLAGS_SW_FLAGS;
                        if (likely(tcp_off == 0))
                                vlan_tag_flags &= ~TX_BD_FLAGS_TCP6_OFF0_MSK;
                        else {
                                /* Offset is in 8-byte units, scattered
                                 * across several BD bit fields.
                                 */
                                tcp_off >>= 3;
                                vlan_tag_flags |= ((tcp_off & 0x3) <<
                                                   TX_BD_FLAGS_TCP6_OFF0_SHL) |
                                                  ((tcp_off & 0x10) <<
                                                   TX_BD_FLAGS_TCP6_OFF4_SHL);
                                mss |= (tcp_off & 0xc) << TX_BD_TCP6_OFF2_SHL;
                        }
                } else {
                        /* IPv4: report IP and TCP option words, if any. */
                        iph = ip_hdr(skb);
                        if (tcp_opt_len || (iph->ihl > 5)) {
                                vlan_tag_flags |= ((iph->ihl - 5) +
                                                   (tcp_opt_len >> 2)) << 8;
                        }
                }
        } else
                mss = 0;

        /* Map the linear part; on failure drop the packet silently
         * (NETDEV_TX_OK so the stack does not retry).
         */
        mapping = dma_map_single(&bp->pdev->dev, skb->data, len, PCI_DMA_TODEVICE);
        if (dma_mapping_error(&bp->pdev->dev, mapping)) {
                dev_kfree_skb(skb);
                return NETDEV_TX_OK;
        }

        tx_buf = &txr->tx_buf_ring[ring_prod];
        tx_buf->skb = skb;
        dma_unmap_addr_set(tx_buf, mapping, mapping);

        txbd = &txr->tx_desc_ring[ring_prod];

        /* First BD carries the packet-wide flags. */
        txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
        txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
        txbd->tx_bd_mss_nbytes = len | (mss << 16);
        txbd->tx_bd_vlan_tag_flags = vlan_tag_flags | TX_BD_FLAGS_START;

        last_frag = skb_shinfo(skb)->nr_frags;
        tx_buf->nr_frags = last_frag;
        tx_buf->is_gso = skb_is_gso(skb);

        /* One additional BD per page fragment. */
        for (i = 0; i < last_frag; i++) {
                const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

                prod = BNX2_NEXT_TX_BD(prod);
                ring_prod = BNX2_TX_RING_IDX(prod);
                txbd = &txr->tx_desc_ring[ring_prod];

                len = skb_frag_size(frag);
                mapping = skb_frag_dma_map(&bp->pdev->dev, frag, 0, len,
                                           DMA_TO_DEVICE);
                if (dma_mapping_error(&bp->pdev->dev, mapping))
                        goto dma_error;
                dma_unmap_addr_set(&txr->tx_buf_ring[ring_prod], mapping,
                                   mapping);

                txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
                txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
                txbd->tx_bd_mss_nbytes = len | (mss << 16);
                txbd->tx_bd_vlan_tag_flags = vlan_tag_flags;

        }
        /* Mark the last BD so the chip knows where the packet ends. */
        txbd->tx_bd_vlan_tag_flags |= TX_BD_FLAGS_END;

        /* Sync BD data before updating TX mailbox */
        wmb();

        netdev_tx_sent_queue(txq, skb->len);

        prod = BNX2_NEXT_TX_BD(prod);
        txr->tx_prod_bseq += skb->len;

        /* Ring the doorbell: producer index and byte-sequence mailbox. */
        BNX2_WR16(bp, txr->tx_bidx_addr, prod);
        BNX2_WR(bp, txr->tx_bseq_addr, txr->tx_prod_bseq);

        mmiowb();

        txr->tx_prod = prod;

        /* Stop the queue if a maximally-fragmented packet may no longer
         * fit; re-check afterwards to close the race with bnx2_tx_int().
         */
        if (unlikely(bnx2_tx_avail(bp, txr) <= MAX_SKB_FRAGS)) {
                netif_tx_stop_queue(txq);

                /* netif_tx_stop_queue() must be done before checking
                 * tx index in bnx2_tx_avail() below, because in
                 * bnx2_tx_int(), we update tx index before checking for
                 * netif_tx_queue_stopped().
                 */
                smp_mb();
                if (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh)
                        netif_tx_wake_queue(txq);
        }

        return NETDEV_TX_OK;
dma_error:
        /* save value of frag that failed */
        last_frag = i;

        /* start back at beginning and unmap skb */
        prod = txr->tx_prod;
        ring_prod = BNX2_TX_RING_IDX(prod);
        tx_buf = &txr->tx_buf_ring[ring_prod];
        tx_buf->skb = NULL;
        dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(tx_buf, mapping),
                         skb_headlen(skb), PCI_DMA_TODEVICE);

        /* unmap remaining mapped pages */
        for (i = 0; i < last_frag; i++) {
                prod = BNX2_NEXT_TX_BD(prod);
                ring_prod = BNX2_TX_RING_IDX(prod);
                tx_buf = &txr->tx_buf_ring[ring_prod];
                dma_unmap_page(&bp->pdev->dev, dma_unmap_addr(tx_buf, mapping),
                               skb_frag_size(&skb_shinfo(skb)->frags[i]),
                               PCI_DMA_TODEVICE);
        }

        /* Drop the packet; returning OK prevents a retry loop. */
        dev_kfree_skb(skb);
        return NETDEV_TX_OK;
}
6739
/* Shut down the interface (close path).  Called with rtnl_lock.
 * Teardown order matters: interrupts and NAPI are quiesced before the
 * TX path is disabled, then the chip is reset and resources released.
 */
static int
bnx2_close(struct net_device *dev)
{
        struct bnx2 *bp = netdev_priv(dev);

        bnx2_disable_int_sync(bp);
        bnx2_napi_disable(bp);
        netif_tx_disable(dev);
        del_timer_sync(&bp->timer);
        bnx2_shutdown_chip(bp);
        bnx2_free_irq(bp);
        bnx2_free_skbs(bp);
        bnx2_free_mem(bp);
        bnx2_del_napi(bp);
        bp->link_up = 0;
        netif_carrier_off(bp->dev);
        return 0;
}
6759
6760 static void
6761 bnx2_save_stats(struct bnx2 *bp)
6762 {
6763         u32 *hw_stats = (u32 *) bp->stats_blk;
6764         u32 *temp_stats = (u32 *) bp->temp_stats_blk;
6765         int i;
6766
6767         /* The 1st 10 counters are 64-bit counters */
6768         for (i = 0; i < 20; i += 2) {
6769                 u32 hi;
6770                 u64 lo;
6771
6772                 hi = temp_stats[i] + hw_stats[i];
6773                 lo = (u64) temp_stats[i + 1] + (u64) hw_stats[i + 1];
6774                 if (lo > 0xffffffff)
6775                         hi++;
6776                 temp_stats[i] = hi;
6777                 temp_stats[i + 1] = lo & 0xffffffff;
6778         }
6779
6780         for ( ; i < sizeof(struct statistics_block) / 4; i++)
6781                 temp_stats[i] += hw_stats[i];
6782 }
6783
/* Combine the hi/lo words of a 64-bit statistics counter. */
#define GET_64BIT_NET_STATS64(ctr)              \
        (((u64) (ctr##_hi) << 32) + (u64) (ctr##_lo))

/* Sum a 64-bit counter from the live hardware block and the
 * reset-surviving copy in temp_stats_blk.  The whole expansion is
 * parenthesized so the macro is safe in any expression context.
 */
#define GET_64BIT_NET_STATS(ctr)                                \
        (GET_64BIT_NET_STATS64(bp->stats_blk->ctr) +            \
         GET_64BIT_NET_STATS64(bp->temp_stats_blk->ctr))

/* Same for a plain 32-bit counter; likewise fully parenthesized. */
#define GET_32BIT_NET_STATS(ctr)                                \
        ((unsigned long) (bp->stats_blk->ctr +                  \
                          bp->temp_stats_blk->ctr))
6794
/* ndo_get_stats64: translate the chip's statistics block (plus the
 * counters preserved across resets in temp_stats_blk) into the
 * standard rtnl_link_stats64 layout.
 */
static struct rtnl_link_stats64 *
bnx2_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *net_stats)
{
        struct bnx2 *bp = netdev_priv(dev);

        /* Statistics block not yet allocated (device never opened). */
        if (bp->stats_blk == NULL)
                return net_stats;

        net_stats->rx_packets =
                GET_64BIT_NET_STATS(stat_IfHCInUcastPkts) +
                GET_64BIT_NET_STATS(stat_IfHCInMulticastPkts) +
                GET_64BIT_NET_STATS(stat_IfHCInBroadcastPkts);

        net_stats->tx_packets =
                GET_64BIT_NET_STATS(stat_IfHCOutUcastPkts) +
                GET_64BIT_NET_STATS(stat_IfHCOutMulticastPkts) +
                GET_64BIT_NET_STATS(stat_IfHCOutBroadcastPkts);

        net_stats->rx_bytes =
                GET_64BIT_NET_STATS(stat_IfHCInOctets);

        net_stats->tx_bytes =
                GET_64BIT_NET_STATS(stat_IfHCOutOctets);

        net_stats->multicast =
                GET_64BIT_NET_STATS(stat_IfHCInMulticastPkts);

        net_stats->collisions =
                GET_32BIT_NET_STATS(stat_EtherStatsCollisions);

        net_stats->rx_length_errors =
                GET_32BIT_NET_STATS(stat_EtherStatsUndersizePkts) +
                GET_32BIT_NET_STATS(stat_EtherStatsOverrsizePkts);

        net_stats->rx_over_errors =
                GET_32BIT_NET_STATS(stat_IfInFTQDiscards) +
                GET_32BIT_NET_STATS(stat_IfInMBUFDiscards);

        net_stats->rx_frame_errors =
                GET_32BIT_NET_STATS(stat_Dot3StatsAlignmentErrors);

        net_stats->rx_crc_errors =
                GET_32BIT_NET_STATS(stat_Dot3StatsFCSErrors);

        /* rx_errors is the aggregate of the individual error classes. */
        net_stats->rx_errors = net_stats->rx_length_errors +
                net_stats->rx_over_errors + net_stats->rx_frame_errors +
                net_stats->rx_crc_errors;

        net_stats->tx_aborted_errors =
                GET_32BIT_NET_STATS(stat_Dot3StatsExcessiveCollisions) +
                GET_32BIT_NET_STATS(stat_Dot3StatsLateCollisions);

        /* Carrier-sense error counter is not used on 5706 or 5708 A0.
         * NOTE(review): presumably the counter is unreliable on those
         * chips - confirm against the chip errata.
         */
        if ((BNX2_CHIP(bp) == BNX2_CHIP_5706) ||
            (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5708_A0))
                net_stats->tx_carrier_errors = 0;
        else {
                net_stats->tx_carrier_errors =
                        GET_32BIT_NET_STATS(stat_Dot3StatsCarrierSenseErrors);
        }

        net_stats->tx_errors =
                GET_32BIT_NET_STATS(stat_emac_tx_stat_dot3statsinternalmactransmiterrors) +
                net_stats->tx_aborted_errors +
                net_stats->tx_carrier_errors;

        net_stats->rx_missed_errors =
                GET_32BIT_NET_STATS(stat_IfInFTQDiscards) +
                GET_32BIT_NET_STATS(stat_IfInMBUFDiscards) +
                GET_32BIT_NET_STATS(stat_FwRxDrop);

        return net_stats;
}
6867
6868 /* All ethtool functions called with rtnl_lock */
6869
/* ethtool get_settings: report supported/advertised modes and the
 * current link state.  phy_lock protects the link fields that the
 * link-state handlers update.
 */
static int
bnx2_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
        struct bnx2 *bp = netdev_priv(dev);
        int support_serdes = 0, support_copper = 0;

        cmd->supported = SUPPORTED_Autoneg;
        /* Remote-PHY capable devices can operate either port type. */
        if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
                support_serdes = 1;
                support_copper = 1;
        } else if (bp->phy_port == PORT_FIBRE)
                support_serdes = 1;
        else
                support_copper = 1;

        if (support_serdes) {
                cmd->supported |= SUPPORTED_1000baseT_Full |
                        SUPPORTED_FIBRE;
                if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE)
                        cmd->supported |= SUPPORTED_2500baseX_Full;

        }
        if (support_copper) {
                cmd->supported |= SUPPORTED_10baseT_Half |
                        SUPPORTED_10baseT_Full |
                        SUPPORTED_100baseT_Half |
                        SUPPORTED_100baseT_Full |
                        SUPPORTED_1000baseT_Full |
                        SUPPORTED_TP;

        }

        spin_lock_bh(&bp->phy_lock);
        cmd->port = bp->phy_port;
        cmd->advertising = bp->advertising;

        if (bp->autoneg & AUTONEG_SPEED) {
                cmd->autoneg = AUTONEG_ENABLE;
        } else {
                cmd->autoneg = AUTONEG_DISABLE;
        }

        if (netif_carrier_ok(dev)) {
                ethtool_cmd_speed_set(cmd, bp->line_speed);
                cmd->duplex = bp->duplex;
                /* MDI/MDI-X status only applies to copper PHYs. */
                if (!(bp->phy_flags & BNX2_PHY_FLAG_SERDES)) {
                        if (bp->phy_flags & BNX2_PHY_FLAG_MDIX)
                                cmd->eth_tp_mdix = ETH_TP_MDI_X;
                        else
                                cmd->eth_tp_mdix = ETH_TP_MDI;
                }
        }
        else {
                /* Link down: speed and duplex are unknown (-1). */
                ethtool_cmd_speed_set(cmd, -1);
                cmd->duplex = -1;
        }
        spin_unlock_bh(&bp->phy_lock);

        cmd->transceiver = XCVR_INTERNAL;
        cmd->phy_address = bp->phy_addr;

        return 0;
}
6933
/* ethtool set_settings: validate and apply new speed/duplex/autoneg
 * settings.  All candidate values are staged in locals and committed
 * only after every validation check passes; any failure exits through
 * err_out_unlock with -EINVAL and no state change.
 */
static int
bnx2_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
        struct bnx2 *bp = netdev_priv(dev);
        u8 autoneg = bp->autoneg;
        u8 req_duplex = bp->req_duplex;
        u16 req_line_speed = bp->req_line_speed;
        u32 advertising = bp->advertising;
        int err = -EINVAL;

        spin_lock_bh(&bp->phy_lock);

        if (cmd->port != PORT_TP && cmd->port != PORT_FIBRE)
                goto err_out_unlock;

        /* Switching port type requires remote-PHY capability. */
        if (cmd->port != bp->phy_port &&
            !(bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP))
                goto err_out_unlock;

        /* If device is down, we can store the settings only if the user
         * is setting the currently active port.
         */
        if (!netif_running(dev) && cmd->port != bp->phy_port)
                goto err_out_unlock;

        if (cmd->autoneg == AUTONEG_ENABLE) {
                autoneg |= AUTONEG_SPEED;

                /* Restrict advertisement to modes valid for the port;
                 * an empty mask means advertise everything valid.
                 */
                advertising = cmd->advertising;
                if (cmd->port == PORT_TP) {
                        advertising &= ETHTOOL_ALL_COPPER_SPEED;
                        if (!advertising)
                                advertising = ETHTOOL_ALL_COPPER_SPEED;
                } else {
                        advertising &= ETHTOOL_ALL_FIBRE_SPEED;
                        if (!advertising)
                                advertising = ETHTOOL_ALL_FIBRE_SPEED;
                }
                advertising |= ADVERTISED_Autoneg;
        }
        else {
                u32 speed = ethtool_cmd_speed(cmd);
                if (cmd->port == PORT_FIBRE) {
                        /* Fibre supports only 1G/2.5G full duplex. */
                        if ((speed != SPEED_1000 &&
                             speed != SPEED_2500) ||
                            (cmd->duplex != DUPLEX_FULL))
                                goto err_out_unlock;

                        if (speed == SPEED_2500 &&
                            !(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
                                goto err_out_unlock;
                } else if (speed == SPEED_1000 || speed == SPEED_2500)
                        /* Forced gigabit speeds not allowed on copper. */
                        goto err_out_unlock;

                autoneg &= ~AUTONEG_SPEED;
                req_line_speed = speed;
                req_duplex = cmd->duplex;
                advertising = 0;
        }

        /* Validation passed: commit the staged values. */
        bp->autoneg = autoneg;
        bp->advertising = advertising;
        bp->req_line_speed = req_line_speed;
        bp->req_duplex = req_duplex;

        err = 0;
        /* If device is down, the new settings will be picked up when it is
         * brought up.
         */
        if (netif_running(dev))
                err = bnx2_setup_phy(bp, cmd->port);

err_out_unlock:
        spin_unlock_bh(&bp->phy_lock);

        return err;
}
7011
7012 static void
7013 bnx2_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
7014 {
7015         struct bnx2 *bp = netdev_priv(dev);
7016
7017         strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
7018         strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
7019         strlcpy(info->bus_info, pci_name(bp->pdev), sizeof(info->bus_info));
7020         strlcpy(info->fw_version, bp->fw_version, sizeof(info->fw_version));
7021 }
7022
/* Size in bytes of the ethtool register dump buffer. */
#define BNX2_REGDUMP_LEN                (32 * 1024)

/* ethtool get_regs_len: fixed-size register dump. */
static int
bnx2_get_regs_len(struct net_device *dev)
{
        return BNX2_REGDUMP_LEN;
}
7030
/* ethtool get_regs: dump device registers into a BNX2_REGDUMP_LEN
 * buffer.  Only the readable ranges listed in reg_boundaries are read;
 * the gaps between ranges are left zeroed by the initial memset.
 */
static void
bnx2_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *_p)
{
        u32 *p = _p, i, offset;
        u8 *orig_p = _p;
        struct bnx2 *bp = netdev_priv(dev);
        /* Pairs of [start, end) offsets of readable register windows. */
        static const u32 reg_boundaries[] = {
                0x0000, 0x0098, 0x0400, 0x045c,
                0x0800, 0x0880, 0x0c00, 0x0c10,
                0x0c30, 0x0d08, 0x1000, 0x101c,
                0x1040, 0x1048, 0x1080, 0x10a4,
                0x1400, 0x1490, 0x1498, 0x14f0,
                0x1500, 0x155c, 0x1580, 0x15dc,
                0x1600, 0x1658, 0x1680, 0x16d8,
                0x1800, 0x1820, 0x1840, 0x1854,
                0x1880, 0x1894, 0x1900, 0x1984,
                0x1c00, 0x1c0c, 0x1c40, 0x1c54,
                0x1c80, 0x1c94, 0x1d00, 0x1d84,
                0x2000, 0x2030, 0x23c0, 0x2400,
                0x2800, 0x2820, 0x2830, 0x2850,
                0x2b40, 0x2c10, 0x2fc0, 0x3058,
                0x3c00, 0x3c94, 0x4000, 0x4010,
                0x4080, 0x4090, 0x43c0, 0x4458,
                0x4c00, 0x4c18, 0x4c40, 0x4c54,
                0x4fc0, 0x5010, 0x53c0, 0x5444,
                0x5c00, 0x5c18, 0x5c80, 0x5c90,
                0x5fc0, 0x6000, 0x6400, 0x6428,
                0x6800, 0x6848, 0x684c, 0x6860,
                0x6888, 0x6910, 0x8000
        };

        regs->version = 0;

        /* Zero the whole buffer so skipped ranges read as zero. */
        memset(p, 0, BNX2_REGDUMP_LEN);

        /* Registers are only accessible while the device is up. */
        if (!netif_running(bp->dev))
                return;

        i = 0;
        offset = reg_boundaries[0];
        p += offset;
        while (offset < BNX2_REGDUMP_LEN) {
                *p++ = BNX2_RD(bp, offset);
                offset += 4;
                /* End of the current window: jump to the next start and
                 * reposition the output pointer to the matching offset.
                 */
                if (offset == reg_boundaries[i + 1]) {
                        offset = reg_boundaries[i + 2];
                        p = (u32 *) (orig_p + offset);
                        i += 2;
                }
        }
}
7082
7083 static void
7084 bnx2_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
7085 {
7086         struct bnx2 *bp = netdev_priv(dev);
7087
7088         if (bp->flags & BNX2_FLAG_NO_WOL) {
7089                 wol->supported = 0;
7090                 wol->wolopts = 0;
7091         }
7092         else {
7093                 wol->supported = WAKE_MAGIC;
7094                 if (bp->wol)
7095                         wol->wolopts = WAKE_MAGIC;
7096                 else
7097                         wol->wolopts = 0;
7098         }
7099         memset(&wol->sopass, 0, sizeof(wol->sopass));
7100 }
7101
7102 static int
7103 bnx2_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
7104 {
7105         struct bnx2 *bp = netdev_priv(dev);
7106
7107         if (wol->wolopts & ~WAKE_MAGIC)
7108                 return -EINVAL;
7109
7110         if (wol->wolopts & WAKE_MAGIC) {
7111                 if (bp->flags & BNX2_FLAG_NO_WOL)
7112                         return -EINVAL;
7113
7114                 bp->wol = 1;
7115         }
7116         else {
7117                 bp->wol = 0;
7118         }
7119
7120         device_set_wakeup_enable(&bp->pdev->dev, bp->wol);
7121
7122         return 0;
7123 }
7124
/* ethtool nway_reset: restart link autonegotiation.
 * Returns -EAGAIN if the device is down and -EINVAL if autonegotiation
 * is not currently enabled.
 */
static int
bnx2_nway_reset(struct net_device *dev)
{
        struct bnx2 *bp = netdev_priv(dev);
        u32 bmcr;

        if (!netif_running(dev))
                return -EAGAIN;

        if (!(bp->autoneg & AUTONEG_SPEED)) {
                return -EINVAL;
        }

        spin_lock_bh(&bp->phy_lock);

        /* Remote PHY: delegate the restart to the firmware-managed PHY. */
        if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
                int rc;

                rc = bnx2_setup_remote_phy(bp, bp->phy_port);
                spin_unlock_bh(&bp->phy_lock);
                return rc;
        }

        /* Force a link down visible on the other side */
        if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
                bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
                /* Drop the lock while the forced link-down settles. */
                spin_unlock_bh(&bp->phy_lock);

                msleep(20);

                spin_lock_bh(&bp->phy_lock);

                /* Arm the serdes autoneg timeout handled by bp->timer. */
                bp->current_interval = BNX2_SERDES_AN_TIMEOUT;
                bp->serdes_an_pending = 1;
                mod_timer(&bp->timer, jiffies + bp->current_interval);
        }

        /* Clear loopback and kick off a fresh autonegotiation. */
        bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
        bmcr &= ~BMCR_LOOPBACK;
        bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART | BMCR_ANENABLE);

        spin_unlock_bh(&bp->phy_lock);

        return 0;
}
7170
7171 static u32
7172 bnx2_get_link(struct net_device *dev)
7173 {
7174         struct bnx2 *bp = netdev_priv(dev);
7175
7176         return bp->link_up;
7177 }
7178
7179 static int
7180 bnx2_get_eeprom_len(struct net_device *dev)
7181 {
7182         struct bnx2 *bp = netdev_priv(dev);
7183
7184         if (bp->flash_info == NULL)
7185                 return 0;
7186
7187         return (int) bp->flash_size;
7188 }
7189
7190 static int
7191 bnx2_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
7192                 u8 *eebuf)
7193 {
7194         struct bnx2 *bp = netdev_priv(dev);
7195         int rc;
7196
7197         /* parameters already validated in ethtool_get_eeprom */
7198
7199         rc = bnx2_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
7200
7201         return rc;
7202 }
7203
7204 static int
7205 bnx2_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
7206                 u8 *eebuf)
7207 {
7208         struct bnx2 *bp = netdev_priv(dev);
7209         int rc;
7210
7211         /* parameters already validated in ethtool_set_eeprom */
7212
7213         rc = bnx2_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
7214
7215         return rc;
7216 }
7217
7218 static int
7219 bnx2_get_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
7220 {
7221         struct bnx2 *bp = netdev_priv(dev);
7222
7223         memset(coal, 0, sizeof(struct ethtool_coalesce));
7224
7225         coal->rx_coalesce_usecs = bp->rx_ticks;
7226         coal->rx_max_coalesced_frames = bp->rx_quick_cons_trip;
7227         coal->rx_coalesce_usecs_irq = bp->rx_ticks_int;
7228         coal->rx_max_coalesced_frames_irq = bp->rx_quick_cons_trip_int;
7229
7230         coal->tx_coalesce_usecs = bp->tx_ticks;
7231         coal->tx_max_coalesced_frames = bp->tx_quick_cons_trip;
7232         coal->tx_coalesce_usecs_irq = bp->tx_ticks_int;
7233         coal->tx_max_coalesced_frames_irq = bp->tx_quick_cons_trip_int;
7234
7235         coal->stats_block_coalesce_usecs = bp->stats_ticks;
7236
7237         return 0;
7238 }
7239
/* ethtool set_coalesce: clamp the requested coalescing parameters to
 * the hardware field widths (tick counts to 10 bits, frame counts to
 * 8 bits) and reprogram the NIC if it is running.
 */
static int
bnx2_set_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
{
        struct bnx2 *bp = netdev_priv(dev);

        bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
        if (bp->rx_ticks > 0x3ff) bp->rx_ticks = 0x3ff;

        bp->rx_quick_cons_trip = (u16) coal->rx_max_coalesced_frames;
        if (bp->rx_quick_cons_trip > 0xff) bp->rx_quick_cons_trip = 0xff;

        bp->rx_ticks_int = (u16) coal->rx_coalesce_usecs_irq;
        if (bp->rx_ticks_int > 0x3ff) bp->rx_ticks_int = 0x3ff;

        bp->rx_quick_cons_trip_int = (u16) coal->rx_max_coalesced_frames_irq;
        if (bp->rx_quick_cons_trip_int > 0xff)
                bp->rx_quick_cons_trip_int = 0xff;

        bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
        if (bp->tx_ticks > 0x3ff) bp->tx_ticks = 0x3ff;

        bp->tx_quick_cons_trip = (u16) coal->tx_max_coalesced_frames;
        if (bp->tx_quick_cons_trip > 0xff) bp->tx_quick_cons_trip = 0xff;

        bp->tx_ticks_int = (u16) coal->tx_coalesce_usecs_irq;
        if (bp->tx_ticks_int > 0x3ff) bp->tx_ticks_int = 0x3ff;

        bp->tx_quick_cons_trip_int = (u16) coal->tx_max_coalesced_frames_irq;
        if (bp->tx_quick_cons_trip_int > 0xff) bp->tx_quick_cons_trip_int =
                0xff;

        bp->stats_ticks = coal->stats_block_coalesce_usecs;
        /* Chips with broken statistics coalescing only support a fixed
         * one-second statistics interval (or fully disabled).
         */
        if (bp->flags & BNX2_FLAG_BROKEN_STATS) {
                if (bp->stats_ticks != 0 && bp->stats_ticks != USEC_PER_SEC)
                        bp->stats_ticks = USEC_PER_SEC;
        }
        if (bp->stats_ticks > BNX2_HC_STATS_TICKS_HC_STAT_TICKS)
                bp->stats_ticks = BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
        bp->stats_ticks &= BNX2_HC_STATS_TICKS_HC_STAT_TICKS;

        /* Apply immediately: requires a full stop/init/start cycle. */
        if (netif_running(bp->dev)) {
                bnx2_netif_stop(bp, true);
                bnx2_init_nic(bp, 0);
                bnx2_netif_start(bp, true);
        }

        return 0;
}
7288
7289 static void
7290 bnx2_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
7291 {
7292         struct bnx2 *bp = netdev_priv(dev);
7293
7294         ering->rx_max_pending = BNX2_MAX_TOTAL_RX_DESC_CNT;
7295         ering->rx_jumbo_max_pending = BNX2_MAX_TOTAL_RX_PG_DESC_CNT;
7296
7297         ering->rx_pending = bp->rx_ring_size;
7298         ering->rx_jumbo_pending = bp->rx_pg_ring_size;
7299
7300         ering->tx_max_pending = BNX2_MAX_TX_DESC_CNT;
7301         ering->tx_pending = bp->tx_ring_size;
7302 }
7303
/* Resize the RX/TX rings.  If the interface is up this requires a full
 * stop, chip reset, memory reallocation, and restart; on any failure
 * during the restart the device is closed.  reset_irq additionally
 * tears down and re-establishes the IRQ/NAPI setup.
 */
static int
bnx2_change_ring_size(struct bnx2 *bp, u32 rx, u32 tx, bool reset_irq)
{
        if (netif_running(bp->dev)) {
                /* Reset will erase chipset stats; save them */
                bnx2_save_stats(bp);

                bnx2_netif_stop(bp, true);
                bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
                if (reset_irq) {
                        bnx2_free_irq(bp);
                        bnx2_del_napi(bp);
                } else {
                        __bnx2_free_irq(bp);
                }
                bnx2_free_skbs(bp);
                bnx2_free_mem(bp);
        }

        /* Record the new sizes; used by the allocation below and by a
         * later open if the device is currently down.
         */
        bnx2_set_rx_ring_size(bp, rx);
        bp->tx_ring_size = tx;

        if (netif_running(bp->dev)) {
                int rc = 0;

                if (reset_irq) {
                        rc = bnx2_setup_int_mode(bp, disable_msi);
                        bnx2_init_napi(bp);
                }

                if (!rc)
                        rc = bnx2_alloc_mem(bp);

                if (!rc)
                        rc = bnx2_request_irq(bp);

                if (!rc)
                        rc = bnx2_init_nic(bp, 0);

                /* Could not bring the device back up: close it.  NAPI
                 * must be re-enabled first so dev_close can disable it.
                 */
                if (rc) {
                        bnx2_napi_enable(bp);
                        dev_close(bp->dev);
                        return rc;
                }
#ifdef BCM_CNIC
                mutex_lock(&bp->cnic_lock);
                /* Let cnic know about the new status block. */
                if (bp->cnic_eth_dev.drv_state & CNIC_DRV_STATE_REGD)
                        bnx2_setup_cnic_irq_info(bp);
                mutex_unlock(&bp->cnic_lock);
#endif
                bnx2_netif_start(bp, true);
        }
        return 0;
}
7359
7360 static int
7361 bnx2_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
7362 {
7363         struct bnx2 *bp = netdev_priv(dev);
7364         int rc;
7365
7366         if ((ering->rx_pending > BNX2_MAX_TOTAL_RX_DESC_CNT) ||
7367                 (ering->tx_pending > BNX2_MAX_TX_DESC_CNT) ||
7368                 (ering->tx_pending <= MAX_SKB_FRAGS)) {
7369
7370                 return -EINVAL;
7371         }
7372         rc = bnx2_change_ring_size(bp, ering->rx_pending, ering->tx_pending,
7373                                    false);
7374         return rc;
7375 }
7376
7377 static void
7378 bnx2_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
7379 {
7380         struct bnx2 *bp = netdev_priv(dev);
7381
7382         epause->autoneg = ((bp->autoneg & AUTONEG_FLOW_CTRL) != 0);
7383         epause->rx_pause = ((bp->flow_ctrl & FLOW_CTRL_RX) != 0);
7384         epause->tx_pause = ((bp->flow_ctrl & FLOW_CTRL_TX) != 0);
7385 }
7386
7387 static int
7388 bnx2_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
7389 {
7390         struct bnx2 *bp = netdev_priv(dev);
7391
7392         bp->req_flow_ctrl = 0;
7393         if (epause->rx_pause)
7394                 bp->req_flow_ctrl |= FLOW_CTRL_RX;
7395         if (epause->tx_pause)
7396                 bp->req_flow_ctrl |= FLOW_CTRL_TX;
7397
7398         if (epause->autoneg) {
7399                 bp->autoneg |= AUTONEG_FLOW_CTRL;
7400         }
7401         else {
7402                 bp->autoneg &= ~AUTONEG_FLOW_CTRL;
7403         }
7404
7405         if (netif_running(dev)) {
7406                 spin_lock_bh(&bp->phy_lock);
7407                 bnx2_setup_phy(bp, bp->phy_port);
7408                 spin_unlock_bh(&bp->phy_lock);
7409         }
7410
7411         return 0;
7412 }
7413
/* Names reported for ethtool -S.  Entry order must match the parallel
 * bnx2_stats_offset_arr table below.
 */
static struct {
        char string[ETH_GSTRING_LEN];
} bnx2_stats_str_arr[] = {
        { "rx_bytes" },
        { "rx_error_bytes" },
        { "tx_bytes" },
        { "tx_error_bytes" },
        { "rx_ucast_packets" },
        { "rx_mcast_packets" },
        { "rx_bcast_packets" },
        { "tx_ucast_packets" },
        { "tx_mcast_packets" },
        { "tx_bcast_packets" },
        { "tx_mac_errors" },
        { "tx_carrier_errors" },
        { "rx_crc_errors" },
        { "rx_align_errors" },
        { "tx_single_collisions" },
        { "tx_multi_collisions" },
        { "tx_deferred" },
        { "tx_excess_collisions" },
        { "tx_late_collisions" },
        { "tx_total_collisions" },
        { "rx_fragments" },
        { "rx_jabbers" },
        { "rx_undersize_packets" },
        { "rx_oversize_packets" },
        { "rx_64_byte_packets" },
        { "rx_65_to_127_byte_packets" },
        { "rx_128_to_255_byte_packets" },
        { "rx_256_to_511_byte_packets" },
        { "rx_512_to_1023_byte_packets" },
        { "rx_1024_to_1522_byte_packets" },
        { "rx_1523_to_9022_byte_packets" },
        { "tx_64_byte_packets" },
        { "tx_65_to_127_byte_packets" },
        { "tx_128_to_255_byte_packets" },
        { "tx_256_to_511_byte_packets" },
        { "tx_512_to_1023_byte_packets" },
        { "tx_1024_to_1522_byte_packets" },
        { "tx_1523_to_9022_byte_packets" },
        { "rx_xon_frames" },
        { "rx_xoff_frames" },
        { "tx_xon_frames" },
        { "tx_xoff_frames" },
        { "rx_mac_ctrl_frames" },
        { "rx_filtered_packets" },
        { "rx_ftq_discards" },
        { "rx_discards" },
        { "rx_fw_discards" },
};
7465
7466 #define BNX2_NUM_STATS ARRAY_SIZE(bnx2_stats_str_arr)
7467
/* Offset, in 32-bit words, of each counter inside struct
 * statistics_block, indexed in the same order as bnx2_stats_str_arr.
 * 64-bit counters point at their _hi word; the _lo word follows it.
 */
#define STATS_OFFSET32(offset_name) (offsetof(struct statistics_block, offset_name) / 4)

static const unsigned long bnx2_stats_offset_arr[BNX2_NUM_STATS] = {
    STATS_OFFSET32(stat_IfHCInOctets_hi),
    STATS_OFFSET32(stat_IfHCInBadOctets_hi),
    STATS_OFFSET32(stat_IfHCOutOctets_hi),
    STATS_OFFSET32(stat_IfHCOutBadOctets_hi),
    STATS_OFFSET32(stat_IfHCInUcastPkts_hi),
    STATS_OFFSET32(stat_IfHCInMulticastPkts_hi),
    STATS_OFFSET32(stat_IfHCInBroadcastPkts_hi),
    STATS_OFFSET32(stat_IfHCOutUcastPkts_hi),
    STATS_OFFSET32(stat_IfHCOutMulticastPkts_hi),
    STATS_OFFSET32(stat_IfHCOutBroadcastPkts_hi),
    STATS_OFFSET32(stat_emac_tx_stat_dot3statsinternalmactransmiterrors),
    STATS_OFFSET32(stat_Dot3StatsCarrierSenseErrors),
    STATS_OFFSET32(stat_Dot3StatsFCSErrors),
    STATS_OFFSET32(stat_Dot3StatsAlignmentErrors),
    STATS_OFFSET32(stat_Dot3StatsSingleCollisionFrames),
    STATS_OFFSET32(stat_Dot3StatsMultipleCollisionFrames),
    STATS_OFFSET32(stat_Dot3StatsDeferredTransmissions),
    STATS_OFFSET32(stat_Dot3StatsExcessiveCollisions),
    STATS_OFFSET32(stat_Dot3StatsLateCollisions),
    STATS_OFFSET32(stat_EtherStatsCollisions),
    STATS_OFFSET32(stat_EtherStatsFragments),
    STATS_OFFSET32(stat_EtherStatsJabbers),
    STATS_OFFSET32(stat_EtherStatsUndersizePkts),
    STATS_OFFSET32(stat_EtherStatsOverrsizePkts),
    STATS_OFFSET32(stat_EtherStatsPktsRx64Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx65Octetsto127Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx128Octetsto255Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx256Octetsto511Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx512Octetsto1023Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx1024Octetsto1522Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx1523Octetsto9022Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx64Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx65Octetsto127Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx128Octetsto255Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx256Octetsto511Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx512Octetsto1023Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx1024Octetsto1522Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx1523Octetsto9022Octets),
    STATS_OFFSET32(stat_XonPauseFramesReceived),
    STATS_OFFSET32(stat_XoffPauseFramesReceived),
    STATS_OFFSET32(stat_OutXonSent),
    STATS_OFFSET32(stat_OutXoffSent),
    STATS_OFFSET32(stat_MacControlFramesReceived),
    STATS_OFFSET32(stat_IfInFramesL2FilterDiscards),
    STATS_OFFSET32(stat_IfInFTQDiscards),
    STATS_OFFSET32(stat_IfInMBUFDiscards),
    STATS_OFFSET32(stat_FwRxDrop),
};
7519
/* stat_IfHCInBadOctets and stat_Dot3StatsCarrierSenseErrors are
 * skipped because of errata.
 */
/* Counter width in bytes (8, 4, or 0 = skip) for each statistic on
 * 5706 A0-A2 / 5708 A0 chips; indexed like bnx2_stats_str_arr.
 */
static u8 bnx2_5706_stats_len_arr[BNX2_NUM_STATS] = {
	8,0,8,8,8,8,8,8,8,8,
	4,0,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,
};

/* Same table for later chip revisions; only stat_IfHCInBadOctets
 * (index 1) is skipped.
 */
static u8 bnx2_5708_stats_len_arr[BNX2_NUM_STATS] = {
	8,0,8,8,8,8,8,8,8,8,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,
};
7538
#define BNX2_NUM_TESTS 6

/* ethtool self-test names for ETH_SS_TEST; order matches the buf[]
 * slots filled in by bnx2_self_test().
 */
static struct {
	char string[ETH_GSTRING_LEN];
} bnx2_tests_str_arr[BNX2_NUM_TESTS] = {
	{ "register_test (offline)" },
	{ "memory_test (offline)" },
	{ "loopback_test (offline)" },
	{ "nvram_test (online)" },
	{ "interrupt_test (online)" },
	{ "link_test (online)" },
};
7551
7552 static int
7553 bnx2_get_sset_count(struct net_device *dev, int sset)
7554 {
7555         switch (sset) {
7556         case ETH_SS_TEST:
7557                 return BNX2_NUM_TESTS;
7558         case ETH_SS_STATS:
7559                 return BNX2_NUM_STATS;
7560         default:
7561                 return -EOPNOTSUPP;
7562         }
7563 }
7564
/* ethtool -t handler.  Offline tests reset the chip into diagnostic
 * mode (traffic stops), run register/memory/loopback tests and then
 * reinitialize; the online tests (NVRAM, interrupt, link) always run.
 * Each buf[] slot is nonzero on failure of the matching entry in
 * bnx2_tests_str_arr.
 */
static void
bnx2_self_test(struct net_device *dev, struct ethtool_test *etest, u64 *buf)
{
	struct bnx2 *bp = netdev_priv(dev);

	memset(buf, 0, sizeof(u64) * BNX2_NUM_TESTS);
	if (etest->flags & ETH_TEST_FL_OFFLINE) {
		int i;

		/* Quiesce the interface and put the chip in diagnostic
		 * mode before running the destructive tests.
		 */
		bnx2_netif_stop(bp, true);
		bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_DIAG);
		bnx2_free_skbs(bp);

		if (bnx2_test_registers(bp) != 0) {
			buf[0] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}
		if (bnx2_test_memory(bp) != 0) {
			buf[1] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}
		/* buf[2] records the loopback result value directly. */
		if ((buf[2] = bnx2_test_loopback(bp)) != 0)
			etest->flags |= ETH_TEST_FL_FAILED;

		/* Restore normal operation (or shut down if the device
		 * is no longer up).
		 */
		if (!netif_running(bp->dev))
			bnx2_shutdown_chip(bp);
		else {
			bnx2_init_nic(bp, 1);
			bnx2_netif_start(bp, true);
		}

		/* wait up to ~7 seconds for link up so the link test
		 * below has a chance to pass
		 */
		for (i = 0; i < 7; i++) {
			if (bp->link_up)
				break;
			msleep_interruptible(1000);
		}
	}

	if (bnx2_test_nvram(bp) != 0) {
		buf[3] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}
	if (bnx2_test_intr(bp) != 0) {
		buf[4] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}

	if (bnx2_test_link(bp) != 0) {
		buf[5] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;

	}
}
7619
7620 static void
7621 bnx2_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
7622 {
7623         switch (stringset) {
7624         case ETH_SS_STATS:
7625                 memcpy(buf, bnx2_stats_str_arr,
7626                         sizeof(bnx2_stats_str_arr));
7627                 break;
7628         case ETH_SS_TEST:
7629                 memcpy(buf, bnx2_tests_str_arr,
7630                         sizeof(bnx2_tests_str_arr));
7631                 break;
7632         }
7633 }
7634
/* ethtool -S handler.  Sums the live hardware statistics block with
 * the accumulated temp_stats_blk into buf[], using the per-chip width
 * table to decide how each counter is read.
 */
static void
bnx2_get_ethtool_stats(struct net_device *dev,
		struct ethtool_stats *stats, u64 *buf)
{
	struct bnx2 *bp = netdev_priv(dev);
	int i;
	u32 *hw_stats = (u32 *) bp->stats_blk;
	u32 *temp_stats = (u32 *) bp->temp_stats_blk;
	u8 *stats_len_arr = NULL;

	/* No stats block allocated yet: report all zeros. */
	if (hw_stats == NULL) {
		memset(buf, 0, sizeof(u64) * BNX2_NUM_STATS);
		return;
	}

	/* Early chip revisions skip extra counters due to errata
	 * (see the comment above bnx2_5706_stats_len_arr).
	 */
	if ((BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A0) ||
	    (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A1) ||
	    (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A2) ||
	    (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5708_A0))
		stats_len_arr = bnx2_5706_stats_len_arr;
	else
		stats_len_arr = bnx2_5708_stats_len_arr;

	for (i = 0; i < BNX2_NUM_STATS; i++) {
		unsigned long offset;

		if (stats_len_arr[i] == 0) {
			/* skip this counter */
			buf[i] = 0;
			continue;
		}

		offset = bnx2_stats_offset_arr[i];
		if (stats_len_arr[i] == 4) {
			/* 4-byte counter */
			buf[i] = (u64) *(hw_stats + offset) +
				 *(temp_stats + offset);
			continue;
		}
		/* 8-byte counter: offset addresses the high word, the
		 * low word immediately follows it.
		 */
		buf[i] = (((u64) *(hw_stats + offset)) << 32) +
			 *(hw_stats + offset + 1) +
			 (((u64) *(temp_stats + offset)) << 32) +
			 *(temp_stats + offset + 1);
	}
}
7681
/* ethtool -p (port identify) handler: blink the port LED by driving
 * the EMAC LED override bits, then restore the saved LED mode.
 */
static int
bnx2_set_phys_id(struct net_device *dev, enum ethtool_phys_id_state state)
{
	struct bnx2 *bp = netdev_priv(dev);

	switch (state) {
	case ETHTOOL_ID_ACTIVE:
		/* Save the current LED configuration and switch the
		 * LEDs to MAC-driven mode for the duration.
		 */
		bp->leds_save = BNX2_RD(bp, BNX2_MISC_CFG);
		BNX2_WR(bp, BNX2_MISC_CFG, BNX2_MISC_CFG_LEDMODE_MAC);
		return 1;	/* cycle on/off once per second */

	case ETHTOOL_ID_ON:
		/* Force every LED override bit on. */
		BNX2_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE |
			BNX2_EMAC_LED_1000MB_OVERRIDE |
			BNX2_EMAC_LED_100MB_OVERRIDE |
			BNX2_EMAC_LED_10MB_OVERRIDE |
			BNX2_EMAC_LED_TRAFFIC_OVERRIDE |
			BNX2_EMAC_LED_TRAFFIC);
		break;

	case ETHTOOL_ID_OFF:
		BNX2_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE);
		break;

	case ETHTOOL_ID_INACTIVE:
		/* Clear the overrides and restore the saved LED mode. */
		BNX2_WR(bp, BNX2_EMAC_LED, 0);
		BNX2_WR(bp, BNX2_MISC_CFG, bp->leds_save);
		break;
	}

	return 0;
}
7714
7715 static netdev_features_t
7716 bnx2_fix_features(struct net_device *dev, netdev_features_t features)
7717 {
7718         struct bnx2 *bp = netdev_priv(dev);
7719
7720         if (!(bp->flags & BNX2_FLAG_CAN_KEEP_VLAN))
7721                 features |= NETIF_F_HW_VLAN_CTAG_RX;
7722
7723         return features;
7724 }
7725
/* ndo_set_features hook: adjust vlan_features for the TSO/VLAN
 * firmware limitation and reprogram the chip's RX VLAN handling when
 * the corresponding feature bit changed while the device is running.
 */
static int
bnx2_set_features(struct net_device *dev, netdev_features_t features)
{
	struct bnx2 *bp = netdev_priv(dev);

	/* TSO with VLAN tag won't work with current firmware */
	if (features & NETIF_F_HW_VLAN_CTAG_TX)
		dev->vlan_features |= (dev->hw_features & NETIF_F_ALL_TSO);
	else
		dev->vlan_features &= ~NETIF_F_ALL_TSO;

	/* If the requested RX VLAN setting differs from what the chip
	 * is currently programmed for, restart the interface around an
	 * RX-mode update and notify the firmware.  Returning 1 tells
	 * the caller dev->features was already committed here.
	 */
	if ((!!(features & NETIF_F_HW_VLAN_CTAG_RX) !=
	    !!(bp->rx_mode & BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG)) &&
	    netif_running(dev)) {
		bnx2_netif_stop(bp, false);
		dev->features = features;
		bnx2_set_rx_mode(dev);
		bnx2_fw_sync(bp, BNX2_DRV_MSG_CODE_KEEP_VLAN_UPDATE, 0, 1);
		bnx2_netif_start(bp, false);
		return 1;
	}

	return 0;
}
7750
7751 static void bnx2_get_channels(struct net_device *dev,
7752                               struct ethtool_channels *channels)
7753 {
7754         struct bnx2 *bp = netdev_priv(dev);
7755         u32 max_rx_rings = 1;
7756         u32 max_tx_rings = 1;
7757
7758         if ((bp->flags & BNX2_FLAG_MSIX_CAP) && !disable_msi) {
7759                 max_rx_rings = RX_MAX_RINGS;
7760                 max_tx_rings = TX_MAX_RINGS;
7761         }
7762
7763         channels->max_rx = max_rx_rings;
7764         channels->max_tx = max_tx_rings;
7765         channels->max_other = 0;
7766         channels->max_combined = 0;
7767         channels->rx_count = bp->num_rx_rings;
7768         channels->tx_count = bp->num_tx_rings;
7769         channels->other_count = 0;
7770         channels->combined_count = 0;
7771 }
7772
7773 static int bnx2_set_channels(struct net_device *dev,
7774                               struct ethtool_channels *channels)
7775 {
7776         struct bnx2 *bp = netdev_priv(dev);
7777         u32 max_rx_rings = 1;
7778         u32 max_tx_rings = 1;
7779         int rc = 0;
7780
7781         if ((bp->flags & BNX2_FLAG_MSIX_CAP) && !disable_msi) {
7782                 max_rx_rings = RX_MAX_RINGS;
7783                 max_tx_rings = TX_MAX_RINGS;
7784         }
7785         if (channels->rx_count > max_rx_rings ||
7786             channels->tx_count > max_tx_rings)
7787                 return -EINVAL;
7788
7789         bp->num_req_rx_rings = channels->rx_count;
7790         bp->num_req_tx_rings = channels->tx_count;
7791
7792         if (netif_running(dev))
7793                 rc = bnx2_change_ring_size(bp, bp->rx_ring_size,
7794                                            bp->tx_ring_size, true);
7795
7796         return rc;
7797 }
7798
/* ethtool entry points for the bnx2 driver. */
static const struct ethtool_ops bnx2_ethtool_ops = {
	.get_settings		= bnx2_get_settings,
	.set_settings		= bnx2_set_settings,
	.get_drvinfo		= bnx2_get_drvinfo,
	.get_regs_len		= bnx2_get_regs_len,
	.get_regs		= bnx2_get_regs,
	.get_wol		= bnx2_get_wol,
	.set_wol		= bnx2_set_wol,
	.nway_reset		= bnx2_nway_reset,
	.get_link		= bnx2_get_link,
	.get_eeprom_len		= bnx2_get_eeprom_len,
	.get_eeprom		= bnx2_get_eeprom,
	.set_eeprom		= bnx2_set_eeprom,
	.get_coalesce		= bnx2_get_coalesce,
	.set_coalesce		= bnx2_set_coalesce,
	.get_ringparam		= bnx2_get_ringparam,
	.set_ringparam		= bnx2_set_ringparam,
	.get_pauseparam		= bnx2_get_pauseparam,
	.set_pauseparam		= bnx2_set_pauseparam,
	.self_test		= bnx2_self_test,
	.get_strings		= bnx2_get_strings,
	.set_phys_id		= bnx2_set_phys_id,
	.get_ethtool_stats	= bnx2_get_ethtool_stats,
	.get_sset_count		= bnx2_get_sset_count,
	.get_channels		= bnx2_get_channels,
	.set_channels		= bnx2_set_channels,
};
7826
/* MII ioctl handler (SIOCGMIIPHY / SIOCGMIIREG / SIOCSMIIREG).
 * Called with rtnl_lock
 */
static int
bnx2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct mii_ioctl_data *data = if_mii(ifr);
	struct bnx2 *bp = netdev_priv(dev);
	int err;

	switch(cmd) {
	case SIOCGMIIPHY:
		data->phy_id = bp->phy_addr;

		/* fallthru */
	case SIOCGMIIREG: {
		u32 mii_regval;

		/* Direct MII access is not available when the PHY is
		 * controlled by remote (management) firmware.
		 */
		if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
			return -EOPNOTSUPP;

		if (!netif_running(dev))
			return -EAGAIN;

		/* PHY register access is serialized by phy_lock. */
		spin_lock_bh(&bp->phy_lock);
		err = bnx2_read_phy(bp, data->reg_num & 0x1f, &mii_regval);
		spin_unlock_bh(&bp->phy_lock);

		data->val_out = mii_regval;

		return err;
	}

	case SIOCSMIIREG:
		if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
			return -EOPNOTSUPP;

		if (!netif_running(dev))
			return -EAGAIN;

		spin_lock_bh(&bp->phy_lock);
		err = bnx2_write_phy(bp, data->reg_num & 0x1f, data->val_in);
		spin_unlock_bh(&bp->phy_lock);

		return err;

	default:
		/* do nothing */
		break;
	}
	return -EOPNOTSUPP;
}
7877
7878 /* Called with rtnl_lock */
7879 static int
7880 bnx2_change_mac_addr(struct net_device *dev, void *p)
7881 {
7882         struct sockaddr *addr = p;
7883         struct bnx2 *bp = netdev_priv(dev);
7884
7885         if (!is_valid_ether_addr(addr->sa_data))
7886                 return -EADDRNOTAVAIL;
7887
7888         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
7889         if (netif_running(dev))
7890                 bnx2_set_mac_addr(bp, bp->dev->dev_addr, 0);
7891
7892         return 0;
7893 }
7894
7895 /* Called with rtnl_lock */
7896 static int
7897 bnx2_change_mtu(struct net_device *dev, int new_mtu)
7898 {
7899         struct bnx2 *bp = netdev_priv(dev);
7900
7901         if (((new_mtu + ETH_HLEN) > MAX_ETHERNET_JUMBO_PACKET_SIZE) ||
7902                 ((new_mtu + ETH_HLEN) < MIN_ETHERNET_PACKET_SIZE))
7903                 return -EINVAL;
7904
7905         dev->mtu = new_mtu;
7906         return bnx2_change_ring_size(bp, bp->rx_ring_size, bp->tx_ring_size,
7907                                      false);
7908 }
7909
#ifdef CONFIG_NET_POLL_CONTROLLER
/* Netpoll hook: invoke every interrupt vector's handler with its IRQ
 * disabled, so netconsole/netpoll can make progress without normal
 * interrupt delivery.
 */
static void
poll_bnx2(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	int i;

	for (i = 0; i < bp->irq_nvecs; i++) {
		struct bnx2_irq *irq = &bp->irq_tbl[i];

		disable_irq(irq->vector);
		irq->handler(irq->vector, &bp->bnx2_napi[i]);
		enable_irq(irq->vector);
	}
}
#endif
7926
/* Determine whether a 5709 port is copper or SerDes from the dual
 * media control register and set BNX2_PHY_FLAG_SERDES accordingly.
 */
static void
bnx2_get_5709_media(struct bnx2 *bp)
{
	u32 val = BNX2_RD(bp, BNX2_MISC_DUAL_MEDIA_CTRL);
	u32 bond_id = val & BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID;
	u32 strap;

	/* The bond id alone identifies single-media parts: C = copper,
	 * S = SerDes.
	 */
	if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_C)
		return;
	else if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_S) {
		bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
		return;
	}

	/* Dual-media part: read the strap, preferring the software
	 * override value when it is in effect.
	 */
	if (val & BNX2_MISC_DUAL_MEDIA_CTRL_STRAP_OVERRIDE)
		strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL) >> 21;
	else
		strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL_STRAP) >> 8;

	/* The strap values meaning SerDes differ per PCI function. */
	if (bp->func == 0) {
		switch (strap) {
		case 0x4:
		case 0x5:
		case 0x6:
			bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
			return;
		}
	} else {
		switch (strap) {
		case 0x1:
		case 0x2:
		case 0x4:
			bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
			return;
		}
	}
}
7964
/* Detect conventional PCI vs PCI-X, the bus clock speed, and 32-bit
 * bus width from the chip's misc status / clock control registers,
 * recording the results in bp->flags and bp->bus_speed_mhz.
 */
static void
bnx2_get_pci_speed(struct bnx2 *bp)
{
	u32 reg;

	reg = BNX2_RD(bp, BNX2_PCICFG_MISC_STATUS);
	if (reg & BNX2_PCICFG_MISC_STATUS_PCIX_DET) {
		u32 clkreg;

		bp->flags |= BNX2_FLAG_PCIX;

		/* PCI-X: decode the detected clock speed field. */
		clkreg = BNX2_RD(bp, BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS);

		clkreg &= BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET;
		switch (clkreg) {
		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_133MHZ:
			bp->bus_speed_mhz = 133;
			break;

		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_95MHZ:
			bp->bus_speed_mhz = 100;
			break;

		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_66MHZ:
		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_80MHZ:
			bp->bus_speed_mhz = 66;
			break;

		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_48MHZ:
		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_55MHZ:
			bp->bus_speed_mhz = 50;
			break;

		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_LOW:
		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_32MHZ:
		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_38MHZ:
			bp->bus_speed_mhz = 33;
			break;
		}
	}
	else {
		/* Conventional PCI: M66EN distinguishes 66 vs 33 MHz. */
		if (reg & BNX2_PCICFG_MISC_STATUS_M66EN)
			bp->bus_speed_mhz = 66;
		else
			bp->bus_speed_mhz = 33;
	}

	if (reg & BNX2_PCICFG_MISC_STATUS_32BIT_DET)
		bp->flags |= BNX2_FLAG_PCI_32BIT;

}
8016
/* Read the Vital Product Data from NVRAM and, if the manufacturer ID
 * keyword is "1028" (Dell's PCI vendor ID), append the vendor version
 * string (keyword V0) to bp->fw_version.  Failures are silent: the
 * fw_version simply stays as-is.
 */
static void
bnx2_read_vpd_fw_ver(struct bnx2 *bp)
{
	int rc, i, j;
	u8 *data;
	unsigned int block_end, rosize, len;

#define BNX2_VPD_NVRAM_OFFSET	0x300
#define BNX2_VPD_LEN		128
#define BNX2_MAX_VER_SLEN	30

	data = kmalloc(256, GFP_KERNEL);
	if (!data)
		return;

	/* Read the raw VPD into the upper half of the buffer... */
	rc = bnx2_nvram_read(bp, BNX2_VPD_NVRAM_OFFSET, data + BNX2_VPD_LEN,
			     BNX2_VPD_LEN);
	if (rc)
		goto vpd_done;

	/* ...then reverse the bytes of each 32-bit word into the lower
	 * half, since NVRAM stores each word byte-swapped.
	 */
	for (i = 0; i < BNX2_VPD_LEN; i += 4) {
		data[i] = data[i + BNX2_VPD_LEN + 3];
		data[i + 1] = data[i + BNX2_VPD_LEN + 2];
		data[i + 2] = data[i + BNX2_VPD_LEN + 1];
		data[i + 3] = data[i + BNX2_VPD_LEN];
	}

	/* Locate the read-only VPD section and bounds-check it. */
	i = pci_vpd_find_tag(data, 0, BNX2_VPD_LEN, PCI_VPD_LRDT_RO_DATA);
	if (i < 0)
		goto vpd_done;

	rosize = pci_vpd_lrdt_size(&data[i]);
	i += PCI_VPD_LRDT_TAG_SIZE;
	block_end = i + rosize;

	if (block_end > BNX2_VPD_LEN)
		goto vpd_done;

	/* Only accept the VPD version when the MFR ID field is "1028". */
	j = pci_vpd_find_info_keyword(data, i, rosize,
				      PCI_VPD_RO_KEYWORD_MFR_ID);
	if (j < 0)
		goto vpd_done;

	len = pci_vpd_info_field_size(&data[j]);

	j += PCI_VPD_INFO_FLD_HDR_SIZE;
	if (j + len > block_end || len != 4 ||
	    memcmp(&data[j], "1028", 4))
		goto vpd_done;

	j = pci_vpd_find_info_keyword(data, i, rosize,
				      PCI_VPD_RO_KEYWORD_VENDOR0);
	if (j < 0)
		goto vpd_done;

	len = pci_vpd_info_field_size(&data[j]);

	j += PCI_VPD_INFO_FLD_HDR_SIZE;
	if (j + len > block_end || len > BNX2_MAX_VER_SLEN)
		goto vpd_done;

	/* Copy the version and terminate with a space; the remainder of
	 * fw_version is appended later by bnx2_init_board().
	 */
	memcpy(bp->fw_version, &data[j], len);
	bp->fw_version[len] = ' ';

vpd_done:
	kfree(data);
}
8084
8085 static int
8086 bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
8087 {
8088         struct bnx2 *bp;
8089         int rc, i, j;
8090         u32 reg;
8091         u64 dma_mask, persist_dma_mask;
8092         int err;
8093
8094         SET_NETDEV_DEV(dev, &pdev->dev);
8095         bp = netdev_priv(dev);
8096
8097         bp->flags = 0;
8098         bp->phy_flags = 0;
8099
8100         bp->temp_stats_blk =
8101                 kzalloc(sizeof(struct statistics_block), GFP_KERNEL);
8102
8103         if (bp->temp_stats_blk == NULL) {
8104                 rc = -ENOMEM;
8105                 goto err_out;
8106         }
8107
8108         /* enable device (incl. PCI PM wakeup), and bus-mastering */
8109         rc = pci_enable_device(pdev);
8110         if (rc) {
8111                 dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
8112                 goto err_out;
8113         }
8114
8115         if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
8116                 dev_err(&pdev->dev,
8117                         "Cannot find PCI device base address, aborting\n");
8118                 rc = -ENODEV;
8119                 goto err_out_disable;
8120         }
8121
8122         rc = pci_request_regions(pdev, DRV_MODULE_NAME);
8123         if (rc) {
8124                 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
8125                 goto err_out_disable;
8126         }
8127
8128         pci_set_master(pdev);
8129
8130         bp->pm_cap = pdev->pm_cap;
8131         if (bp->pm_cap == 0) {
8132                 dev_err(&pdev->dev,
8133                         "Cannot find power management capability, aborting\n");
8134                 rc = -EIO;
8135                 goto err_out_release;
8136         }
8137
8138         bp->dev = dev;
8139         bp->pdev = pdev;
8140
8141         spin_lock_init(&bp->phy_lock);
8142         spin_lock_init(&bp->indirect_lock);
8143 #ifdef BCM_CNIC
8144         mutex_init(&bp->cnic_lock);
8145 #endif
8146         INIT_WORK(&bp->reset_task, bnx2_reset_task);
8147
8148         bp->regview = pci_iomap(pdev, 0, MB_GET_CID_ADDR(TX_TSS_CID +
8149                                                          TX_MAX_TSS_RINGS + 1));
8150         if (!bp->regview) {
8151                 dev_err(&pdev->dev, "Cannot map register space, aborting\n");
8152                 rc = -ENOMEM;
8153                 goto err_out_release;
8154         }
8155
8156         /* Configure byte swap and enable write to the reg_window registers.
8157          * Rely on CPU to do target byte swapping on big endian systems
8158          * The chip's target access swapping will not swap all accesses
8159          */
8160         BNX2_WR(bp, BNX2_PCICFG_MISC_CONFIG,
8161                 BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
8162                 BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP);
8163
8164         bp->chip_id = BNX2_RD(bp, BNX2_MISC_ID);
8165
8166         if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
8167                 if (!pci_is_pcie(pdev)) {
8168                         dev_err(&pdev->dev, "Not PCIE, aborting\n");
8169                         rc = -EIO;
8170                         goto err_out_unmap;
8171                 }
8172                 bp->flags |= BNX2_FLAG_PCIE;
8173                 if (BNX2_CHIP_REV(bp) == BNX2_CHIP_REV_Ax)
8174                         bp->flags |= BNX2_FLAG_JUMBO_BROKEN;
8175
8176                 /* AER (Advanced Error Reporting) hooks */
8177                 err = pci_enable_pcie_error_reporting(pdev);
8178                 if (!err)
8179                         bp->flags |= BNX2_FLAG_AER_ENABLED;
8180
8181         } else {
8182                 bp->pcix_cap = pci_find_capability(pdev, PCI_CAP_ID_PCIX);
8183                 if (bp->pcix_cap == 0) {
8184                         dev_err(&pdev->dev,
8185                                 "Cannot find PCIX capability, aborting\n");
8186                         rc = -EIO;
8187                         goto err_out_unmap;
8188                 }
8189                 bp->flags |= BNX2_FLAG_BROKEN_STATS;
8190         }
8191
8192         if (BNX2_CHIP(bp) == BNX2_CHIP_5709 &&
8193             BNX2_CHIP_REV(bp) != BNX2_CHIP_REV_Ax) {
8194                 if (pdev->msix_cap)
8195                         bp->flags |= BNX2_FLAG_MSIX_CAP;
8196         }
8197
8198         if (BNX2_CHIP_ID(bp) != BNX2_CHIP_ID_5706_A0 &&
8199             BNX2_CHIP_ID(bp) != BNX2_CHIP_ID_5706_A1) {
8200                 if (pdev->msi_cap)
8201                         bp->flags |= BNX2_FLAG_MSI_CAP;
8202         }
8203
8204         /* 5708 cannot support DMA addresses > 40-bit.  */
8205         if (BNX2_CHIP(bp) == BNX2_CHIP_5708)
8206                 persist_dma_mask = dma_mask = DMA_BIT_MASK(40);
8207         else
8208                 persist_dma_mask = dma_mask = DMA_BIT_MASK(64);
8209
8210         /* Configure DMA attributes. */
8211         if (pci_set_dma_mask(pdev, dma_mask) == 0) {
8212                 dev->features |= NETIF_F_HIGHDMA;
8213                 rc = pci_set_consistent_dma_mask(pdev, persist_dma_mask);
8214                 if (rc) {
8215                         dev_err(&pdev->dev,
8216                                 "pci_set_consistent_dma_mask failed, aborting\n");
8217                         goto err_out_unmap;
8218                 }
8219         } else if ((rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) != 0) {
8220                 dev_err(&pdev->dev, "System does not support DMA, aborting\n");
8221                 goto err_out_unmap;
8222         }
8223
8224         if (!(bp->flags & BNX2_FLAG_PCIE))
8225                 bnx2_get_pci_speed(bp);
8226
8227         /* 5706A0 may falsely detect SERR and PERR. */
8228         if (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A0) {
8229                 reg = BNX2_RD(bp, PCI_COMMAND);
8230                 reg &= ~(PCI_COMMAND_SERR | PCI_COMMAND_PARITY);
8231                 BNX2_WR(bp, PCI_COMMAND, reg);
8232         } else if ((BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A1) &&
8233                 !(bp->flags & BNX2_FLAG_PCIX)) {
8234
8235                 dev_err(&pdev->dev,
8236                         "5706 A1 can only be used in a PCIX bus, aborting\n");
8237                 goto err_out_unmap;
8238         }
8239
8240         bnx2_init_nvram(bp);
8241
8242         reg = bnx2_reg_rd_ind(bp, BNX2_SHM_HDR_SIGNATURE);
8243
8244         if (bnx2_reg_rd_ind(bp, BNX2_MCP_TOE_ID) & BNX2_MCP_TOE_ID_FUNCTION_ID)
8245                 bp->func = 1;
8246
8247         if ((reg & BNX2_SHM_HDR_SIGNATURE_SIG_MASK) ==
8248             BNX2_SHM_HDR_SIGNATURE_SIG) {
8249                 u32 off = bp->func << 2;
8250
8251                 bp->shmem_base = bnx2_reg_rd_ind(bp, BNX2_SHM_HDR_ADDR_0 + off);
8252         } else
8253                 bp->shmem_base = HOST_VIEW_SHMEM_BASE;
8254
8255         /* Get the permanent MAC address.  First we need to make sure the
8256          * firmware is actually running.
8257          */
8258         reg = bnx2_shmem_rd(bp, BNX2_DEV_INFO_SIGNATURE);
8259
8260         if ((reg & BNX2_DEV_INFO_SIGNATURE_MAGIC_MASK) !=
8261             BNX2_DEV_INFO_SIGNATURE_MAGIC) {
8262                 dev_err(&pdev->dev, "Firmware not running, aborting\n");
8263                 rc = -ENODEV;
8264                 goto err_out_unmap;
8265         }
8266
8267         bnx2_read_vpd_fw_ver(bp);
8268
8269         j = strlen(bp->fw_version);
8270         reg = bnx2_shmem_rd(bp, BNX2_DEV_INFO_BC_REV);
8271         for (i = 0; i < 3 && j < 24; i++) {
8272                 u8 num, k, skip0;
8273
8274                 if (i == 0) {
8275                         bp->fw_version[j++] = 'b';
8276                         bp->fw_version[j++] = 'c';
8277                         bp->fw_version[j++] = ' ';
8278                 }
8279                 num = (u8) (reg >> (24 - (i * 8)));
8280                 for (k = 100, skip0 = 1; k >= 1; num %= k, k /= 10) {
8281                         if (num >= k || !skip0 || k == 1) {
8282                                 bp->fw_version[j++] = (num / k) + '0';
8283                                 skip0 = 0;
8284                         }
8285                 }
8286                 if (i != 2)
8287                         bp->fw_version[j++] = '.';
8288         }
8289         reg = bnx2_shmem_rd(bp, BNX2_PORT_FEATURE);
8290         if (reg & BNX2_PORT_FEATURE_WOL_ENABLED)
8291                 bp->wol = 1;
8292
8293         if (reg & BNX2_PORT_FEATURE_ASF_ENABLED) {
8294                 bp->flags |= BNX2_FLAG_ASF_ENABLE;
8295
8296                 for (i = 0; i < 30; i++) {
8297                         reg = bnx2_shmem_rd(bp, BNX2_BC_STATE_CONDITION);
8298                         if (reg & BNX2_CONDITION_MFW_RUN_MASK)
8299                                 break;
8300                         msleep(10);
8301                 }
8302         }
8303         reg = bnx2_shmem_rd(bp, BNX2_BC_STATE_CONDITION);
8304         reg &= BNX2_CONDITION_MFW_RUN_MASK;
8305         if (reg != BNX2_CONDITION_MFW_RUN_UNKNOWN &&
8306             reg != BNX2_CONDITION_MFW_RUN_NONE) {
8307                 u32 addr = bnx2_shmem_rd(bp, BNX2_MFW_VER_PTR);
8308
8309                 if (j < 32)
8310                         bp->fw_version[j++] = ' ';
8311                 for (i = 0; i < 3 && j < 28; i++) {
8312                         reg = bnx2_reg_rd_ind(bp, addr + i * 4);
8313                         reg = be32_to_cpu(reg);
8314                         memcpy(&bp->fw_version[j], &reg, 4);
8315                         j += 4;
8316                 }
8317         }
8318
8319         reg = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_MAC_UPPER);
8320         bp->mac_addr[0] = (u8) (reg >> 8);
8321         bp->mac_addr[1] = (u8) reg;
8322
8323         reg = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_MAC_LOWER);
8324         bp->mac_addr[2] = (u8) (reg >> 24);
8325         bp->mac_addr[3] = (u8) (reg >> 16);
8326         bp->mac_addr[4] = (u8) (reg >> 8);
8327         bp->mac_addr[5] = (u8) reg;
8328
8329         bp->tx_ring_size = BNX2_MAX_TX_DESC_CNT;
8330         bnx2_set_rx_ring_size(bp, 255);
8331
8332         bp->tx_quick_cons_trip_int = 2;
8333         bp->tx_quick_cons_trip = 20;
8334         bp->tx_ticks_int = 18;
8335         bp->tx_ticks = 80;
8336
8337         bp->rx_quick_cons_trip_int = 2;
8338         bp->rx_quick_cons_trip = 12;
8339         bp->rx_ticks_int = 18;
8340         bp->rx_ticks = 18;
8341
8342         bp->stats_ticks = USEC_PER_SEC & BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
8343
8344         bp->current_interval = BNX2_TIMER_INTERVAL;
8345
8346         bp->phy_addr = 1;
8347
8348         /* Disable WOL support if we are running on a SERDES chip. */
8349         if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
8350                 bnx2_get_5709_media(bp);
8351         else if (BNX2_CHIP_BOND(bp) & BNX2_CHIP_BOND_SERDES_BIT)
8352                 bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
8353
8354         bp->phy_port = PORT_TP;
8355         if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
8356                 bp->phy_port = PORT_FIBRE;
8357                 reg = bnx2_shmem_rd(bp, BNX2_SHARED_HW_CFG_CONFIG);
8358                 if (!(reg & BNX2_SHARED_HW_CFG_GIG_LINK_ON_VAUX)) {
8359                         bp->flags |= BNX2_FLAG_NO_WOL;
8360                         bp->wol = 0;
8361                 }
8362                 if (BNX2_CHIP(bp) == BNX2_CHIP_5706) {
8363                         /* Don't do parallel detect on this board because of
8364                          * some board problems.  The link will not go down
8365                          * if we do parallel detect.
8366                          */
8367                         if (pdev->subsystem_vendor == PCI_VENDOR_ID_HP &&
8368                             pdev->subsystem_device == 0x310c)
8369                                 bp->phy_flags |= BNX2_PHY_FLAG_NO_PARALLEL;
8370                 } else {
8371                         bp->phy_addr = 2;
8372                         if (reg & BNX2_SHARED_HW_CFG_PHY_2_5G)
8373                                 bp->phy_flags |= BNX2_PHY_FLAG_2_5G_CAPABLE;
8374                 }
8375         } else if (BNX2_CHIP(bp) == BNX2_CHIP_5706 ||
8376                    BNX2_CHIP(bp) == BNX2_CHIP_5708)
8377                 bp->phy_flags |= BNX2_PHY_FLAG_CRC_FIX;
8378         else if (BNX2_CHIP(bp) == BNX2_CHIP_5709 &&
8379                  (BNX2_CHIP_REV(bp) == BNX2_CHIP_REV_Ax ||
8380                   BNX2_CHIP_REV(bp) == BNX2_CHIP_REV_Bx))
8381                 bp->phy_flags |= BNX2_PHY_FLAG_DIS_EARLY_DAC;
8382
8383         bnx2_init_fw_cap(bp);
8384
8385         if ((BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5708_A0) ||
8386             (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5708_B0) ||
8387             (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5708_B1) ||
8388             !(BNX2_RD(bp, BNX2_PCI_CONFIG_3) & BNX2_PCI_CONFIG_3_VAUX_PRESET)) {
8389                 bp->flags |= BNX2_FLAG_NO_WOL;
8390                 bp->wol = 0;
8391         }
8392
8393         if (bp->flags & BNX2_FLAG_NO_WOL)
8394                 device_set_wakeup_capable(&bp->pdev->dev, false);
8395         else
8396                 device_set_wakeup_enable(&bp->pdev->dev, bp->wol);
8397
8398         if (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A0) {
8399                 bp->tx_quick_cons_trip_int =
8400                         bp->tx_quick_cons_trip;
8401                 bp->tx_ticks_int = bp->tx_ticks;
8402                 bp->rx_quick_cons_trip_int =
8403                         bp->rx_quick_cons_trip;
8404                 bp->rx_ticks_int = bp->rx_ticks;
8405                 bp->comp_prod_trip_int = bp->comp_prod_trip;
8406                 bp->com_ticks_int = bp->com_ticks;
8407                 bp->cmd_ticks_int = bp->cmd_ticks;
8408         }
8409
8410         /* Disable MSI on 5706 if AMD 8132 bridge is found.
8411          *
8412          * MSI is defined to be 32-bit write.  The 5706 does 64-bit MSI writes
8413          * with byte enables disabled on the unused 32-bit word.  This is legal
8414          * but causes problems on the AMD 8132 which will eventually stop
8415          * responding after a while.
8416          *
8417          * AMD believes this incompatibility is unique to the 5706, and
8418          * prefers to locally disable MSI rather than globally disabling it.
8419          */
8420         if (BNX2_CHIP(bp) == BNX2_CHIP_5706 && disable_msi == 0) {
8421                 struct pci_dev *amd_8132 = NULL;
8422
8423                 while ((amd_8132 = pci_get_device(PCI_VENDOR_ID_AMD,
8424                                                   PCI_DEVICE_ID_AMD_8132_BRIDGE,
8425                                                   amd_8132))) {
8426
8427                         if (amd_8132->revision >= 0x10 &&
8428                             amd_8132->revision <= 0x13) {
8429                                 disable_msi = 1;
8430                                 pci_dev_put(amd_8132);
8431                                 break;
8432                         }
8433                 }
8434         }
8435
8436         bnx2_set_default_link(bp);
8437         bp->req_flow_ctrl = FLOW_CTRL_RX | FLOW_CTRL_TX;
8438
8439         init_timer(&bp->timer);
8440         bp->timer.expires = RUN_AT(BNX2_TIMER_INTERVAL);
8441         bp->timer.data = (unsigned long) bp;
8442         bp->timer.function = bnx2_timer;
8443
8444 #ifdef BCM_CNIC
8445         if (bnx2_shmem_rd(bp, BNX2_ISCSI_INITIATOR) & BNX2_ISCSI_INITIATOR_EN)
8446                 bp->cnic_eth_dev.max_iscsi_conn =
8447                         (bnx2_shmem_rd(bp, BNX2_ISCSI_MAX_CONN) &
8448                          BNX2_ISCSI_MAX_CONN_MASK) >> BNX2_ISCSI_MAX_CONN_SHIFT;
8449         bp->cnic_probe = bnx2_cnic_probe;
8450 #endif
8451         pci_save_state(pdev);
8452
8453         return 0;
8454
8455 err_out_unmap:
8456         if (bp->flags & BNX2_FLAG_AER_ENABLED) {
8457                 pci_disable_pcie_error_reporting(pdev);
8458                 bp->flags &= ~BNX2_FLAG_AER_ENABLED;
8459         }
8460
8461         pci_iounmap(pdev, bp->regview);
8462         bp->regview = NULL;
8463
8464 err_out_release:
8465         pci_release_regions(pdev);
8466
8467 err_out_disable:
8468         pci_disable_device(pdev);
8469
8470 err_out:
8471         return rc;
8472 }
8473
8474 static char *
8475 bnx2_bus_string(struct bnx2 *bp, char *str)
8476 {
8477         char *s = str;
8478
8479         if (bp->flags & BNX2_FLAG_PCIE) {
8480                 s += sprintf(s, "PCI Express");
8481         } else {
8482                 s += sprintf(s, "PCI");
8483                 if (bp->flags & BNX2_FLAG_PCIX)
8484                         s += sprintf(s, "-X");
8485                 if (bp->flags & BNX2_FLAG_PCI_32BIT)
8486                         s += sprintf(s, " 32-bit");
8487                 else
8488                         s += sprintf(s, " 64-bit");
8489                 s += sprintf(s, " %dMHz", bp->bus_speed_mhz);
8490         }
8491         return str;
8492 }
8493
8494 static void
8495 bnx2_del_napi(struct bnx2 *bp)
8496 {
8497         int i;
8498
8499         for (i = 0; i < bp->irq_nvecs; i++)
8500                 netif_napi_del(&bp->bnx2_napi[i].napi);
8501 }
8502
8503 static void
8504 bnx2_init_napi(struct bnx2 *bp)
8505 {
8506         int i;
8507
8508         for (i = 0; i < bp->irq_nvecs; i++) {
8509                 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
8510                 int (*poll)(struct napi_struct *, int);
8511
8512                 if (i == 0)
8513                         poll = bnx2_poll;
8514                 else
8515                         poll = bnx2_poll_msix;
8516
8517                 netif_napi_add(bp->dev, &bp->bnx2_napi[i].napi, poll, 64);
8518                 bnapi->bp = bp;
8519         }
8520 }
8521
/* Netdev callbacks wired up for every bnx2 interface in bnx2_init_one(). */
static const struct net_device_ops bnx2_netdev_ops = {
	.ndo_open		= bnx2_open,
	.ndo_start_xmit		= bnx2_start_xmit,
	.ndo_stop		= bnx2_close,
	.ndo_get_stats64	= bnx2_get_stats64,
	.ndo_set_rx_mode	= bnx2_set_rx_mode,
	.ndo_do_ioctl		= bnx2_ioctl,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= bnx2_change_mac_addr,
	.ndo_change_mtu		= bnx2_change_mtu,
	.ndo_fix_features	= bnx2_fix_features,
	.ndo_set_features	= bnx2_set_features,
	.ndo_tx_timeout		= bnx2_tx_timeout,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= poll_bnx2,
#endif
};
8539
8540 static int
8541 bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
8542 {
8543         static int version_printed = 0;
8544         struct net_device *dev;
8545         struct bnx2 *bp;
8546         int rc;
8547         char str[40];
8548
8549         if (version_printed++ == 0)
8550                 pr_info("%s", version);
8551
8552         /* dev zeroed in init_etherdev */
8553         dev = alloc_etherdev_mq(sizeof(*bp), TX_MAX_RINGS);
8554         if (!dev)
8555                 return -ENOMEM;
8556
8557         rc = bnx2_init_board(pdev, dev);
8558         if (rc < 0)
8559                 goto err_free;
8560
8561         dev->netdev_ops = &bnx2_netdev_ops;
8562         dev->watchdog_timeo = TX_TIMEOUT;
8563         dev->ethtool_ops = &bnx2_ethtool_ops;
8564
8565         bp = netdev_priv(dev);
8566
8567         pci_set_drvdata(pdev, dev);
8568
8569         memcpy(dev->dev_addr, bp->mac_addr, ETH_ALEN);
8570
8571         dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG |
8572                 NETIF_F_TSO | NETIF_F_TSO_ECN |
8573                 NETIF_F_RXHASH | NETIF_F_RXCSUM;
8574
8575         if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
8576                 dev->hw_features |= NETIF_F_IPV6_CSUM | NETIF_F_TSO6;
8577
8578         dev->vlan_features = dev->hw_features;
8579         dev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;
8580         dev->features |= dev->hw_features;
8581         dev->priv_flags |= IFF_UNICAST_FLT;
8582
8583         if ((rc = register_netdev(dev))) {
8584                 dev_err(&pdev->dev, "Cannot register net device\n");
8585                 goto error;
8586         }
8587
8588         netdev_info(dev, "%s (%c%d) %s found at mem %lx, IRQ %d, "
8589                     "node addr %pM\n", board_info[ent->driver_data].name,
8590                     ((BNX2_CHIP_ID(bp) & 0xf000) >> 12) + 'A',
8591                     ((BNX2_CHIP_ID(bp) & 0x0ff0) >> 4),
8592                     bnx2_bus_string(bp, str), (long)pci_resource_start(pdev, 0),
8593                     pdev->irq, dev->dev_addr);
8594
8595         return 0;
8596
8597 error:
8598         pci_iounmap(pdev, bp->regview);
8599         pci_release_regions(pdev);
8600         pci_disable_device(pdev);
8601 err_free:
8602         free_netdev(dev);
8603         return rc;
8604 }
8605
/* PCI remove callback: tear the device down in roughly the reverse order
 * of bnx2_init_one()/bnx2_init_board().  The netdev is unregistered first
 * so no new I/O can start before resources are released.
 */
static void
bnx2_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);

	unregister_netdev(dev);

	/* Make sure the timer and any queued reset work have finished
	 * before freeing the state they touch.
	 */
	del_timer_sync(&bp->timer);
	cancel_work_sync(&bp->reset_task);

	pci_iounmap(bp->pdev, bp->regview);

	kfree(bp->temp_stats_blk);

	if (bp->flags & BNX2_FLAG_AER_ENABLED) {
		pci_disable_pcie_error_reporting(pdev);
		bp->flags &= ~BNX2_FLAG_AER_ENABLED;
	}

	bnx2_release_firmware(bp);

	/* bp is embedded in dev's private area; free_netdev() frees both. */
	free_netdev(dev);

	pci_release_regions(pdev);
	pci_disable_device(pdev);
}
8633
/* PM suspend callback: if the interface is up, quiesce it (stop NAPI and
 * the queues, detach, kill the timer, reset the chip, release IRQs and
 * rx/tx buffers), then program wake-on-LAN state.  Always returns 0.
 */
static int
bnx2_suspend(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);

	if (netif_running(dev)) {
		/* Flush the reset task first so it cannot re-init the
		 * hardware underneath the teardown below.
		 */
		cancel_work_sync(&bp->reset_task);
		bnx2_netif_stop(bp, true);
		netif_device_detach(dev);
		del_timer_sync(&bp->timer);
		bnx2_shutdown_chip(bp);
		__bnx2_free_irq(bp);
		bnx2_free_skbs(bp);
	}
	/* WOL is configured even when the interface is down. */
	bnx2_setup_wol(bp);
	return 0;
}
8653
8654 static int
8655 bnx2_resume(struct device *device)
8656 {
8657         struct pci_dev *pdev = to_pci_dev(device);
8658         struct net_device *dev = pci_get_drvdata(pdev);
8659         struct bnx2 *bp = netdev_priv(dev);
8660
8661         if (!netif_running(dev))
8662                 return 0;
8663
8664         bnx2_set_power_state(bp, PCI_D0);
8665         netif_device_attach(dev);
8666         bnx2_request_irq(bp);
8667         bnx2_init_nic(bp, 1);
8668         bnx2_netif_start(bp, true);
8669         return 0;
8670 }
8671
8672 #ifdef CONFIG_PM_SLEEP
8673 static SIMPLE_DEV_PM_OPS(bnx2_pm_ops, bnx2_suspend, bnx2_resume);
8674 #define BNX2_PM_OPS (&bnx2_pm_ops)
8675
8676 #else
8677
8678 #define BNX2_PM_OPS NULL
8679
8680 #endif /* CONFIG_PM_SLEEP */
8681 /**
8682  * bnx2_io_error_detected - called when PCI error is detected
8683  * @pdev: Pointer to PCI device
8684  * @state: The current pci connection state
8685  *
8686  * This function is called after a PCI bus error affecting
8687  * this device has been detected.
8688  */
8689 static pci_ers_result_t bnx2_io_error_detected(struct pci_dev *pdev,
8690                                                pci_channel_state_t state)
8691 {
8692         struct net_device *dev = pci_get_drvdata(pdev);
8693         struct bnx2 *bp = netdev_priv(dev);
8694
8695         rtnl_lock();
8696         netif_device_detach(dev);
8697
8698         if (state == pci_channel_io_perm_failure) {
8699                 rtnl_unlock();
8700                 return PCI_ERS_RESULT_DISCONNECT;
8701         }
8702
8703         if (netif_running(dev)) {
8704                 bnx2_netif_stop(bp, true);
8705                 del_timer_sync(&bp->timer);
8706                 bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET);
8707         }
8708
8709         pci_disable_device(pdev);
8710         rtnl_unlock();
8711
8712         /* Request a slot slot reset. */
8713         return PCI_ERS_RESULT_NEED_RESET;
8714 }
8715
8716 /**
8717  * bnx2_io_slot_reset - called after the pci bus has been reset.
8718  * @pdev: Pointer to PCI device
8719  *
8720  * Restart the card from scratch, as if from a cold-boot.
8721  */
8722 static pci_ers_result_t bnx2_io_slot_reset(struct pci_dev *pdev)
8723 {
8724         struct net_device *dev = pci_get_drvdata(pdev);
8725         struct bnx2 *bp = netdev_priv(dev);
8726         pci_ers_result_t result = PCI_ERS_RESULT_DISCONNECT;
8727         int err = 0;
8728
8729         rtnl_lock();
8730         if (pci_enable_device(pdev)) {
8731                 dev_err(&pdev->dev,
8732                         "Cannot re-enable PCI device after reset\n");
8733         } else {
8734                 pci_set_master(pdev);
8735                 pci_restore_state(pdev);
8736                 pci_save_state(pdev);
8737
8738                 if (netif_running(dev))
8739                         err = bnx2_init_nic(bp, 1);
8740
8741                 if (!err)
8742                         result = PCI_ERS_RESULT_RECOVERED;
8743         }
8744
8745         if (result != PCI_ERS_RESULT_RECOVERED && netif_running(dev)) {
8746                 bnx2_napi_enable(bp);
8747                 dev_close(dev);
8748         }
8749         rtnl_unlock();
8750
8751         if (!(bp->flags & BNX2_FLAG_AER_ENABLED))
8752                 return result;
8753
8754         err = pci_cleanup_aer_uncorrect_error_status(pdev);
8755         if (err) {
8756                 dev_err(&pdev->dev,
8757                         "pci_cleanup_aer_uncorrect_error_status failed 0x%0x\n",
8758                          err); /* non-fatal, continue */
8759         }
8760
8761         return result;
8762 }
8763
8764 /**
8765  * bnx2_io_resume - called when traffic can start flowing again.
8766  * @pdev: Pointer to PCI device
8767  *
8768  * This callback is called when the error recovery driver tells us that
8769  * its OK to resume normal operation.
8770  */
8771 static void bnx2_io_resume(struct pci_dev *pdev)
8772 {
8773         struct net_device *dev = pci_get_drvdata(pdev);
8774         struct bnx2 *bp = netdev_priv(dev);
8775
8776         rtnl_lock();
8777         if (netif_running(dev))
8778                 bnx2_netif_start(bp, true);
8779
8780         netif_device_attach(dev);
8781         rtnl_unlock();
8782 }
8783
8784 static void bnx2_shutdown(struct pci_dev *pdev)
8785 {
8786         struct net_device *dev = pci_get_drvdata(pdev);
8787         struct bnx2 *bp;
8788
8789         if (!dev)
8790                 return;
8791
8792         bp = netdev_priv(dev);
8793         if (!bp)
8794                 return;
8795
8796         rtnl_lock();
8797         if (netif_running(dev))
8798                 dev_close(bp->dev);
8799
8800         if (system_state == SYSTEM_POWER_OFF)
8801                 bnx2_set_power_state(bp, PCI_D3hot);
8802
8803         rtnl_unlock();
8804 }
8805
/* PCI AER error-recovery callbacks (detect -> slot reset -> resume). */
static const struct pci_error_handlers bnx2_err_handler = {
	.error_detected	= bnx2_io_error_detected,
	.slot_reset	= bnx2_io_slot_reset,
	.resume		= bnx2_io_resume,
};
8811
/* Driver registration: probe/remove, PM ops, AER handlers and shutdown. */
static struct pci_driver bnx2_pci_driver = {
	.name		= DRV_MODULE_NAME,
	.id_table	= bnx2_pci_tbl,
	.probe		= bnx2_init_one,
	.remove		= bnx2_remove_one,
	.driver.pm	= BNX2_PM_OPS,
	.err_handler	= &bnx2_err_handler,
	.shutdown	= bnx2_shutdown,
};

/* Expands to module_init/module_exit that register/unregister the driver. */
module_pci_driver(bnx2_pci_driver);