[BNX2]: Remove CTX_WR macro.
[sfrench/cifs-2.6.git] / drivers / net / bnx2.c
1 /* bnx2.c: Broadcom NX2 network driver.
2  *
3  * Copyright (c) 2004-2008 Broadcom Corporation
4  *
5  * This program is free software; you can redistribute it and/or modify
6  * it under the terms of the GNU General Public License as published by
7  * the Free Software Foundation.
8  *
9  * Written by: Michael Chan  (mchan@broadcom.com)
10  */
11
12
13 #include <linux/module.h>
14 #include <linux/moduleparam.h>
15
16 #include <linux/kernel.h>
17 #include <linux/timer.h>
18 #include <linux/errno.h>
19 #include <linux/ioport.h>
20 #include <linux/slab.h>
21 #include <linux/vmalloc.h>
22 #include <linux/interrupt.h>
23 #include <linux/pci.h>
24 #include <linux/init.h>
25 #include <linux/netdevice.h>
26 #include <linux/etherdevice.h>
27 #include <linux/skbuff.h>
28 #include <linux/dma-mapping.h>
29 #include <linux/bitops.h>
30 #include <asm/io.h>
31 #include <asm/irq.h>
32 #include <linux/delay.h>
33 #include <asm/byteorder.h>
34 #include <asm/page.h>
35 #include <linux/time.h>
36 #include <linux/ethtool.h>
37 #include <linux/mii.h>
38 #ifdef NETIF_F_HW_VLAN_TX
39 #include <linux/if_vlan.h>
40 #define BCM_VLAN 1
41 #endif
42 #include <net/ip.h>
43 #include <net/tcp.h>
44 #include <net/checksum.h>
45 #include <linux/workqueue.h>
46 #include <linux/crc32.h>
47 #include <linux/prefetch.h>
48 #include <linux/cache.h>
49 #include <linux/zlib.h>
50
51 #include "bnx2.h"
52 #include "bnx2_fw.h"
53 #include "bnx2_fw2.h"
54
/* Driver-wide constants, version banner and module parameters. */
55 #define FW_BUF_SIZE             0x10000
56
57 #define DRV_MODULE_NAME         "bnx2"
58 #define PFX DRV_MODULE_NAME     ": "
59 #define DRV_MODULE_VERSION      "1.7.2"
60 #define DRV_MODULE_RELDATE      "January 21, 2008"
61
/* Helper for scheduling timers relative to now. */
62 #define RUN_AT(x) (jiffies + (x))
63
64 /* Time in jiffies before concluding the transmitter is hung. */
65 #define TX_TIMEOUT  (5*HZ)
66
67 static const char version[] __devinitdata =
68         "Broadcom NetXtreme II Gigabit Ethernet Driver " DRV_MODULE_NAME " v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
69
70 MODULE_AUTHOR("Michael Chan <mchan@broadcom.com>");
71 MODULE_DESCRIPTION("Broadcom NetXtreme II BCM5706/5708 Driver");
72 MODULE_LICENSE("GPL");
73 MODULE_VERSION(DRV_MODULE_VERSION);
74
/* Module parameter: set non-zero to force legacy INTx interrupts. */
75 static int disable_msi = 0;
76
77 module_param(disable_msi, int, 0);
78 MODULE_PARM_DESC(disable_msi, "Disable Message Signaled Interrupt (MSI)");
79
79
/* Board identifiers; used as the driver_data index into board_info[]
 * and referenced from the PCI device table below.
 */
80 typedef enum {
81         BCM5706 = 0,
82         NC370T,
83         NC370I,
84         BCM5706S,
85         NC370F,
86         BCM5708,
87         BCM5708S,
88         BCM5709,
89         BCM5709S,
90 } board_t;
91
92 /* indexed by board_t, above */
/* Human-readable adapter names; order must stay in sync with board_t. */
93 static const struct {
94         char *name;
95 } board_info[] __devinitdata = {
96         { "Broadcom NetXtreme II BCM5706 1000Base-T" },
97         { "HP NC370T Multifunction Gigabit Server Adapter" },
98         { "HP NC370i Multifunction Gigabit Server Adapter" },
99         { "Broadcom NetXtreme II BCM5706 1000Base-SX" },
100         { "HP NC370F Multifunction Gigabit Server Adapter" },
101         { "Broadcom NetXtreme II BCM5708 1000Base-T" },
102         { "Broadcom NetXtreme II BCM5708 1000Base-SX" },
103         { "Broadcom NetXtreme II BCM5709 1000Base-T" },
104         { "Broadcom NetXtreme II BCM5709 1000Base-SX" },
105         };
106
/* PCI IDs this driver binds to.  HP OEM subsystem IDs are matched
 * before the wildcard (PCI_ANY_ID) entries for the same device ID,
 * so the more specific NC370x names win.  driver_data is a board_t.
 */
107 static struct pci_device_id bnx2_pci_tbl[] = {
108         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
109           PCI_VENDOR_ID_HP, 0x3101, 0, 0, NC370T },
110         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
111           PCI_VENDOR_ID_HP, 0x3106, 0, 0, NC370I },
112         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
113           PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706 },
114         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708,
115           PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708 },
116         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
117           PCI_VENDOR_ID_HP, 0x3102, 0, 0, NC370F },
118         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
119           PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706S },
120         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708S,
121           PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708S },
122         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709,
123           PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709 },
124         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709S,
125           PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709S },
126         { 0, }
127 };
128
/* Table of NVRAM devices the chip may be strapped to use.  The first
 * field of each entry appears to be the strapping/ID value the driver
 * matches at init time (NOTE(review): selection logic is outside this
 * chunk — confirm against bnx2_init_nvram()); the remaining numeric
 * fields are device command/config words, followed by the flash_spec
 * fields (flags, page geometry, address mask, size, name).
 */
129 static struct flash_spec flash_table[] =
130 {
131 #define BUFFERED_FLAGS          (BNX2_NV_BUFFERED | BNX2_NV_TRANSLATE)
132 #define NONBUFFERED_FLAGS       (BNX2_NV_WREN)
133         /* Slow EEPROM */
134         {0x00000000, 0x40830380, 0x009f0081, 0xa184a053, 0xaf000400,
135          BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
136          SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
137          "EEPROM - slow"},
138         /* Expansion entry 0001 */
139         {0x08000002, 0x4b808201, 0x00050081, 0x03840253, 0xaf020406,
140          NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
141          SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
142          "Entry 0001"},
143         /* Saifun SA25F010 (non-buffered flash) */
144         /* strap, cfg1, & write1 need updates */
145         {0x04000001, 0x47808201, 0x00050081, 0x03840253, 0xaf020406,
146          NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
147          SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*2,
148          "Non-buffered flash (128kB)"},
149         /* Saifun SA25F020 (non-buffered flash) */
150         /* strap, cfg1, & write1 need updates */
151         {0x0c000003, 0x4f808201, 0x00050081, 0x03840253, 0xaf020406,
152          NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
153          SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*4,
154          "Non-buffered flash (256kB)"},
155         /* Expansion entry 0100 */
156         {0x11000000, 0x53808201, 0x00050081, 0x03840253, 0xaf020406,
157          NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
158          SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
159          "Entry 0100"},
160         /* Entry 0101: ST M45PE10 (non-buffered flash, TetonII B0) */
161         {0x19000002, 0x5b808201, 0x000500db, 0x03840253, 0xaf020406,
162          NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
163          ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*2,
164          "Entry 0101: ST M45PE10 (128kB non-bufferred)"},
165         /* Entry 0110: ST M45PE20 (non-buffered flash)*/
166         {0x15000001, 0x57808201, 0x000500db, 0x03840253, 0xaf020406,
167          NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
168          ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*4,
169          "Entry 0110: ST M45PE20 (256kB non-bufferred)"},
170         /* Saifun SA25F005 (non-buffered flash) */
171         /* strap, cfg1, & write1 need updates */
172         {0x1d000003, 0x5f808201, 0x00050081, 0x03840253, 0xaf020406,
173          NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
174          SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE,
175          "Non-buffered flash (64kB)"},
176         /* Fast EEPROM */
177         {0x22000000, 0x62808380, 0x009f0081, 0xa184a053, 0xaf000400,
178          BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
179          SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
180          "EEPROM - fast"},
181         /* Expansion entry 1001 */
182         {0x2a000002, 0x6b808201, 0x00050081, 0x03840253, 0xaf020406,
183          NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
184          SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
185          "Entry 1001"},
186         /* Expansion entry 1010 */
187         {0x26000001, 0x67808201, 0x00050081, 0x03840253, 0xaf020406,
188          NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
189          SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
190          "Entry 1010"},
191         /* ATMEL AT45DB011B (buffered flash) */
192         {0x2e000003, 0x6e808273, 0x00570081, 0x68848353, 0xaf000400,
193          BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
194          BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE,
195          "Buffered flash (128kB)"},
196         /* Expansion entry 1100 */
197         {0x33000000, 0x73808201, 0x00050081, 0x03840253, 0xaf020406,
198          NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
199          SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
200          "Entry 1100"},
201         /* Expansion entry 1101 */
202         {0x3b000002, 0x7b808201, 0x00050081, 0x03840253, 0xaf020406,
203          NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
204          SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
205          "Entry 1101"},
206         /* Ateml Expansion entry 1110 */
207         {0x37000001, 0x76808273, 0x00570081, 0x68848353, 0xaf000400,
208          BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
209          BUFFERED_FLASH_BYTE_ADDR_MASK, 0,
210          "Entry 1110 (Atmel)"},
211         /* ATMEL AT45DB021B (buffered flash) */
212         {0x3f000003, 0x7e808273, 0x00570081, 0x68848353, 0xaf000400,
213          BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
214          BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE*2,
215          "Buffered flash (256kB)"},
216 };
217
/* The 5709 has a fixed on-chip NVRAM interface, so a single spec is
 * used instead of probing flash_table[].
 */
218 static struct flash_spec flash_5709 = {
219         .flags          = BNX2_NV_BUFFERED,
220         .page_bits      = BCM5709_FLASH_PAGE_BITS,
221         .page_size      = BCM5709_FLASH_PAGE_SIZE,
222         .addr_mask      = BCM5709_FLASH_BYTE_ADDR_MASK,
223         .total_size     = BUFFERED_FLASH_TOTAL_SIZE*2,
224         .name           = "5709 Buffered flash (256kB)",
225 };
226
227 MODULE_DEVICE_TABLE(pci, bnx2_pci_tbl);
228
/* Return the number of free TX descriptors.
 * The smp_mb() orders this read of tx_prod/tx_cons against updates
 * made on other CPUs (NOTE(review): pairing barrier is in the xmit /
 * tx-completion paths outside this chunk — confirm).
 */
229 static inline u32 bnx2_tx_avail(struct bnx2 *bp, struct bnx2_napi *bnapi)
230 {
231         u32 diff;
232
233         smp_mb();
234
235         /* The ring uses 256 indices for 255 entries, one of them
236          * needs to be skipped.
237          */
238         diff = bp->tx_prod - bnapi->tx_cons;
239         if (unlikely(diff >= TX_DESC_CNT)) {
240                 diff &= 0xffff;
241                 if (diff == TX_DESC_CNT)
242                         diff = MAX_TX_DESC_CNT;
243         }
244         return (bp->tx_ring_size - diff);
245 }
246
/* Indirect register read through the PCICFG window.  indirect_lock
 * serializes the address-write / data-read pair against other users
 * of the shared window registers.
 */
247 static u32
248 bnx2_reg_rd_ind(struct bnx2 *bp, u32 offset)
249 {
250         u32 val;
251
252         spin_lock_bh(&bp->indirect_lock);
253         REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
254         val = REG_RD(bp, BNX2_PCICFG_REG_WINDOW);
255         spin_unlock_bh(&bp->indirect_lock);
256         return val;
257 }
258
/* Indirect register write through the PCICFG window; the lock keeps
 * the address/data pair atomic with respect to bnx2_reg_rd_ind().
 */
259 static void
260 bnx2_reg_wr_ind(struct bnx2 *bp, u32 offset, u32 val)
261 {
262         spin_lock_bh(&bp->indirect_lock);
263         REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
264         REG_WR(bp, BNX2_PCICFG_REG_WINDOW, val);
265         spin_unlock_bh(&bp->indirect_lock);
266 }
267
268 static void
269 bnx2_shmem_wr(struct bnx2 *bp, u32 offset, u32 val)
270 {
271         bnx2_reg_wr_ind(bp, bp->shmem_base + offset, val);
272 }
273
274 static u32
275 bnx2_shmem_rd(struct bnx2 *bp, u32 offset)
276 {
277         return (bnx2_reg_rd_ind(bp, bp->shmem_base + offset));
278 }
279
280 static void
281 bnx2_ctx_wr(struct bnx2 *bp, u32 cid_addr, u32 offset, u32 val)
282 {
283         offset += cid_addr;
284         spin_lock_bh(&bp->indirect_lock);
285         if (CHIP_NUM(bp) == CHIP_NUM_5709) {
286                 int i;
287
288                 REG_WR(bp, BNX2_CTX_CTX_DATA, val);
289                 REG_WR(bp, BNX2_CTX_CTX_CTRL,
290                        offset | BNX2_CTX_CTX_CTRL_WRITE_REQ);
291                 for (i = 0; i < 5; i++) {
292                         u32 val;
293                         val = REG_RD(bp, BNX2_CTX_CTX_CTRL);
294                         if ((val & BNX2_CTX_CTX_CTRL_WRITE_REQ) == 0)
295                                 break;
296                         udelay(5);
297                 }
298         } else {
299                 REG_WR(bp, BNX2_CTX_DATA_ADR, offset);
300                 REG_WR(bp, BNX2_CTX_DATA, val);
301         }
302         spin_unlock_bh(&bp->indirect_lock);
303 }
304
/* Read PHY register @reg over the EMAC MDIO interface into *@val.
 * If hardware auto-polling of the PHY is enabled, it is turned off
 * around the manual access and restored afterwards.
 * Returns 0 on success, -EBUSY (and *val = 0) if the transaction
 * never completes within the poll budget (50 * 10us).
 */
305 static int
306 bnx2_read_phy(struct bnx2 *bp, u32 reg, u32 *val)
307 {
308         u32 val1;
309         int i, ret;
310
/* Temporarily disable MDIO auto-polling so we own the bus. */
311         if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
312                 val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
313                 val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;
314
315                 REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
316                 REG_RD(bp, BNX2_EMAC_MDIO_MODE);
317
318                 udelay(40);
319         }
320
/* Start the read transaction: PHY address, register, command bits. */
321         val1 = (bp->phy_addr << 21) | (reg << 16) |
322                 BNX2_EMAC_MDIO_COMM_COMMAND_READ | BNX2_EMAC_MDIO_COMM_DISEXT |
323                 BNX2_EMAC_MDIO_COMM_START_BUSY;
324         REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);
325
/* Poll for START_BUSY to clear, then re-read to fetch the data bits. */
326         for (i = 0; i < 50; i++) {
327                 udelay(10);
328
329                 val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
330                 if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
331                         udelay(5);
332
333                         val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
334                         val1 &= BNX2_EMAC_MDIO_COMM_DATA;
335
336                         break;
337                 }
338         }
339
340         if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY) {
341                 *val = 0x0;
342                 ret = -EBUSY;
343         }
344         else {
345                 *val = val1;
346                 ret = 0;
347         }
348
/* Restore auto-polling if we disabled it above. */
349         if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
350                 val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
351                 val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;
352
353                 REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
354                 REG_RD(bp, BNX2_EMAC_MDIO_MODE);
355
356                 udelay(40);
357         }
358
359         return ret;
360 }
361
/* Write @val to PHY register @reg over the EMAC MDIO interface.
 * Mirrors bnx2_read_phy(): auto-polling is suspended around the
 * manual transaction and restored afterwards.
 * Returns 0 on success or -EBUSY on MDIO timeout.
 */
362 static int
363 bnx2_write_phy(struct bnx2 *bp, u32 reg, u32 val)
364 {
365         u32 val1;
366         int i, ret;
367
/* Temporarily disable MDIO auto-polling so we own the bus. */
368         if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
369                 val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
370                 val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;
371
372                 REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
373                 REG_RD(bp, BNX2_EMAC_MDIO_MODE);
374
375                 udelay(40);
376         }
377
/* Start the write: PHY address, register, data and command bits. */
378         val1 = (bp->phy_addr << 21) | (reg << 16) | val |
379                 BNX2_EMAC_MDIO_COMM_COMMAND_WRITE |
380                 BNX2_EMAC_MDIO_COMM_START_BUSY | BNX2_EMAC_MDIO_COMM_DISEXT;
381         REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);
382
/* Poll for the transaction to complete (START_BUSY cleared). */
383         for (i = 0; i < 50; i++) {
384                 udelay(10);
385
386                 val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
387                 if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
388                         udelay(5);
389                         break;
390                 }
391         }
392
393         if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)
394                 ret = -EBUSY;
395         else
396                 ret = 0;
397
/* Restore auto-polling if we disabled it above. */
398         if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
399                 val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
400                 val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;
401
402                 REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
403                 REG_RD(bp, BNX2_EMAC_MDIO_MODE);
404
405                 udelay(40);
406         }
407
408         return ret;
409 }
410
411 static void
412 bnx2_disable_int(struct bnx2 *bp)
413 {
414         int i;
415         struct bnx2_napi *bnapi;
416
417         for (i = 0; i < bp->irq_nvecs; i++) {
418                 bnapi = &bp->bnx2_napi[i];
419                 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
420                        BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
421         }
422         REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD);
423 }
424
/* Re-enable chip interrupts on every vector and kick the host
 * coalescing block.  Each vector gets two INT_ACK_CMD writes: the
 * first acknowledges up to last_status_idx with interrupts still
 * masked, the second unmasks (NOTE(review): presumably to avoid a
 * spurious interrupt window between ack and unmask — confirm against
 * the NetXtreme II programming guide).
 */
425 static void
426 bnx2_enable_int(struct bnx2 *bp)
427 {
428         int i;
429         struct bnx2_napi *bnapi;
430
431         for (i = 0; i < bp->irq_nvecs; i++) {
432                 bnapi = &bp->bnx2_napi[i];
433
434                 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
435                        BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
436                        BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
437                        bnapi->last_status_idx);
438
439                 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
440                        BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
441                        bnapi->last_status_idx);
442         }
/* Force an immediate coalescing pass so pending events are delivered. */
443         REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
444 }
445
446 static void
447 bnx2_disable_int_sync(struct bnx2 *bp)
448 {
449         int i;
450
451         atomic_inc(&bp->intr_sem);
452         bnx2_disable_int(bp);
453         for (i = 0; i < bp->irq_nvecs; i++)
454                 synchronize_irq(bp->irq_tbl[i].vector);
455 }
456
457 static void
458 bnx2_napi_disable(struct bnx2 *bp)
459 {
460         int i;
461
462         for (i = 0; i < bp->irq_nvecs; i++)
463                 napi_disable(&bp->bnx2_napi[i].napi);
464 }
465
466 static void
467 bnx2_napi_enable(struct bnx2 *bp)
468 {
469         int i;
470
471         for (i = 0; i < bp->irq_nvecs; i++)
472                 napi_enable(&bp->bnx2_napi[i].napi);
473 }
474
475 static void
476 bnx2_netif_stop(struct bnx2 *bp)
477 {
478         bnx2_disable_int_sync(bp);
479         if (netif_running(bp->dev)) {
480                 bnx2_napi_disable(bp);
481                 netif_tx_disable(bp->dev);
482                 bp->dev->trans_start = jiffies; /* prevent tx timeout */
483         }
484 }
485
486 static void
487 bnx2_netif_start(struct bnx2 *bp)
488 {
489         if (atomic_dec_and_test(&bp->intr_sem)) {
490                 if (netif_running(bp->dev)) {
491                         netif_wake_queue(bp->dev);
492                         bnx2_napi_enable(bp);
493                         bnx2_enable_int(bp);
494                 }
495         }
496 }
497
498 static void
499 bnx2_free_mem(struct bnx2 *bp)
500 {
501         int i;
502
503         for (i = 0; i < bp->ctx_pages; i++) {
504                 if (bp->ctx_blk[i]) {
505                         pci_free_consistent(bp->pdev, BCM_PAGE_SIZE,
506                                             bp->ctx_blk[i],
507                                             bp->ctx_blk_mapping[i]);
508                         bp->ctx_blk[i] = NULL;
509                 }
510         }
511         if (bp->status_blk) {
512                 pci_free_consistent(bp->pdev, bp->status_stats_size,
513                                     bp->status_blk, bp->status_blk_mapping);
514                 bp->status_blk = NULL;
515                 bp->stats_blk = NULL;
516         }
517         if (bp->tx_desc_ring) {
518                 pci_free_consistent(bp->pdev, TXBD_RING_SIZE,
519                                     bp->tx_desc_ring, bp->tx_desc_mapping);
520                 bp->tx_desc_ring = NULL;
521         }
522         kfree(bp->tx_buf_ring);
523         bp->tx_buf_ring = NULL;
524         for (i = 0; i < bp->rx_max_ring; i++) {
525                 if (bp->rx_desc_ring[i])
526                         pci_free_consistent(bp->pdev, RXBD_RING_SIZE,
527                                             bp->rx_desc_ring[i],
528                                             bp->rx_desc_mapping[i]);
529                 bp->rx_desc_ring[i] = NULL;
530         }
531         vfree(bp->rx_buf_ring);
532         bp->rx_buf_ring = NULL;
533         for (i = 0; i < bp->rx_max_pg_ring; i++) {
534                 if (bp->rx_pg_desc_ring[i])
535                         pci_free_consistent(bp->pdev, RXBD_RING_SIZE,
536                                             bp->rx_pg_desc_ring[i],
537                                             bp->rx_pg_desc_mapping[i]);
538                 bp->rx_pg_desc_ring[i] = NULL;
539         }
540         if (bp->rx_pg_ring)
541                 vfree(bp->rx_pg_ring);
542         bp->rx_pg_ring = NULL;
543 }
544
/* Allocate all rings and shared blocks used by the driver:
 * TX software/descriptor rings, RX buffer/descriptor rings, optional
 * RX page rings, the combined status+statistics DMA block, and (5709
 * only) context memory pages.  Any failure unwinds everything already
 * allocated via bnx2_free_mem().
 * Returns 0 on success or -ENOMEM.
 */
545 static int
546 bnx2_alloc_mem(struct bnx2 *bp)
547 {
548         int i, status_blk_size;
549
550         bp->tx_buf_ring = kzalloc(SW_TXBD_RING_SIZE, GFP_KERNEL);
551         if (bp->tx_buf_ring == NULL)
552                 return -ENOMEM;
553
554         bp->tx_desc_ring = pci_alloc_consistent(bp->pdev, TXBD_RING_SIZE,
555                                                 &bp->tx_desc_mapping);
556         if (bp->tx_desc_ring == NULL)
557                 goto alloc_mem_err;
558
/* Software RX ring can be large, so it comes from vmalloc. */
559         bp->rx_buf_ring = vmalloc(SW_RXBD_RING_SIZE * bp->rx_max_ring);
560         if (bp->rx_buf_ring == NULL)
561                 goto alloc_mem_err;
562
563         memset(bp->rx_buf_ring, 0, SW_RXBD_RING_SIZE * bp->rx_max_ring);
564
565         for (i = 0; i < bp->rx_max_ring; i++) {
566                 bp->rx_desc_ring[i] =
567                         pci_alloc_consistent(bp->pdev, RXBD_RING_SIZE,
568                                              &bp->rx_desc_mapping[i]);
569                 if (bp->rx_desc_ring[i] == NULL)
570                         goto alloc_mem_err;
571
572         }
573
/* Optional page ring used for split/jumbo RX buffers. */
574         if (bp->rx_pg_ring_size) {
575                 bp->rx_pg_ring = vmalloc(SW_RXPG_RING_SIZE *
576                                          bp->rx_max_pg_ring);
577                 if (bp->rx_pg_ring == NULL)
578                         goto alloc_mem_err;
579
580                 memset(bp->rx_pg_ring, 0, SW_RXPG_RING_SIZE *
581                        bp->rx_max_pg_ring);
582         }
583
584         for (i = 0; i < bp->rx_max_pg_ring; i++) {
585                 bp->rx_pg_desc_ring[i] =
586                         pci_alloc_consistent(bp->pdev, RXBD_RING_SIZE,
587                                              &bp->rx_pg_desc_mapping[i]);
588                 if (bp->rx_pg_desc_ring[i] == NULL)
589                         goto alloc_mem_err;
590
591         }
592
593         /* Combine status and statistics blocks into one allocation. */
594         status_blk_size = L1_CACHE_ALIGN(sizeof(struct status_block));
595         if (bp->flags & BNX2_FLAG_MSIX_CAP)
596                 status_blk_size = L1_CACHE_ALIGN(BNX2_MAX_MSIX_HW_VEC *
597                                                  BNX2_SBLK_MSIX_ALIGN_SIZE);
598         bp->status_stats_size = status_blk_size +
599                                 sizeof(struct statistics_block);
600
601         bp->status_blk = pci_alloc_consistent(bp->pdev, bp->status_stats_size,
602                                               &bp->status_blk_mapping);
603         if (bp->status_blk == NULL)
604                 goto alloc_mem_err;
605
606         memset(bp->status_blk, 0, bp->status_stats_size);
607
/* Vector 0 uses the base status block; MSI-X vectors get aligned
 * sub-blocks within the same allocation.
 */
608         bp->bnx2_napi[0].status_blk = bp->status_blk;
609         if (bp->flags & BNX2_FLAG_MSIX_CAP) {
610                 for (i = 1; i < BNX2_MAX_MSIX_VEC; i++) {
611                         struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
612
613                         bnapi->status_blk_msix = (void *)
614                                 ((unsigned long) bp->status_blk +
615                                  BNX2_SBLK_MSIX_ALIGN_SIZE * i);
616                         bnapi->int_num = i << 24;
617                 }
618         }
619
/* Statistics block lives right after the status block(s). */
620         bp->stats_blk = (void *) ((unsigned long) bp->status_blk +
621                                   status_blk_size);
622
623         bp->stats_blk_mapping = bp->status_blk_mapping + status_blk_size;
624
/* 5709 needs host-resident context memory (8kB total). */
625         if (CHIP_NUM(bp) == CHIP_NUM_5709) {
626                 bp->ctx_pages = 0x2000 / BCM_PAGE_SIZE;
627                 if (bp->ctx_pages == 0)
628                         bp->ctx_pages = 1;
629                 for (i = 0; i < bp->ctx_pages; i++) {
630                         bp->ctx_blk[i] = pci_alloc_consistent(bp->pdev,
631                                                 BCM_PAGE_SIZE,
632                                                 &bp->ctx_blk_mapping[i]);
633                         if (bp->ctx_blk[i] == NULL)
634                                 goto alloc_mem_err;
635                 }
636         }
637         return 0;
638
639 alloc_mem_err:
640         bnx2_free_mem(bp);
641         return -ENOMEM;
642 }
643
/* Report the current link state (speed/duplex/autoneg result) to the
 * bootcode via the shared-memory BNX2_LINK_STATUS word.  Skipped when
 * a remote PHY owns link management.
 */
644 static void
645 bnx2_report_fw_link(struct bnx2 *bp)
646 {
647         u32 fw_link_status = 0;
648
649         if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
650                 return;
651
652         if (bp->link_up) {
653                 u32 bmsr;
654
655                 switch (bp->line_speed) {
656                 case SPEED_10:
657                         if (bp->duplex == DUPLEX_HALF)
658                                 fw_link_status = BNX2_LINK_STATUS_10HALF;
659                         else
660                                 fw_link_status = BNX2_LINK_STATUS_10FULL;
661                         break;
662                 case SPEED_100:
663                         if (bp->duplex == DUPLEX_HALF)
664                                 fw_link_status = BNX2_LINK_STATUS_100HALF;
665                         else
666                                 fw_link_status = BNX2_LINK_STATUS_100FULL;
667                         break;
668                 case SPEED_1000:
669                         if (bp->duplex == DUPLEX_HALF)
670                                 fw_link_status = BNX2_LINK_STATUS_1000HALF;
671                         else
672                                 fw_link_status = BNX2_LINK_STATUS_1000FULL;
673                         break;
674                 case SPEED_2500:
675                         if (bp->duplex == DUPLEX_HALF)
676                                 fw_link_status = BNX2_LINK_STATUS_2500HALF;
677                         else
678                                 fw_link_status = BNX2_LINK_STATUS_2500FULL;
679                         break;
680                 }
681
682                 fw_link_status |= BNX2_LINK_STATUS_LINK_UP;
683
684                 if (bp->autoneg) {
685                         fw_link_status |= BNX2_LINK_STATUS_AN_ENABLED;
686
/* BMSR is read twice — NOTE(review): presumably because the link bit
 * is latched-low per IEEE 802.3 and the first read clears stale state;
 * confirm.
 */
687                         bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
688                         bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
689
690                         if (!(bmsr & BMSR_ANEGCOMPLETE) ||
691                             bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT)
692                                 fw_link_status |= BNX2_LINK_STATUS_PARALLEL_DET;
693                         else
694                                 fw_link_status |= BNX2_LINK_STATUS_AN_COMPLETE;
695                 }
696         }
697         else
698                 fw_link_status = BNX2_LINK_STATUS_LINK_DOWN;
699
700         bnx2_shmem_wr(bp, BNX2_LINK_STATUS, fw_link_status);
701 }
702
703 static char *
704 bnx2_xceiver_str(struct bnx2 *bp)
705 {
706         return ((bp->phy_port == PORT_FIBRE) ? "SerDes" :
707                 ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) ? "Remote Copper" :
708                  "Copper"));
709 }
710
/* Log the link state to the console, update the carrier flag, and
 * propagate the state to the bootcode.  The message is assembled from
 * several continuation printk()s, so the pieces must stay in order.
 */
711 static void
712 bnx2_report_link(struct bnx2 *bp)
713 {
714         if (bp->link_up) {
715                 netif_carrier_on(bp->dev);
716                 printk(KERN_INFO PFX "%s NIC %s Link is Up, ", bp->dev->name,
717                        bnx2_xceiver_str(bp));
718
719                 printk("%d Mbps ", bp->line_speed);
720
721                 if (bp->duplex == DUPLEX_FULL)
722                         printk("full duplex");
723                 else
724                         printk("half duplex");
725
726                 if (bp->flow_ctrl) {
727                         if (bp->flow_ctrl & FLOW_CTRL_RX) {
728                                 printk(", receive ");
729                                 if (bp->flow_ctrl & FLOW_CTRL_TX)
730                                         printk("& transmit ");
731                         }
732                         else {
733                                 printk(", transmit ");
734                         }
735                         printk("flow control ON");
736                 }
737                 printk("\n");
738         }
739         else {
740                 netif_carrier_off(bp->dev);
741                 printk(KERN_ERR PFX "%s NIC %s Link is Down\n", bp->dev->name,
742                        bnx2_xceiver_str(bp));
743         }
744
745         bnx2_report_fw_link(bp);
746 }
747
/* Resolve the pause (flow control) configuration into bp->flow_ctrl.
 * If autoneg of speed+pause is not fully enabled, the requested
 * setting is applied directly (full duplex only).  Otherwise the
 * negotiated local/partner advertisements are resolved per the
 * IEEE 802.3 pause resolution rules; 1000Base-X advertisement bits
 * are first mapped onto the copper ADVERTISE_PAUSE_* encoding so one
 * resolution path serves both media.
 */
748 static void
749 bnx2_resolve_flow_ctrl(struct bnx2 *bp)
750 {
751         u32 local_adv, remote_adv;
752
753         bp->flow_ctrl = 0;
754         if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
755                 (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {
756
757                 if (bp->duplex == DUPLEX_FULL) {
758                         bp->flow_ctrl = bp->req_flow_ctrl;
759                 }
760                 return;
761         }
762
/* Pause is only meaningful at full duplex. */
763         if (bp->duplex != DUPLEX_FULL) {
764                 return;
765         }
766
/* 5708 SerDes: the PHY reports the resolved pause result directly. */
767         if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
768             (CHIP_NUM(bp) == CHIP_NUM_5708)) {
769                 u32 val;
770
771                 bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
772                 if (val & BCM5708S_1000X_STAT1_TX_PAUSE)
773                         bp->flow_ctrl |= FLOW_CTRL_TX;
774                 if (val & BCM5708S_1000X_STAT1_RX_PAUSE)
775                         bp->flow_ctrl |= FLOW_CTRL_RX;
776                 return;
777         }
778
779         bnx2_read_phy(bp, bp->mii_adv, &local_adv);
780         bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);
781
/* Translate 1000Base-X pause bits to the common encoding. */
782         if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
783                 u32 new_local_adv = 0;
784                 u32 new_remote_adv = 0;
785
786                 if (local_adv & ADVERTISE_1000XPAUSE)
787                         new_local_adv |= ADVERTISE_PAUSE_CAP;
788                 if (local_adv & ADVERTISE_1000XPSE_ASYM)
789                         new_local_adv |= ADVERTISE_PAUSE_ASYM;
790                 if (remote_adv & ADVERTISE_1000XPAUSE)
791                         new_remote_adv |= ADVERTISE_PAUSE_CAP;
792                 if (remote_adv & ADVERTISE_1000XPSE_ASYM)
793                         new_remote_adv |= ADVERTISE_PAUSE_ASYM;
794
795                 local_adv = new_local_adv;
796                 remote_adv = new_remote_adv;
797         }
798
799         /* See Table 28B-3 of 802.3ab-1999 spec. */
800         if (local_adv & ADVERTISE_PAUSE_CAP) {
801                 if(local_adv & ADVERTISE_PAUSE_ASYM) {
802                         if (remote_adv & ADVERTISE_PAUSE_CAP) {
803                                 bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
804                         }
805                         else if (remote_adv & ADVERTISE_PAUSE_ASYM) {
806                                 bp->flow_ctrl = FLOW_CTRL_RX;
807                         }
808                 }
809                 else {
810                         if (remote_adv & ADVERTISE_PAUSE_CAP) {
811                                 bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
812                         }
813                 }
814         }
815         else if (local_adv & ADVERTISE_PAUSE_ASYM) {
816                 if ((remote_adv & ADVERTISE_PAUSE_CAP) &&
817                         (remote_adv & ADVERTISE_PAUSE_ASYM)) {
818
819                         bp->flow_ctrl = FLOW_CTRL_TX;
820                 }
821         }
822 }
823
/* Handle link-up on a 5709 SerDes PHY: read the GP status block to
 * determine the resolved speed and duplex, or use the requested
 * values when autoneg is off.  Always returns 0.
 */
824 static int
825 bnx2_5709s_linkup(struct bnx2 *bp)
826 {
827         u32 val, speed;
828
829         bp->link_up = 1;
830
/* Select the GP_STATUS register block, read the AN status word, then
 * restore the default COMBO_IEEEB0 block.
 */
831         bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_GP_STATUS);
832         bnx2_read_phy(bp, MII_BNX2_GP_TOP_AN_STATUS1, &val);
833         bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
834
835         if ((bp->autoneg & AUTONEG_SPEED) == 0) {
836                 bp->line_speed = bp->req_line_speed;
837                 bp->duplex = bp->req_duplex;
838                 return 0;
839         }
840         speed = val & MII_BNX2_GP_TOP_AN_SPEED_MSK;
841         switch (speed) {
842                 case MII_BNX2_GP_TOP_AN_SPEED_10:
843                         bp->line_speed = SPEED_10;
844                         break;
845                 case MII_BNX2_GP_TOP_AN_SPEED_100:
846                         bp->line_speed = SPEED_100;
847                         break;
848                 case MII_BNX2_GP_TOP_AN_SPEED_1G:
849                 case MII_BNX2_GP_TOP_AN_SPEED_1GKV:
850                         bp->line_speed = SPEED_1000;
851                         break;
852                 case MII_BNX2_GP_TOP_AN_SPEED_2_5G:
853                         bp->line_speed = SPEED_2500;
854                         break;
855         }
856         if (val & MII_BNX2_GP_TOP_AN_FD)
857                 bp->duplex = DUPLEX_FULL;
858         else
859                 bp->duplex = DUPLEX_HALF;
860         return 0;
861 }
862
863 static int
864 bnx2_5708s_linkup(struct bnx2 *bp)
865 {
866         u32 val;
867
868         bp->link_up = 1;
869         bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
870         switch (val & BCM5708S_1000X_STAT1_SPEED_MASK) {
871                 case BCM5708S_1000X_STAT1_SPEED_10:
872                         bp->line_speed = SPEED_10;
873                         break;
874                 case BCM5708S_1000X_STAT1_SPEED_100:
875                         bp->line_speed = SPEED_100;
876                         break;
877                 case BCM5708S_1000X_STAT1_SPEED_1G:
878                         bp->line_speed = SPEED_1000;
879                         break;
880                 case BCM5708S_1000X_STAT1_SPEED_2G5:
881                         bp->line_speed = SPEED_2500;
882                         break;
883         }
884         if (val & BCM5708S_1000X_STAT1_FD)
885                 bp->duplex = DUPLEX_FULL;
886         else
887                 bp->duplex = DUPLEX_HALF;
888
889         return 0;
890 }
891
892 static int
893 bnx2_5706s_linkup(struct bnx2 *bp)
894 {
895         u32 bmcr, local_adv, remote_adv, common;
896
897         bp->link_up = 1;
898         bp->line_speed = SPEED_1000;
899
900         bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
901         if (bmcr & BMCR_FULLDPLX) {
902                 bp->duplex = DUPLEX_FULL;
903         }
904         else {
905                 bp->duplex = DUPLEX_HALF;
906         }
907
908         if (!(bmcr & BMCR_ANENABLE)) {
909                 return 0;
910         }
911
912         bnx2_read_phy(bp, bp->mii_adv, &local_adv);
913         bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);
914
915         common = local_adv & remote_adv;
916         if (common & (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL)) {
917
918                 if (common & ADVERTISE_1000XFULL) {
919                         bp->duplex = DUPLEX_FULL;
920                 }
921                 else {
922                         bp->duplex = DUPLEX_HALF;
923                 }
924         }
925
926         return 0;
927 }
928
929 static int
930 bnx2_copper_linkup(struct bnx2 *bp)
931 {
932         u32 bmcr;
933
934         bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
935         if (bmcr & BMCR_ANENABLE) {
936                 u32 local_adv, remote_adv, common;
937
938                 bnx2_read_phy(bp, MII_CTRL1000, &local_adv);
939                 bnx2_read_phy(bp, MII_STAT1000, &remote_adv);
940
941                 common = local_adv & (remote_adv >> 2);
942                 if (common & ADVERTISE_1000FULL) {
943                         bp->line_speed = SPEED_1000;
944                         bp->duplex = DUPLEX_FULL;
945                 }
946                 else if (common & ADVERTISE_1000HALF) {
947                         bp->line_speed = SPEED_1000;
948                         bp->duplex = DUPLEX_HALF;
949                 }
950                 else {
951                         bnx2_read_phy(bp, bp->mii_adv, &local_adv);
952                         bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);
953
954                         common = local_adv & remote_adv;
955                         if (common & ADVERTISE_100FULL) {
956                                 bp->line_speed = SPEED_100;
957                                 bp->duplex = DUPLEX_FULL;
958                         }
959                         else if (common & ADVERTISE_100HALF) {
960                                 bp->line_speed = SPEED_100;
961                                 bp->duplex = DUPLEX_HALF;
962                         }
963                         else if (common & ADVERTISE_10FULL) {
964                                 bp->line_speed = SPEED_10;
965                                 bp->duplex = DUPLEX_FULL;
966                         }
967                         else if (common & ADVERTISE_10HALF) {
968                                 bp->line_speed = SPEED_10;
969                                 bp->duplex = DUPLEX_HALF;
970                         }
971                         else {
972                                 bp->line_speed = 0;
973                                 bp->link_up = 0;
974                         }
975                 }
976         }
977         else {
978                 if (bmcr & BMCR_SPEED100) {
979                         bp->line_speed = SPEED_100;
980                 }
981                 else {
982                         bp->line_speed = SPEED_10;
983                 }
984                 if (bmcr & BMCR_FULLDPLX) {
985                         bp->duplex = DUPLEX_FULL;
986                 }
987                 else {
988                         bp->duplex = DUPLEX_HALF;
989                 }
990         }
991
992         return 0;
993 }
994
/* Program the EMAC to match the current link state (speed, duplex,
 * flow control) held in *bp, then ack the link-change interrupt.
 * Always returns 0.
 */
static int
bnx2_set_mac_link(struct bnx2 *bp)
{
	u32 val;

	/* Default inter-packet gap timings; the 1000 Mbps half-duplex
	 * case below needs the extended values.
	 */
	REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x2620);
	if (bp->link_up && (bp->line_speed == SPEED_1000) &&
		(bp->duplex == DUPLEX_HALF)) {
		REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x26ff);
	}

	/* Configure the EMAC mode register. */
	val = REG_RD(bp, BNX2_EMAC_MODE);

	val &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
		BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
		BNX2_EMAC_MODE_25G_MODE);

	if (bp->link_up) {
		switch (bp->line_speed) {
			case SPEED_10:
				/* Only chips other than the 5706 have the
				 * dedicated 10M MII port mode.
				 */
				if (CHIP_NUM(bp) != CHIP_NUM_5706) {
					val |= BNX2_EMAC_MODE_PORT_MII_10M;
					break;
				}
				/* fall through */
			case SPEED_100:
				val |= BNX2_EMAC_MODE_PORT_MII;
				break;
			case SPEED_2500:
				/* 2.5G also needs GMII mode below. */
				val |= BNX2_EMAC_MODE_25G_MODE;
				/* fall through */
			case SPEED_1000:
				val |= BNX2_EMAC_MODE_PORT_GMII;
				break;
		}
	}
	else {
		/* Link down: leave the port in GMII mode. */
		val |= BNX2_EMAC_MODE_PORT_GMII;
	}

	/* Set the MAC to operate in the appropriate duplex mode. */
	if (bp->duplex == DUPLEX_HALF)
		val |= BNX2_EMAC_MODE_HALF_DUPLEX;
	REG_WR(bp, BNX2_EMAC_MODE, val);

	/* Enable/disable rx PAUSE. */
	bp->rx_mode &= ~BNX2_EMAC_RX_MODE_FLOW_EN;

	if (bp->flow_ctrl & FLOW_CTRL_RX)
		bp->rx_mode |= BNX2_EMAC_RX_MODE_FLOW_EN;
	REG_WR(bp, BNX2_EMAC_RX_MODE, bp->rx_mode);

	/* Enable/disable tx PAUSE. */
	val = REG_RD(bp, BNX2_EMAC_TX_MODE);
	val &= ~BNX2_EMAC_TX_MODE_FLOW_EN;

	if (bp->flow_ctrl & FLOW_CTRL_TX)
		val |= BNX2_EMAC_TX_MODE_FLOW_EN;
	REG_WR(bp, BNX2_EMAC_TX_MODE, val);

	/* Acknowledge the interrupt. */
	REG_WR(bp, BNX2_EMAC_STATUS, BNX2_EMAC_STATUS_LINK_CHANGE);

	return 0;
}
1061
1062 static void
1063 bnx2_enable_bmsr1(struct bnx2 *bp)
1064 {
1065         if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
1066             (CHIP_NUM(bp) == CHIP_NUM_5709))
1067                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1068                                MII_BNX2_BLK_ADDR_GP_STATUS);
1069 }
1070
1071 static void
1072 bnx2_disable_bmsr1(struct bnx2 *bp)
1073 {
1074         if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
1075             (CHIP_NUM(bp) == CHIP_NUM_5709))
1076                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1077                                MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1078 }
1079
1080 static int
1081 bnx2_test_and_enable_2g5(struct bnx2 *bp)
1082 {
1083         u32 up1;
1084         int ret = 1;
1085
1086         if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
1087                 return 0;
1088
1089         if (bp->autoneg & AUTONEG_SPEED)
1090                 bp->advertising |= ADVERTISED_2500baseX_Full;
1091
1092         if (CHIP_NUM(bp) == CHIP_NUM_5709)
1093                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
1094
1095         bnx2_read_phy(bp, bp->mii_up1, &up1);
1096         if (!(up1 & BCM5708S_UP1_2G5)) {
1097                 up1 |= BCM5708S_UP1_2G5;
1098                 bnx2_write_phy(bp, bp->mii_up1, up1);
1099                 ret = 0;
1100         }
1101
1102         if (CHIP_NUM(bp) == CHIP_NUM_5709)
1103                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1104                                MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1105
1106         return ret;
1107 }
1108
1109 static int
1110 bnx2_test_and_disable_2g5(struct bnx2 *bp)
1111 {
1112         u32 up1;
1113         int ret = 0;
1114
1115         if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
1116                 return 0;
1117
1118         if (CHIP_NUM(bp) == CHIP_NUM_5709)
1119                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
1120
1121         bnx2_read_phy(bp, bp->mii_up1, &up1);
1122         if (up1 & BCM5708S_UP1_2G5) {
1123                 up1 &= ~BCM5708S_UP1_2G5;
1124                 bnx2_write_phy(bp, bp->mii_up1, up1);
1125                 ret = 1;
1126         }
1127
1128         if (CHIP_NUM(bp) == CHIP_NUM_5709)
1129                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1130                                MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1131
1132         return ret;
1133 }
1134
1135 static void
1136 bnx2_enable_forced_2g5(struct bnx2 *bp)
1137 {
1138         u32 bmcr;
1139
1140         if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
1141                 return;
1142
1143         if (CHIP_NUM(bp) == CHIP_NUM_5709) {
1144                 u32 val;
1145
1146                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1147                                MII_BNX2_BLK_ADDR_SERDES_DIG);
1148                 bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val);
1149                 val &= ~MII_BNX2_SD_MISC1_FORCE_MSK;
1150                 val |= MII_BNX2_SD_MISC1_FORCE | MII_BNX2_SD_MISC1_FORCE_2_5G;
1151                 bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);
1152
1153                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1154                                MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1155                 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1156
1157         } else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
1158                 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1159                 bmcr |= BCM5708S_BMCR_FORCE_2500;
1160         }
1161
1162         if (bp->autoneg & AUTONEG_SPEED) {
1163                 bmcr &= ~BMCR_ANENABLE;
1164                 if (bp->req_duplex == DUPLEX_FULL)
1165                         bmcr |= BMCR_FULLDPLX;
1166         }
1167         bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
1168 }
1169
1170 static void
1171 bnx2_disable_forced_2g5(struct bnx2 *bp)
1172 {
1173         u32 bmcr;
1174
1175         if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
1176                 return;
1177
1178         if (CHIP_NUM(bp) == CHIP_NUM_5709) {
1179                 u32 val;
1180
1181                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1182                                MII_BNX2_BLK_ADDR_SERDES_DIG);
1183                 bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val);
1184                 val &= ~MII_BNX2_SD_MISC1_FORCE;
1185                 bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);
1186
1187                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1188                                MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1189                 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1190
1191         } else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
1192                 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1193                 bmcr &= ~BCM5708S_BMCR_FORCE_2500;
1194         }
1195
1196         if (bp->autoneg & AUTONEG_SPEED)
1197                 bmcr |= BMCR_SPEED1000 | BMCR_ANENABLE | BMCR_ANRESTART;
1198         bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
1199 }
1200
1201 static void
1202 bnx2_5706s_force_link_dn(struct bnx2 *bp, int start)
1203 {
1204         u32 val;
1205
1206         bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS, MII_EXPAND_SERDES_CTL);
1207         bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &val);
1208         if (start)
1209                 bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val & 0xff0f);
1210         else
1211                 bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val | 0xc0);
1212 }
1213
/* Re-evaluate PHY link state and reprogram the MAC to match.
 * Reports link transitions via bnx2_report_link().  Always returns 0.
 */
static int
bnx2_set_link(struct bnx2 *bp)
{
	u32 bmsr;
	u8 link_up;

	/* In loopback mode just report the link as up. */
	if (bp->loopback == MAC_LOOPBACK || bp->loopback == PHY_LOOPBACK) {
		bp->link_up = 1;
		return 0;
	}

	/* A firmware-managed (remote) PHY is handled elsewhere. */
	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
		return 0;

	link_up = bp->link_up;

	/* Read BMSR twice: the link-down indication is latched and the
	 * first read clears it, so the second read gives current state.
	 */
	bnx2_enable_bmsr1(bp);
	bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
	bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
	bnx2_disable_bmsr1(bp);

	if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
	    (CHIP_NUM(bp) == CHIP_NUM_5706)) {
		u32 val;

		/* On 5706 SerDes, release any forced-down state and use
		 * the EMAC link status instead of the BMSR link bit.
		 */
		if (bp->phy_flags & BNX2_PHY_FLAG_FORCED_DOWN) {
			bnx2_5706s_force_link_dn(bp, 0);
			bp->phy_flags &= ~BNX2_PHY_FLAG_FORCED_DOWN;
		}
		val = REG_RD(bp, BNX2_EMAC_STATUS);
		if (val & BNX2_EMAC_STATUS_LINK)
			bmsr |= BMSR_LSTATUS;
		else
			bmsr &= ~BMSR_LSTATUS;
	}

	if (bmsr & BMSR_LSTATUS) {
		bp->link_up = 1;

		/* Decode speed/duplex with the chip-specific helper. */
		if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
			if (CHIP_NUM(bp) == CHIP_NUM_5706)
				bnx2_5706s_linkup(bp);
			else if (CHIP_NUM(bp) == CHIP_NUM_5708)
				bnx2_5708s_linkup(bp);
			else if (CHIP_NUM(bp) == CHIP_NUM_5709)
				bnx2_5709s_linkup(bp);
		}
		else {
			bnx2_copper_linkup(bp);
		}
		bnx2_resolve_flow_ctrl(bp);
	}
	else {
		/* Link down: undo forced 2.5G and parallel-detect state
		 * so the next autonegotiation starts clean.
		 */
		if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
		    (bp->autoneg & AUTONEG_SPEED))
			bnx2_disable_forced_2g5(bp);

		if (bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT) {
			u32 bmcr;

			bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
			bmcr |= BMCR_ANENABLE;
			bnx2_write_phy(bp, bp->mii_bmcr, bmcr);

			bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;
		}
		bp->link_up = 0;
	}

	/* Log only actual link transitions. */
	if (bp->link_up != link_up) {
		bnx2_report_link(bp);
	}

	bnx2_set_mac_link(bp);

	return 0;
}
1291
1292 static int
1293 bnx2_reset_phy(struct bnx2 *bp)
1294 {
1295         int i;
1296         u32 reg;
1297
1298         bnx2_write_phy(bp, bp->mii_bmcr, BMCR_RESET);
1299
1300 #define PHY_RESET_MAX_WAIT 100
1301         for (i = 0; i < PHY_RESET_MAX_WAIT; i++) {
1302                 udelay(10);
1303
1304                 bnx2_read_phy(bp, bp->mii_bmcr, &reg);
1305                 if (!(reg & BMCR_RESET)) {
1306                         udelay(20);
1307                         break;
1308                 }
1309         }
1310         if (i == PHY_RESET_MAX_WAIT) {
1311                 return -EBUSY;
1312         }
1313         return 0;
1314 }
1315
1316 static u32
1317 bnx2_phy_get_pause_adv(struct bnx2 *bp)
1318 {
1319         u32 adv = 0;
1320
1321         if ((bp->req_flow_ctrl & (FLOW_CTRL_RX | FLOW_CTRL_TX)) ==
1322                 (FLOW_CTRL_RX | FLOW_CTRL_TX)) {
1323
1324                 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1325                         adv = ADVERTISE_1000XPAUSE;
1326                 }
1327                 else {
1328                         adv = ADVERTISE_PAUSE_CAP;
1329                 }
1330         }
1331         else if (bp->req_flow_ctrl & FLOW_CTRL_TX) {
1332                 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1333                         adv = ADVERTISE_1000XPSE_ASYM;
1334                 }
1335                 else {
1336                         adv = ADVERTISE_PAUSE_ASYM;
1337                 }
1338         }
1339         else if (bp->req_flow_ctrl & FLOW_CTRL_RX) {
1340                 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1341                         adv = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
1342                 }
1343                 else {
1344                         adv = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
1345                 }
1346         }
1347         return adv;
1348 }
1349
1350 static int bnx2_fw_sync(struct bnx2 *, u32, int);
1351
1352 static int
1353 bnx2_setup_remote_phy(struct bnx2 *bp, u8 port)
1354 {
1355         u32 speed_arg = 0, pause_adv;
1356
1357         pause_adv = bnx2_phy_get_pause_adv(bp);
1358
1359         if (bp->autoneg & AUTONEG_SPEED) {
1360                 speed_arg |= BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG;
1361                 if (bp->advertising & ADVERTISED_10baseT_Half)
1362                         speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_10HALF;
1363                 if (bp->advertising & ADVERTISED_10baseT_Full)
1364                         speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_10FULL;
1365                 if (bp->advertising & ADVERTISED_100baseT_Half)
1366                         speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_100HALF;
1367                 if (bp->advertising & ADVERTISED_100baseT_Full)
1368                         speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_100FULL;
1369                 if (bp->advertising & ADVERTISED_1000baseT_Full)
1370                         speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_1GFULL;
1371                 if (bp->advertising & ADVERTISED_2500baseX_Full)
1372                         speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_2G5FULL;
1373         } else {
1374                 if (bp->req_line_speed == SPEED_2500)
1375                         speed_arg = BNX2_NETLINK_SET_LINK_SPEED_2G5FULL;
1376                 else if (bp->req_line_speed == SPEED_1000)
1377                         speed_arg = BNX2_NETLINK_SET_LINK_SPEED_1GFULL;
1378                 else if (bp->req_line_speed == SPEED_100) {
1379                         if (bp->req_duplex == DUPLEX_FULL)
1380                                 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_100FULL;
1381                         else
1382                                 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_100HALF;
1383                 } else if (bp->req_line_speed == SPEED_10) {
1384                         if (bp->req_duplex == DUPLEX_FULL)
1385                                 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_10FULL;
1386                         else
1387                                 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_10HALF;
1388                 }
1389         }
1390
1391         if (pause_adv & (ADVERTISE_1000XPAUSE | ADVERTISE_PAUSE_CAP))
1392                 speed_arg |= BNX2_NETLINK_SET_LINK_FC_SYM_PAUSE;
1393         if (pause_adv & (ADVERTISE_1000XPSE_ASYM | ADVERTISE_1000XPSE_ASYM))
1394                 speed_arg |= BNX2_NETLINK_SET_LINK_FC_ASYM_PAUSE;
1395
1396         if (port == PORT_TP)
1397                 speed_arg |= BNX2_NETLINK_SET_LINK_PHY_APP_REMOTE |
1398                              BNX2_NETLINK_SET_LINK_ETH_AT_WIRESPEED;
1399
1400         bnx2_shmem_wr(bp, BNX2_DRV_MB_ARG0, speed_arg);
1401
1402         spin_unlock_bh(&bp->phy_lock);
1403         bnx2_fw_sync(bp, BNX2_DRV_MSG_CODE_CMD_SET_LINK, 0);
1404         spin_lock_bh(&bp->phy_lock);
1405
1406         return 0;
1407 }
1408
/* Configure a SerDes PHY: either force the requested speed/duplex or
 * (re)start autonegotiation with the current advertisement.
 * Called with bp->phy_lock held; the lock is briefly dropped around the
 * msleep() below.  Always returns 0.
 */
static int
bnx2_setup_serdes_phy(struct bnx2 *bp, u8 port)
{
	u32 adv, bmcr;
	u32 new_adv = 0;

	/* Firmware-managed PHYs are configured through shared memory. */
	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
		return (bnx2_setup_remote_phy(bp, port));

	if (!(bp->autoneg & AUTONEG_SPEED)) {
		u32 new_bmcr;
		int force_link_down = 0;

		/* Align the 2.5G enable bit with the requested speed;
		 * if it had to change, the link must be bounced.
		 */
		if (bp->req_line_speed == SPEED_2500) {
			if (!bnx2_test_and_enable_2g5(bp))
				force_link_down = 1;
		} else if (bp->req_line_speed == SPEED_1000) {
			if (bnx2_test_and_disable_2g5(bp))
				force_link_down = 1;
		}
		bnx2_read_phy(bp, bp->mii_adv, &adv);
		adv &= ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF);

		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
		new_bmcr = bmcr & ~BMCR_ANENABLE;
		new_bmcr |= BMCR_SPEED1000;

		if (CHIP_NUM(bp) == CHIP_NUM_5709) {
			if (bp->req_line_speed == SPEED_2500)
				bnx2_enable_forced_2g5(bp);
			else if (bp->req_line_speed == SPEED_1000) {
				bnx2_disable_forced_2g5(bp);
				/* 0x2000 is the BMCR_SPEED100 bit; clear
				 * it so bits 13/6 select 1000 Mbps.
				 */
				new_bmcr &= ~0x2000;
			}

		} else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
			if (bp->req_line_speed == SPEED_2500)
				new_bmcr |= BCM5708S_BMCR_FORCE_2500;
			else
				new_bmcr = bmcr & ~BCM5708S_BMCR_FORCE_2500;
		}

		if (bp->req_duplex == DUPLEX_FULL) {
			adv |= ADVERTISE_1000XFULL;
			new_bmcr |= BMCR_FULLDPLX;
		}
		else {
			adv |= ADVERTISE_1000XHALF;
			new_bmcr &= ~BMCR_FULLDPLX;
		}
		if ((new_bmcr != bmcr) || (force_link_down)) {
			/* Force a link down visible on the other side */
			if (bp->link_up) {
				bnx2_write_phy(bp, bp->mii_adv, adv &
					       ~(ADVERTISE_1000XFULL |
						 ADVERTISE_1000XHALF));
				bnx2_write_phy(bp, bp->mii_bmcr, bmcr |
					BMCR_ANRESTART | BMCR_ANENABLE);

				bp->link_up = 0;
				netif_carrier_off(bp->dev);
				bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
				bnx2_report_link(bp);
			}
			bnx2_write_phy(bp, bp->mii_adv, adv);
			bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
		} else {
			/* Nothing changed; just re-resolve and program
			 * the MAC.
			 */
			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);
		}
		return 0;
	}

	/* Autoneg path: build the new advertisement word. */
	bnx2_test_and_enable_2g5(bp);

	if (bp->advertising & ADVERTISED_1000baseT_Full)
		new_adv |= ADVERTISE_1000XFULL;

	new_adv |= bnx2_phy_get_pause_adv(bp);

	bnx2_read_phy(bp, bp->mii_adv, &adv);
	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

	bp->serdes_an_pending = 0;
	if ((adv != new_adv) || ((bmcr & BMCR_ANENABLE) == 0)) {
		/* Force a link down visible on the other side */
		if (bp->link_up) {
			bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
			spin_unlock_bh(&bp->phy_lock);
			msleep(20);
			spin_lock_bh(&bp->phy_lock);
		}

		bnx2_write_phy(bp, bp->mii_adv, new_adv);
		bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART |
			BMCR_ANENABLE);
		/* Speed up link-up time when the link partner
		 * does not autonegotiate which is very common
		 * in blade servers. Some blade servers use
		 * IPMI for keyboard input and it's important
		 * to minimize link disruptions. Autoneg. involves
		 * exchanging base pages plus 3 next pages and
		 * normally completes in about 120 msec.
		 */
		bp->current_interval = SERDES_AN_TIMEOUT;
		bp->serdes_an_pending = 1;
		mod_timer(&bp->timer, jiffies + bp->current_interval);
	} else {
		bnx2_resolve_flow_ctrl(bp);
		bnx2_set_mac_link(bp);
	}

	return 0;
}
1523
/* Advertisement mask of all fibre speeds; includes 2.5G only when the
 * PHY is 2.5G capable.
 * NOTE(review): the ternary expansion is not parenthesized, so in an
 * expression such as "ETHTOOL_ALL_FIBRE_SPEED | X" the '|' binds into
 * the false branch only ('|' has higher precedence than '?:').  Verify
 * this is intended at each use site before relying on the macro inside
 * a larger expression.
 */
#define ETHTOOL_ALL_FIBRE_SPEED                                         \
        (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) ?                  \
                (ADVERTISED_2500baseX_Full | ADVERTISED_1000baseT_Full) :\
                (ADVERTISED_1000baseT_Full)

/* Advertisement mask of all copper speeds (ethtool ADVERTISED_* bits). */
#define ETHTOOL_ALL_COPPER_SPEED                                        \
        (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |            \
        ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |           \
        ADVERTISED_1000baseT_Full)

/* MII base-page advertisement bits for all 10/100 modes plus the
 * 802.3 CSMA selector field.
 */
#define PHY_ALL_10_100_SPEED (ADVERTISE_10HALF | ADVERTISE_10FULL | \
        ADVERTISE_100HALF | ADVERTISE_100FULL | ADVERTISE_CSMA)

/* MII 1000BASE-T control register advertisement bits. */
#define PHY_ALL_1000_SPEED (ADVERTISE_1000HALF | ADVERTISE_1000FULL)
1538
1539 static void
1540 bnx2_set_default_remote_link(struct bnx2 *bp)
1541 {
1542         u32 link;
1543
1544         if (bp->phy_port == PORT_TP)
1545                 link = bnx2_shmem_rd(bp, BNX2_RPHY_COPPER_LINK);
1546         else
1547                 link = bnx2_shmem_rd(bp, BNX2_RPHY_SERDES_LINK);
1548
1549         if (link & BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG) {
1550                 bp->req_line_speed = 0;
1551                 bp->autoneg |= AUTONEG_SPEED;
1552                 bp->advertising = ADVERTISED_Autoneg;
1553                 if (link & BNX2_NETLINK_SET_LINK_SPEED_10HALF)
1554                         bp->advertising |= ADVERTISED_10baseT_Half;
1555                 if (link & BNX2_NETLINK_SET_LINK_SPEED_10FULL)
1556                         bp->advertising |= ADVERTISED_10baseT_Full;
1557                 if (link & BNX2_NETLINK_SET_LINK_SPEED_100HALF)
1558                         bp->advertising |= ADVERTISED_100baseT_Half;
1559                 if (link & BNX2_NETLINK_SET_LINK_SPEED_100FULL)
1560                         bp->advertising |= ADVERTISED_100baseT_Full;
1561                 if (link & BNX2_NETLINK_SET_LINK_SPEED_1GFULL)
1562                         bp->advertising |= ADVERTISED_1000baseT_Full;
1563                 if (link & BNX2_NETLINK_SET_LINK_SPEED_2G5FULL)
1564                         bp->advertising |= ADVERTISED_2500baseX_Full;
1565         } else {
1566                 bp->autoneg = 0;
1567                 bp->advertising = 0;
1568                 bp->req_duplex = DUPLEX_FULL;
1569                 if (link & BNX2_NETLINK_SET_LINK_SPEED_10) {
1570                         bp->req_line_speed = SPEED_10;
1571                         if (link & BNX2_NETLINK_SET_LINK_SPEED_10HALF)
1572                                 bp->req_duplex = DUPLEX_HALF;
1573                 }
1574                 if (link & BNX2_NETLINK_SET_LINK_SPEED_100) {
1575                         bp->req_line_speed = SPEED_100;
1576                         if (link & BNX2_NETLINK_SET_LINK_SPEED_100HALF)
1577                                 bp->req_duplex = DUPLEX_HALF;
1578                 }
1579                 if (link & BNX2_NETLINK_SET_LINK_SPEED_1GFULL)
1580                         bp->req_line_speed = SPEED_1000;
1581                 if (link & BNX2_NETLINK_SET_LINK_SPEED_2G5FULL)
1582                         bp->req_line_speed = SPEED_2500;
1583         }
1584 }
1585
1586 static void
1587 bnx2_set_default_link(struct bnx2 *bp)
1588 {
1589         if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
1590                 return bnx2_set_default_remote_link(bp);
1591
1592         bp->autoneg = AUTONEG_SPEED | AUTONEG_FLOW_CTRL;
1593         bp->req_line_speed = 0;
1594         if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1595                 u32 reg;
1596
1597                 bp->advertising = ETHTOOL_ALL_FIBRE_SPEED | ADVERTISED_Autoneg;
1598
1599                 reg = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_CONFIG);
1600                 reg &= BNX2_PORT_HW_CFG_CFG_DFLT_LINK_MASK;
1601                 if (reg == BNX2_PORT_HW_CFG_CFG_DFLT_LINK_1G) {
1602                         bp->autoneg = 0;
1603                         bp->req_line_speed = bp->line_speed = SPEED_1000;
1604                         bp->req_duplex = DUPLEX_FULL;
1605                 }
1606         } else
1607                 bp->advertising = ETHTOOL_ALL_COPPER_SPEED | ADVERTISED_Autoneg;
1608 }
1609
1610 static void
1611 bnx2_send_heart_beat(struct bnx2 *bp)
1612 {
1613         u32 msg;
1614         u32 addr;
1615
1616         spin_lock(&bp->indirect_lock);
1617         msg = (u32) (++bp->fw_drv_pulse_wr_seq & BNX2_DRV_PULSE_SEQ_MASK);
1618         addr = bp->shmem_base + BNX2_DRV_PULSE_MB;
1619         REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, addr);
1620         REG_WR(bp, BNX2_PCICFG_REG_WINDOW, msg);
1621         spin_unlock(&bp->indirect_lock);
1622 }
1623
/* Handle a link-status event reported by the remote (bootcode-managed)
 * PHY via the BNX2_LINK_STATUS shared-memory word: answer any pending
 * heartbeat request, decode link state / speed / duplex / flow control
 * into *bp, switch the reported port type if it changed, and reprogram
 * the MAC to match.
 */
static void
bnx2_remote_phy_event(struct bnx2 *bp)
{
	u32 msg;
	u8 link_up = bp->link_up;
	u8 old_port;

	msg = bnx2_shmem_rd(bp, BNX2_LINK_STATUS);

	/* Firmware piggybacks heartbeat expiry on the link status word. */
	if (msg & BNX2_LINK_STATUS_HEART_BEAT_EXPIRED)
		bnx2_send_heart_beat(bp);

	msg &= ~BNX2_LINK_STATUS_HEART_BEAT_EXPIRED;

	if ((msg & BNX2_LINK_STATUS_LINK_UP) == BNX2_LINK_STATUS_LINK_DOWN)
		bp->link_up = 0;
	else {
		u32 speed;

		bp->link_up = 1;
		speed = msg & BNX2_LINK_STATUS_SPEED_MASK;
		bp->duplex = DUPLEX_FULL;
		/* The xxHALF cases deliberately fall through to the
		 * matching xxFULL case to pick up the line speed after
		 * overriding the duplex.
		 */
		switch (speed) {
			case BNX2_LINK_STATUS_10HALF:
				bp->duplex = DUPLEX_HALF;
				/* fall through */
			case BNX2_LINK_STATUS_10FULL:
				bp->line_speed = SPEED_10;
				break;
			case BNX2_LINK_STATUS_100HALF:
				bp->duplex = DUPLEX_HALF;
				/* fall through */
			case BNX2_LINK_STATUS_100BASE_T4:
			case BNX2_LINK_STATUS_100FULL:
				bp->line_speed = SPEED_100;
				break;
			case BNX2_LINK_STATUS_1000HALF:
				bp->duplex = DUPLEX_HALF;
				/* fall through */
			case BNX2_LINK_STATUS_1000FULL:
				bp->line_speed = SPEED_1000;
				break;
			case BNX2_LINK_STATUS_2500HALF:
				bp->duplex = DUPLEX_HALF;
				/* fall through */
			case BNX2_LINK_STATUS_2500FULL:
				bp->line_speed = SPEED_2500;
				break;
			default:
				bp->line_speed = 0;
				break;
		}

		spin_lock(&bp->phy_lock);
		bp->flow_ctrl = 0;
		/* Forced flow control applies unless both speed and flow
		 * control are autonegotiated; otherwise take the
		 * negotiated TX/RX pause bits from the firmware word.
		 */
		if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
		    (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {
			if (bp->duplex == DUPLEX_FULL)
				bp->flow_ctrl = bp->req_flow_ctrl;
		} else {
			if (msg & BNX2_LINK_STATUS_TX_FC_ENABLED)
				bp->flow_ctrl |= FLOW_CTRL_TX;
			if (msg & BNX2_LINK_STATUS_RX_FC_ENABLED)
				bp->flow_ctrl |= FLOW_CTRL_RX;
		}

		/* A port-type change (fibre <-> TP) invalidates the
		 * current defaults, so reload them.
		 */
		old_port = bp->phy_port;
		if (msg & BNX2_LINK_STATUS_SERDES_LINK)
			bp->phy_port = PORT_FIBRE;
		else
			bp->phy_port = PORT_TP;

		if (old_port != bp->phy_port)
			bnx2_set_default_link(bp);

		spin_unlock(&bp->phy_lock);
	}
	if (bp->link_up != link_up)
		bnx2_report_link(bp);

	bnx2_set_mac_link(bp);
}
1702
1703 static int
1704 bnx2_set_remote_link(struct bnx2 *bp)
1705 {
1706         u32 evt_code;
1707
1708         evt_code = bnx2_shmem_rd(bp, BNX2_FW_EVT_CODE_MB);
1709         switch (evt_code) {
1710                 case BNX2_FW_EVT_CODE_LINK_EVENT:
1711                         bnx2_remote_phy_event(bp);
1712                         break;
1713                 case BNX2_FW_EVT_CODE_SW_TIMER_EXPIRATION_EVENT:
1714                 default:
1715                         bnx2_send_heart_beat(bp);
1716                         break;
1717         }
1718         return 0;
1719 }
1720
/* Configure the copper PHY according to bp->autoneg and the requested
 * speed/duplex.
 *
 * Autoneg path: rebuild the 10/100 and 1000 advertisement registers
 * from bp->advertising and restart autonegotiation only if something
 * actually changed.  Forced path: write BMCR directly, bouncing the
 * link first if it is currently up so the partner renegotiates.
 *
 * Called with phy_lock held (it is dropped around the msleep below).
 * Always returns 0.
 */
static int
bnx2_setup_copper_phy(struct bnx2 *bp)
{
	u32 bmcr;
	u32 new_bmcr;

	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

	if (bp->autoneg & AUTONEG_SPEED) {
		u32 adv_reg, adv1000_reg;
		u32 new_adv_reg = 0;
		u32 new_adv1000_reg = 0;

		/* Preserve only the speed/pause bits of the current
		 * advertisement for the changed-comparison below.
		 */
		bnx2_read_phy(bp, bp->mii_adv, &adv_reg);
		adv_reg &= (PHY_ALL_10_100_SPEED | ADVERTISE_PAUSE_CAP |
			ADVERTISE_PAUSE_ASYM);

		bnx2_read_phy(bp, MII_CTRL1000, &adv1000_reg);
		adv1000_reg &= PHY_ALL_1000_SPEED;

		if (bp->advertising & ADVERTISED_10baseT_Half)
			new_adv_reg |= ADVERTISE_10HALF;
		if (bp->advertising & ADVERTISED_10baseT_Full)
			new_adv_reg |= ADVERTISE_10FULL;
		if (bp->advertising & ADVERTISED_100baseT_Half)
			new_adv_reg |= ADVERTISE_100HALF;
		if (bp->advertising & ADVERTISED_100baseT_Full)
			new_adv_reg |= ADVERTISE_100FULL;
		if (bp->advertising & ADVERTISED_1000baseT_Full)
			new_adv1000_reg |= ADVERTISE_1000FULL;

		new_adv_reg |= ADVERTISE_CSMA;

		new_adv_reg |= bnx2_phy_get_pause_adv(bp);

		/* Only rewrite and restart autoneg if the advertisement
		 * changed or autoneg is currently disabled.
		 */
		if ((adv1000_reg != new_adv1000_reg) ||
			(adv_reg != new_adv_reg) ||
			((bmcr & BMCR_ANENABLE) == 0)) {

			bnx2_write_phy(bp, bp->mii_adv, new_adv_reg);
			bnx2_write_phy(bp, MII_CTRL1000, new_adv1000_reg);
			bnx2_write_phy(bp, bp->mii_bmcr, BMCR_ANRESTART |
				BMCR_ANENABLE);
		}
		else if (bp->link_up) {
			/* Flow ctrl may have changed from auto to forced */
			/* or vice-versa. */

			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);
		}
		return 0;
	}

	/* Forced speed/duplex path. */
	new_bmcr = 0;
	if (bp->req_line_speed == SPEED_100) {
		new_bmcr |= BMCR_SPEED100;
	}
	if (bp->req_duplex == DUPLEX_FULL) {
		new_bmcr |= BMCR_FULLDPLX;
	}
	if (new_bmcr != bmcr) {
		u32 bmsr;

		/* BMSR link status is latched-low; read twice to get the
		 * current state.
		 */
		bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
		bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);

		if (bmsr & BMSR_LSTATUS) {
			/* Force link down */
			bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
			/* Drop phy_lock while sleeping; caller holds it. */
			spin_unlock_bh(&bp->phy_lock);
			msleep(50);
			spin_lock_bh(&bp->phy_lock);

			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
		}

		bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);

		/* Normally, the new speed is setup after the link has
		 * gone down and up again. In some cases, link will not go
		 * down so we need to set up the new speed here.
		 */
		if (bmsr & BMSR_LSTATUS) {
			bp->line_speed = bp->req_line_speed;
			bp->duplex = bp->req_duplex;
			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);
		}
	} else {
		bnx2_resolve_flow_ctrl(bp);
		bnx2_set_mac_link(bp);
	}
	return 0;
}
1817
1818 static int
1819 bnx2_setup_phy(struct bnx2 *bp, u8 port)
1820 {
1821         if (bp->loopback == MAC_LOOPBACK)
1822                 return 0;
1823
1824         if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1825                 return (bnx2_setup_serdes_phy(bp, port));
1826         }
1827         else {
1828                 return (bnx2_setup_copper_phy(bp));
1829         }
1830 }
1831
/* One-time init of the 5709 SerDes PHY.
 *
 * The 5709S exposes the standard MII registers at a 0x10 offset within
 * its register blocks, so the mii_* shortcuts are remapped first; the
 * rest walks the PHY's paged register blocks (selected through
 * MII_BNX2_BLK_ADDR) in a fixed, order-sensitive sequence.
 * Always returns 0.
 */
static int
bnx2_init_5709s_phy(struct bnx2 *bp)
{
	u32 val;

	/* Standard MII registers live at +0x10 on this PHY. */
	bp->mii_bmcr = MII_BMCR + 0x10;
	bp->mii_bmsr = MII_BMSR + 0x10;
	bp->mii_bmsr1 = MII_BNX2_GP_TOP_AN_STATUS1;
	bp->mii_adv = MII_ADVERTISE + 0x10;
	bp->mii_lpa = MII_LPA + 0x10;
	bp->mii_up1 = MII_BNX2_OVER1G_UP1;

	/* Point the AER block at the autoneg MMD. */
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_AER);
	bnx2_write_phy(bp, MII_BNX2_AER_AER, MII_BNX2_AER_AER_AN_MMD);

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
	bnx2_reset_phy(bp);

	/* Force fiber mode, no auto-detect. */
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_SERDES_DIG);

	bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, &val);
	val &= ~MII_BNX2_SD_1000XCTL1_AUTODET;
	val |= MII_BNX2_SD_1000XCTL1_FIBER;
	bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, val);

	/* Advertise 2.5G only if the hardware supports it. */
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
	bnx2_read_phy(bp, MII_BNX2_OVER1G_UP1, &val);
	if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE)
		val |= BCM5708S_UP1_2G5;
	else
		val &= ~BCM5708S_UP1_2G5;
	bnx2_write_phy(bp, MII_BNX2_OVER1G_UP1, val);

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_BAM_NXTPG);
	bnx2_read_phy(bp, MII_BNX2_BAM_NXTPG_CTL, &val);
	val |= MII_BNX2_NXTPG_CTL_T2 | MII_BNX2_NXTPG_CTL_BAM;
	bnx2_write_phy(bp, MII_BNX2_BAM_NXTPG_CTL, val);

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_CL73_USERB0);

	val = MII_BNX2_CL73_BAM_EN | MII_BNX2_CL73_BAM_STA_MGR_EN |
	      MII_BNX2_CL73_BAM_NP_AFT_BP_EN;
	bnx2_write_phy(bp, MII_BNX2_CL73_BAM_CTL1, val);

	/* Leave the block address pointing at the combo IEEE block. */
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);

	return 0;
}
1880
/* One-time init of the 5708 SerDes PHY: reset, enable fiber mode with
 * auto-detect, enable the parallel-link detector, optionally enable
 * 2.5G, and apply chip-rev / backplane TX-amplitude tweaks from the
 * shared hardware config.  Always returns 0.
 */
static int
bnx2_init_5708s_phy(struct bnx2 *bp)
{
	u32 val;

	bnx2_reset_phy(bp);

	bp->mii_up1 = BCM5708S_UP1;

	bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG3);
	bnx2_write_phy(bp, BCM5708S_DIG_3_0, BCM5708S_DIG_3_0_USE_IEEE);
	bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);

	bnx2_read_phy(bp, BCM5708S_1000X_CTL1, &val);
	val |= BCM5708S_1000X_CTL1_FIBER_MODE | BCM5708S_1000X_CTL1_AUTODET_EN;
	bnx2_write_phy(bp, BCM5708S_1000X_CTL1, val);

	bnx2_read_phy(bp, BCM5708S_1000X_CTL2, &val);
	val |= BCM5708S_1000X_CTL2_PLLEL_DET_EN;
	bnx2_write_phy(bp, BCM5708S_1000X_CTL2, val);

	if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) {
		bnx2_read_phy(bp, BCM5708S_UP1, &val);
		val |= BCM5708S_UP1_2G5;
		bnx2_write_phy(bp, BCM5708S_UP1, val);
	}

	/* Early 5708 revisions need a larger TX signal amplitude. */
	if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_B1)) {
		/* increase tx signal amplitude */
		bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
			       BCM5708S_BLK_ADDR_TX_MISC);
		bnx2_read_phy(bp, BCM5708S_TX_ACTL1, &val);
		val &= ~BCM5708S_TX_ACTL1_DRIVER_VCM;
		bnx2_write_phy(bp, BCM5708S_TX_ACTL1, val);
		bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);
	}

	/* A non-zero TXCTL3 value in the port config is written to the
	 * PHY only on backplane designs.
	 */
	val = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_CONFIG) &
	      BNX2_PORT_HW_CFG_CFG_TXCTL3_MASK;

	if (val) {
		u32 is_backplane;

		is_backplane = bnx2_shmem_rd(bp, BNX2_SHARED_HW_CFG_CONFIG);
		if (is_backplane & BNX2_SHARED_HW_CFG_PHY_BACKPLANE) {
			bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
				       BCM5708S_BLK_ADDR_TX_MISC);
			bnx2_write_phy(bp, BCM5708S_TX_ACTL3, val);
			bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
				       BCM5708S_BLK_ADDR_DIG);
		}
	}
	return 0;
}
1937
1938 static int
1939 bnx2_init_5706s_phy(struct bnx2 *bp)
1940 {
1941         bnx2_reset_phy(bp);
1942
1943         bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;
1944
1945         if (CHIP_NUM(bp) == CHIP_NUM_5706)
1946                 REG_WR(bp, BNX2_MISC_GP_HW_CTL0, 0x300);
1947
1948         if (bp->dev->mtu > 1500) {
1949                 u32 val;
1950
1951                 /* Set extended packet length bit */
1952                 bnx2_write_phy(bp, 0x18, 0x7);
1953                 bnx2_read_phy(bp, 0x18, &val);
1954                 bnx2_write_phy(bp, 0x18, (val & 0xfff8) | 0x4000);
1955
1956                 bnx2_write_phy(bp, 0x1c, 0x6c00);
1957                 bnx2_read_phy(bp, 0x1c, &val);
1958                 bnx2_write_phy(bp, 0x1c, (val & 0x3ff) | 0xec02);
1959         }
1960         else {
1961                 u32 val;
1962
1963                 bnx2_write_phy(bp, 0x18, 0x7);
1964                 bnx2_read_phy(bp, 0x18, &val);
1965                 bnx2_write_phy(bp, 0x18, val & ~0x4007);
1966
1967                 bnx2_write_phy(bp, 0x1c, 0x6c00);
1968                 bnx2_read_phy(bp, 0x1c, &val);
1969                 bnx2_write_phy(bp, 0x1c, (val & 0x3fd) | 0xec00);
1970         }
1971
1972         return 0;
1973 }
1974
/* One-time init of the copper PHY: reset, apply optional CRC and
 * early-DAC workarounds, set or clear extended packet length for
 * jumbo MTU, and enable ethernet@wirespeed.  Always returns 0.
 *
 * NOTE(review): the raw 0x18/0x17/0x15/0x1c/0x10 accesses are
 * vendor shadow/expansion register sequences — presumably from
 * Broadcom errata; values are intentionally left as-is.
 */
static int
bnx2_init_copper_phy(struct bnx2 *bp)
{
	u32 val;

	bnx2_reset_phy(bp);

	/* CRC workaround: scripted shadow-register sequence. */
	if (bp->phy_flags & BNX2_PHY_FLAG_CRC_FIX) {
		bnx2_write_phy(bp, 0x18, 0x0c00);
		bnx2_write_phy(bp, 0x17, 0x000a);
		bnx2_write_phy(bp, 0x15, 0x310b);
		bnx2_write_phy(bp, 0x17, 0x201f);
		bnx2_write_phy(bp, 0x15, 0x9506);
		bnx2_write_phy(bp, 0x17, 0x401f);
		bnx2_write_phy(bp, 0x15, 0x14e2);
		bnx2_write_phy(bp, 0x18, 0x0400);
	}

	/* Disable early DAC via DSP expansion register 8, bit 8. */
	if (bp->phy_flags & BNX2_PHY_FLAG_DIS_EARLY_DAC) {
		bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS,
			       MII_BNX2_DSP_EXPAND_REG | 0x8);
		bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &val);
		val &= ~(1 << 8);
		bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val);
	}

	if (bp->dev->mtu > 1500) {
		/* Set extended packet length bit */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, val | 0x4000);

		bnx2_read_phy(bp, 0x10, &val);
		bnx2_write_phy(bp, 0x10, val | 0x1);
	}
	else {
		/* Clear extended packet length bit for standard MTU. */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, val & ~0x4007);

		bnx2_read_phy(bp, 0x10, &val);
		bnx2_write_phy(bp, 0x10, val & ~0x1);
	}

	/* ethernet@wirespeed */
	bnx2_write_phy(bp, 0x18, 0x7007);
	bnx2_read_phy(bp, 0x18, &val);
	bnx2_write_phy(bp, 0x18, val | (1 << 15) | (1 << 4));
	return 0;
}
2025
2026
/* Top-level PHY initialization: set default MII register offsets,
 * enable link attention, read the PHY ID, run the chip-specific init
 * routine (skipped entirely when the bootcode manages the PHY), then
 * perform link setup.  Returns 0 or the error from the chip-specific
 * init / setup step.
 */
static int
bnx2_init_phy(struct bnx2 *bp)
{
	u32 val;
	int rc = 0;

	bp->phy_flags &= ~BNX2_PHY_FLAG_INT_MODE_MASK;
	bp->phy_flags |= BNX2_PHY_FLAG_INT_MODE_LINK_READY;

	/* Default MII register map; bnx2_init_5709s_phy() remaps these. */
	bp->mii_bmcr = MII_BMCR;
	bp->mii_bmsr = MII_BMSR;
	bp->mii_bmsr1 = MII_BMSR;
	bp->mii_adv = MII_ADVERTISE;
	bp->mii_lpa = MII_LPA;

	REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);

	/* Remote (bootcode-managed) PHY: no local PHY init needed. */
	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
		goto setup_phy;

	bnx2_read_phy(bp, MII_PHYSID1, &val);
	bp->phy_id = val << 16;
	bnx2_read_phy(bp, MII_PHYSID2, &val);
	bp->phy_id |= val & 0xffff;

	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
		if (CHIP_NUM(bp) == CHIP_NUM_5706)
			rc = bnx2_init_5706s_phy(bp);
		else if (CHIP_NUM(bp) == CHIP_NUM_5708)
			rc = bnx2_init_5708s_phy(bp);
		else if (CHIP_NUM(bp) == CHIP_NUM_5709)
			rc = bnx2_init_5709s_phy(bp);
	}
	else {
		rc = bnx2_init_copper_phy(bp);
	}

setup_phy:
	if (!rc)
		rc = bnx2_setup_phy(bp, bp->phy_port);

	return rc;
}
2070
2071 static int
2072 bnx2_set_mac_loopback(struct bnx2 *bp)
2073 {
2074         u32 mac_mode;
2075
2076         mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
2077         mac_mode &= ~BNX2_EMAC_MODE_PORT;
2078         mac_mode |= BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK;
2079         REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
2080         bp->link_up = 1;
2081         return 0;
2082 }
2083
2084 static int bnx2_test_link(struct bnx2 *);
2085
/* Put the PHY into loopback at 1000/full, wait (up to ~1s) for the
 * link to come up, and configure the EMAC to GMII with loopback and
 * force-link bits cleared.  Returns 0, or the error from the BMCR
 * write.
 */
static int
bnx2_set_phy_loopback(struct bnx2 *bp)
{
	u32 mac_mode;
	int rc, i;

	spin_lock_bh(&bp->phy_lock);
	rc = bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK | BMCR_FULLDPLX |
			    BMCR_SPEED1000);
	spin_unlock_bh(&bp->phy_lock);
	if (rc)
		return rc;

	/* Poll for link; proceed after 10 tries even without link. */
	for (i = 0; i < 10; i++) {
		if (bnx2_test_link(bp) == 0)
			break;
		msleep(100);
	}

	mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
	mac_mode &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
		      BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
		      BNX2_EMAC_MODE_25G_MODE);

	mac_mode |= BNX2_EMAC_MODE_PORT_GMII;
	REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
	bp->link_up = 1;
	return 0;
}
2115
/* Send a command to the bootcode through the driver mailbox and wait
 * for it to be acknowledged.
 *
 * The message carries an incrementing sequence number; the bootcode
 * echoes the sequence in BNX2_FW_MB when done.  WAIT0-type messages
 * return success without checking for a timeout.  Returns 0 on
 * success, -EBUSY on ack timeout (after notifying the firmware),
 * or -EIO if the firmware reported a bad status.  @silent suppresses
 * the timeout message.  Sleeps; must be called in process context.
 */
static int
bnx2_fw_sync(struct bnx2 *bp, u32 msg_data, int silent)
{
	int i;
	u32 val;

	bp->fw_wr_seq++;
	msg_data |= bp->fw_wr_seq;

	bnx2_shmem_wr(bp, BNX2_DRV_MB, msg_data);

	/* wait for an acknowledgement. */
	for (i = 0; i < (FW_ACK_TIME_OUT_MS / 10); i++) {
		msleep(10);

		val = bnx2_shmem_rd(bp, BNX2_FW_MB);

		if ((val & BNX2_FW_MSG_ACK) == (msg_data & BNX2_DRV_MSG_SEQ))
			break;
	}
	/* WAIT0 messages don't require a completed handshake. */
	if ((msg_data & BNX2_DRV_MSG_DATA) == BNX2_DRV_MSG_DATA_WAIT0)
		return 0;

	/* If we timed out, inform the firmware that this is the case. */
	if ((val & BNX2_FW_MSG_ACK) != (msg_data & BNX2_DRV_MSG_SEQ)) {
		if (!silent)
			printk(KERN_ERR PFX "fw sync timeout, reset code = "
					    "%x\n", msg_data);

		msg_data &= ~BNX2_DRV_MSG_CODE;
		msg_data |= BNX2_DRV_MSG_CODE_FW_TIMEOUT;

		bnx2_shmem_wr(bp, BNX2_DRV_MB, msg_data);

		return -EBUSY;
	}

	if ((val & BNX2_FW_MSG_STATUS_MASK) != BNX2_FW_MSG_STATUS_OK)
		return -EIO;

	return 0;
}
2158
/* Initialize the 5709 context memory: kick the one-shot MEM_INIT,
 * then program each host context page into the chip's host page
 * table, polling for each WRITE_REQ to complete.  Returns 0 on
 * success or -EBUSY if the hardware doesn't finish in time.
 */
static int
bnx2_init_5709_context(struct bnx2 *bp)
{
	int i, ret = 0;
	u32 val;

	val = BNX2_CTX_COMMAND_ENABLED | BNX2_CTX_COMMAND_MEM_INIT | (1 << 12);
	/* Encode the page size (log2 - 8) into bits 16+. */
	val |= (BCM_PAGE_BITS - 8) << 16;
	REG_WR(bp, BNX2_CTX_COMMAND, val);
	/* MEM_INIT self-clears when the init completes. */
	for (i = 0; i < 10; i++) {
		val = REG_RD(bp, BNX2_CTX_COMMAND);
		if (!(val & BNX2_CTX_COMMAND_MEM_INIT))
			break;
		udelay(2);
	}
	if (val & BNX2_CTX_COMMAND_MEM_INIT)
		return -EBUSY;

	for (i = 0; i < bp->ctx_pages; i++) {
		int j;

		/* Low DMA address + valid bit, then high 32 bits, then
		 * trigger the table write for entry i.
		 */
		REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA0,
		       (bp->ctx_blk_mapping[i] & 0xffffffff) |
		       BNX2_CTX_HOST_PAGE_TBL_DATA0_VALID);
		REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA1,
		       (u64) bp->ctx_blk_mapping[i] >> 32);
		REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL, i |
		       BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ);
		/* WRITE_REQ self-clears when the entry is written. */
		for (j = 0; j < 10; j++) {

			val = REG_RD(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL);
			if (!(val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ))
				break;
			udelay(5);
		}
		if (val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ) {
			ret = -EBUSY;
			break;
		}
	}
	return ret;
}
2201
/* Zero the on-chip context memory for all 96 connection IDs on
 * pre-5709 chips, one PHY_CTX_SIZE chunk at a time through the
 * context window registers.
 */
static void
bnx2_init_context(struct bnx2 *bp)
{
	u32 vcid;

	vcid = 96;
	while (vcid) {
		u32 vcid_addr, pcid_addr, offset;
		int i;

		vcid--;

		if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
			u32 new_vcid;

			/* NOTE(review): 5706 A0 maps CIDs with bit 3 set
			 * to a different physical CID range — presumably
			 * a silicon errata workaround; confirm against
			 * Broadcom documentation.
			 */
			vcid_addr = GET_PCID_ADDR(vcid);
			if (vcid & 0x8) {
				new_vcid = 0x60 + (vcid & 0xf0) + (vcid & 0x7);
			}
			else {
				new_vcid = vcid;
			}
			pcid_addr = GET_PCID_ADDR(new_vcid);
		}
		else {
			vcid_addr = GET_CID_ADDR(vcid);
			pcid_addr = vcid_addr;
		}

		for (i = 0; i < (CTX_SIZE / PHY_CTX_SIZE); i++) {
			vcid_addr += (i << PHY_CTX_SHIFT);
			pcid_addr += (i << PHY_CTX_SHIFT);

			/* Map the virtual CID window onto the physical page. */
			REG_WR(bp, BNX2_CTX_VIRT_ADDR, vcid_addr);
			REG_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr);

			/* Zero out the context. */
			for (offset = 0; offset < PHY_CTX_SIZE; offset += 4)
				bnx2_ctx_wr(bp, vcid_addr, offset, 0);
		}
	}
}
2244
/* Work around bad RX buffer memory blocks: drain the chip's mbuf
 * allocator, remembering only the good buffers (those without bit 9
 * set in the allocated address), then free just the good ones back.
 * The bad blocks stay allocated and are thus never handed out again.
 * Returns 0 on success or -ENOMEM.
 */
static int
bnx2_alloc_bad_rbuf(struct bnx2 *bp)
{
	u16 *good_mbuf;
	u32 good_mbuf_cnt;
	u32 val;

	good_mbuf = kmalloc(512 * sizeof(u16), GFP_KERNEL);
	if (good_mbuf == NULL) {
		printk(KERN_ERR PFX "Failed to allocate memory in "
				    "bnx2_alloc_bad_rbuf\n");
		return -ENOMEM;
	}

	REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
		BNX2_MISC_ENABLE_SET_BITS_RX_MBUF_ENABLE);

	good_mbuf_cnt = 0;

	/* Allocate a bunch of mbufs and save the good ones in an array. */
	val = bnx2_reg_rd_ind(bp, BNX2_RBUF_STATUS1);
	while (val & BNX2_RBUF_STATUS1_FREE_COUNT) {
		bnx2_reg_wr_ind(bp, BNX2_RBUF_COMMAND,
				BNX2_RBUF_COMMAND_ALLOC_REQ);

		val = bnx2_reg_rd_ind(bp, BNX2_RBUF_FW_BUF_ALLOC);

		val &= BNX2_RBUF_FW_BUF_ALLOC_VALUE;

		/* The addresses with Bit 9 set are bad memory blocks. */
		if (!(val & (1 << 9))) {
			good_mbuf[good_mbuf_cnt] = (u16) val;
			good_mbuf_cnt++;
		}

		val = bnx2_reg_rd_ind(bp, BNX2_RBUF_STATUS1);
	}

	/* Free the good ones back to the mbuf pool thus discarding
	 * all the bad ones. */
	while (good_mbuf_cnt) {
		good_mbuf_cnt--;

		val = good_mbuf[good_mbuf_cnt];
		/* Encode the buffer handle in the format the FREE
		 * register expects (address duplicated plus valid bit).
		 */
		val = (val << 9) | val | 1;

		bnx2_reg_wr_ind(bp, BNX2_RBUF_FW_BUF_FREE, val);
	}
	kfree(good_mbuf);
	return 0;
}
2296
2297 static void
2298 bnx2_set_mac_addr(struct bnx2 *bp)
2299 {
2300         u32 val;
2301         u8 *mac_addr = bp->dev->dev_addr;
2302
2303         val = (mac_addr[0] << 8) | mac_addr[1];
2304
2305         REG_WR(bp, BNX2_EMAC_MAC_MATCH0, val);
2306
2307         val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
2308                 (mac_addr[4] << 8) | mac_addr[5];
2309
2310         REG_WR(bp, BNX2_EMAC_MAC_MATCH1, val);
2311 }
2312
2313 static inline int
2314 bnx2_alloc_rx_page(struct bnx2 *bp, u16 index)
2315 {
2316         dma_addr_t mapping;
2317         struct sw_pg *rx_pg = &bp->rx_pg_ring[index];
2318         struct rx_bd *rxbd =
2319                 &bp->rx_pg_desc_ring[RX_RING(index)][RX_IDX(index)];
2320         struct page *page = alloc_page(GFP_ATOMIC);
2321
2322         if (!page)
2323                 return -ENOMEM;
2324         mapping = pci_map_page(bp->pdev, page, 0, PAGE_SIZE,
2325                                PCI_DMA_FROMDEVICE);
2326         rx_pg->page = page;
2327         pci_unmap_addr_set(rx_pg, mapping, mapping);
2328         rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
2329         rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;
2330         return 0;
2331 }
2332
2333 static void
2334 bnx2_free_rx_page(struct bnx2 *bp, u16 index)
2335 {
2336         struct sw_pg *rx_pg = &bp->rx_pg_ring[index];
2337         struct page *page = rx_pg->page;
2338
2339         if (!page)
2340                 return;
2341
2342         pci_unmap_page(bp->pdev, pci_unmap_addr(rx_pg, mapping), PAGE_SIZE,
2343                        PCI_DMA_FROMDEVICE);
2344
2345         __free_page(page);
2346         rx_pg->page = NULL;
2347 }
2348
2349 static inline int
2350 bnx2_alloc_rx_skb(struct bnx2 *bp, struct bnx2_napi *bnapi, u16 index)
2351 {
2352         struct sk_buff *skb;
2353         struct sw_bd *rx_buf = &bp->rx_buf_ring[index];
2354         dma_addr_t mapping;
2355         struct rx_bd *rxbd = &bp->rx_desc_ring[RX_RING(index)][RX_IDX(index)];
2356         unsigned long align;
2357
2358         skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
2359         if (skb == NULL) {
2360                 return -ENOMEM;
2361         }
2362
2363         if (unlikely((align = (unsigned long) skb->data & (BNX2_RX_ALIGN - 1))))
2364                 skb_reserve(skb, BNX2_RX_ALIGN - align);
2365
2366         mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_use_size,
2367                 PCI_DMA_FROMDEVICE);
2368
2369         rx_buf->skb = skb;
2370         pci_unmap_addr_set(rx_buf, mapping, mapping);
2371
2372         rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
2373         rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;
2374
2375         bnapi->rx_prod_bseq += bp->rx_buf_use_size;
2376
2377         return 0;
2378 }
2379
2380 static int
2381 bnx2_phy_event_is_set(struct bnx2 *bp, struct bnx2_napi *bnapi, u32 event)
2382 {
2383         struct status_block *sblk = bnapi->status_blk;
2384         u32 new_link_state, old_link_state;
2385         int is_set = 1;
2386
2387         new_link_state = sblk->status_attn_bits & event;
2388         old_link_state = sblk->status_attn_bits_ack & event;
2389         if (new_link_state != old_link_state) {
2390                 if (new_link_state)
2391                         REG_WR(bp, BNX2_PCICFG_STATUS_BIT_SET_CMD, event);
2392                 else
2393                         REG_WR(bp, BNX2_PCICFG_STATUS_BIT_CLEAR_CMD, event);
2394         } else
2395                 is_set = 0;
2396
2397         return is_set;
2398 }
2399
2400 static void
2401 bnx2_phy_int(struct bnx2 *bp, struct bnx2_napi *bnapi)
2402 {
2403         if (bnx2_phy_event_is_set(bp, bnapi, STATUS_ATTN_BITS_LINK_STATE)) {
2404                 spin_lock(&bp->phy_lock);
2405                 bnx2_set_link(bp);
2406                 spin_unlock(&bp->phy_lock);
2407         }
2408         if (bnx2_phy_event_is_set(bp, bnapi, STATUS_ATTN_BITS_TIMER_ABORT))
2409                 bnx2_set_remote_link(bp);
2410
2411 }
2412
2413 static inline u16
2414 bnx2_get_hw_tx_cons(struct bnx2_napi *bnapi)
2415 {
2416         u16 cons;
2417
2418         if (bnapi->int_num == 0)
2419                 cons = bnapi->status_blk->status_tx_quick_consumer_index0;
2420         else
2421                 cons = bnapi->status_blk_msix->status_tx_quick_consumer_index;
2422
2423         if (unlikely((cons & MAX_TX_DESC_CNT) == MAX_TX_DESC_CNT))
2424                 cons++;
2425         return cons;
2426 }
2427
/* Reclaim completed TX descriptors: unmap and free up to @budget
 * transmitted skbs, then wake the TX queue if enough ring space has
 * been freed.  Returns the number of packets reclaimed.
 */
static int
bnx2_tx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
{
	u16 hw_cons, sw_cons, sw_ring_cons;
	int tx_pkt = 0;

	hw_cons = bnx2_get_hw_tx_cons(bnapi);
	sw_cons = bnapi->tx_cons;

	while (sw_cons != hw_cons) {
		struct sw_bd *tx_buf;
		struct sk_buff *skb;
		int i, last;

		sw_ring_cons = TX_RING_IDX(sw_cons);

		tx_buf = &bp->tx_buf_ring[sw_ring_cons];
		skb = tx_buf->skb;

		/* partial BD completions possible with TSO packets */
		if (skb_is_gso(skb)) {
			u16 last_idx, last_ring_idx;

			/* Only free the skb once every BD of the TSO
			 * packet (headlen + all frags) has completed.
			 */
			last_idx = sw_cons +
				skb_shinfo(skb)->nr_frags + 1;
			last_ring_idx = sw_ring_cons +
				skb_shinfo(skb)->nr_frags + 1;
			/* Account for the unused last slot when the
			 * packet wraps a ring page boundary.
			 */
			if (unlikely(last_ring_idx >= MAX_TX_DESC_CNT)) {
				last_idx++;
			}
			/* Signed 16-bit compare handles index wraparound. */
			if (((s16) ((s16) last_idx - (s16) hw_cons)) > 0) {
				break;
			}
		}

		pci_unmap_single(bp->pdev, pci_unmap_addr(tx_buf, mapping),
			skb_headlen(skb), PCI_DMA_TODEVICE);

		tx_buf->skb = NULL;
		last = skb_shinfo(skb)->nr_frags;

		for (i = 0; i < last; i++) {
			sw_cons = NEXT_TX_BD(sw_cons);

			pci_unmap_page(bp->pdev,
				pci_unmap_addr(
					&bp->tx_buf_ring[TX_RING_IDX(sw_cons)],
					mapping),
				skb_shinfo(skb)->frags[i].size,
				PCI_DMA_TODEVICE);
		}

		sw_cons = NEXT_TX_BD(sw_cons);

		dev_kfree_skb(skb);
		tx_pkt++;
		if (tx_pkt == budget)
			break;

		/* Re-read: more completions may have arrived meanwhile. */
		hw_cons = bnx2_get_hw_tx_cons(bnapi);
	}

	bnapi->hw_tx_cons = hw_cons;
	bnapi->tx_cons = sw_cons;
	/* Need to make the tx_cons update visible to bnx2_start_xmit()
	 * before checking for netif_queue_stopped().  Without the
	 * memory barrier, there is a small possibility that bnx2_start_xmit()
	 * will miss it and cause the queue to be stopped forever.
	 */
	smp_mb();

	/* Re-check under netif_tx_lock to avoid racing with a
	 * concurrent queue stop in bnx2_start_xmit().
	 */
	if (unlikely(netif_queue_stopped(bp->dev)) &&
		     (bnx2_tx_avail(bp, bnapi) > bp->tx_wake_thresh)) {
		netif_tx_lock(bp->dev);
		if ((netif_queue_stopped(bp->dev)) &&
		    (bnx2_tx_avail(bp, bnapi) > bp->tx_wake_thresh))
			netif_wake_queue(bp->dev);
		netif_tx_unlock(bp->dev);
	}
	return tx_pkt;
}
2509
/* Recycle @count page buffers on the rx page ring: each consumer
 * entry's page and DMA mapping are moved to the corresponding
 * producer slot so the hardware can reuse them without a fresh
 * allocation.  If @skb is non-NULL, its last page fragment is
 * detached, remapped for DMA and given back to the first recycled
 * consumer slot before the skb is freed.
 */
static void
bnx2_reuse_rx_skb_pages(struct bnx2 *bp, struct bnx2_napi *bnapi,
			struct sk_buff *skb, int count)
{
	struct sw_pg *cons_rx_pg, *prod_rx_pg;
	struct rx_bd *cons_bd, *prod_bd;
	dma_addr_t mapping;
	int i;
	u16 hw_prod = bnapi->rx_pg_prod, prod;
	u16 cons = bnapi->rx_pg_cons;

	for (i = 0; i < count; i++) {
		prod = RX_PG_RING_IDX(hw_prod);

		prod_rx_pg = &bp->rx_pg_ring[prod];
		cons_rx_pg = &bp->rx_pg_ring[cons];
		cons_bd = &bp->rx_pg_desc_ring[RX_RING(cons)][RX_IDX(cons)];
		prod_bd = &bp->rx_pg_desc_ring[RX_RING(prod)][RX_IDX(prod)];

		if (i == 0 && skb) {
			struct page *page;
			struct skb_shared_info *shinfo;

			/* Strip the last fragment from the skb and hand its
			 * page back to the consumer slot being recycled.
			 */
			shinfo = skb_shinfo(skb);
			shinfo->nr_frags--;
			page = shinfo->frags[shinfo->nr_frags].page;
			shinfo->frags[shinfo->nr_frags].page = NULL;
			mapping = pci_map_page(bp->pdev, page, 0, PAGE_SIZE,
					       PCI_DMA_FROMDEVICE);
			cons_rx_pg->page = page;
			pci_unmap_addr_set(cons_rx_pg, mapping, mapping);
			dev_kfree_skb(skb);
		}
		if (prod != cons) {
			/* Move the page, its DMA mapping, and the BD's
			 * hardware address from consumer to producer entry.
			 */
			prod_rx_pg->page = cons_rx_pg->page;
			cons_rx_pg->page = NULL;
			pci_unmap_addr_set(prod_rx_pg, mapping,
				pci_unmap_addr(cons_rx_pg, mapping));

			prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
			prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;

		}
		cons = RX_PG_RING_IDX(NEXT_RX_BD(cons));
		hw_prod = NEXT_RX_BD(hw_prod);
	}
	bnapi->rx_pg_prod = hw_prod;
	bnapi->rx_pg_cons = cons;
}
2559
/* Recycle an rx buffer that will not be passed up the stack: give
 * @skb back to the producer slot @prod and, when producer and
 * consumer slots differ, move the DMA mapping and BD hardware
 * address from the consumer entry @cons to the producer entry.
 */
static inline void
bnx2_reuse_rx_skb(struct bnx2 *bp, struct bnx2_napi *bnapi, struct sk_buff *skb,
	u16 cons, u16 prod)
{
	struct sw_bd *cons_rx_buf, *prod_rx_buf;
	struct rx_bd *cons_bd, *prod_bd;

	cons_rx_buf = &bp->rx_buf_ring[cons];
	prod_rx_buf = &bp->rx_buf_ring[prod];

	/* Give the header area back to the device; only this much was
	 * synced to the CPU in bnx2_rx_int().
	 */
	pci_dma_sync_single_for_device(bp->pdev,
		pci_unmap_addr(cons_rx_buf, mapping),
		bp->rx_offset + RX_COPY_THRESH, PCI_DMA_FROMDEVICE);

	bnapi->rx_prod_bseq += bp->rx_buf_use_size;

	prod_rx_buf->skb = skb;

	if (cons == prod)
		return;

	pci_unmap_addr_set(prod_rx_buf, mapping,
			pci_unmap_addr(cons_rx_buf, mapping));

	cons_bd = &bp->rx_desc_ring[RX_RING(cons)][RX_IDX(cons)];
	prod_bd = &bp->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
	prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
	prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;
}
2589
/* Complete reception of one packet into @skb.  A replacement rx
 * buffer is allocated first; on failure the current buffer (and any
 * page-ring pages a split packet would occupy) is recycled and an
 * error returned so the packet is dropped.  For split/jumbo packets,
 * @hdr_len bytes stay in the linear buffer and the remainder is
 * attached as page fragments taken from the rx page ring.
 *
 * @len is the packet length excluding the 4-byte FCS (the caller has
 * already subtracted it); @ring_idx packs the consumer index in the
 * high 16 bits and the producer index in the low 16 bits.
 * Returns 0 on success or a negative errno.
 */
static int
bnx2_rx_skb(struct bnx2 *bp, struct bnx2_napi *bnapi, struct sk_buff *skb,
	    unsigned int len, unsigned int hdr_len, dma_addr_t dma_addr,
	    u32 ring_idx)
{
	int err;
	u16 prod = ring_idx & 0xffff;

	err = bnx2_alloc_rx_skb(bp, bnapi, prod);
	if (unlikely(err)) {
		bnx2_reuse_rx_skb(bp, bnapi, skb, (u16) (ring_idx >> 16), prod);
		if (hdr_len) {
			/* Also recycle the page-ring entries this split
			 * packet occupies.
			 */
			unsigned int raw_len = len + 4;
			int pages = PAGE_ALIGN(raw_len - hdr_len) >> PAGE_SHIFT;

			bnx2_reuse_rx_skb_pages(bp, bnapi, NULL, pages);
		}
		return err;
	}

	skb_reserve(skb, bp->rx_offset);
	pci_unmap_single(bp->pdev, dma_addr, bp->rx_buf_use_size,
			 PCI_DMA_FROMDEVICE);

	if (hdr_len == 0) {
		/* Whole packet fits in the linear buffer. */
		skb_put(skb, len);
		return 0;
	} else {
		unsigned int i, frag_len, frag_size, pages;
		struct sw_pg *rx_pg;
		u16 pg_cons = bnapi->rx_pg_cons;
		u16 pg_prod = bnapi->rx_pg_prod;

		/* frag_size includes the 4-byte FCS; it is trimmed from
		 * the final fragment below.
		 */
		frag_size = len + 4 - hdr_len;
		pages = PAGE_ALIGN(frag_size) >> PAGE_SHIFT;
		skb_put(skb, hdr_len);

		for (i = 0; i < pages; i++) {
			frag_len = min(frag_size, (unsigned int) PAGE_SIZE);
			if (unlikely(frag_len <= 4)) {
				/* Remaining bytes are FCS only: undo the
				 * FCS bytes already counted into the skb,
				 * recycle the unused pages, and finish.
				 */
				unsigned int tail = 4 - frag_len;

				bnapi->rx_pg_cons = pg_cons;
				bnapi->rx_pg_prod = pg_prod;
				bnx2_reuse_rx_skb_pages(bp, bnapi, NULL,
							pages - i);
				skb->len -= tail;
				if (i == 0) {
					skb->tail -= tail;
				} else {
					skb_frag_t *frag =
						&skb_shinfo(skb)->frags[i - 1];
					frag->size -= tail;
					skb->data_len -= tail;
					skb->truesize -= tail;
				}
				return 0;
			}
			rx_pg = &bp->rx_pg_ring[pg_cons];

			pci_unmap_page(bp->pdev, pci_unmap_addr(rx_pg, mapping),
				       PAGE_SIZE, PCI_DMA_FROMDEVICE);

			/* Trim the FCS from the last fragment. */
			if (i == pages - 1)
				frag_len -= 4;

			skb_fill_page_desc(skb, i, rx_pg->page, 0, frag_len);
			rx_pg->page = NULL;

			err = bnx2_alloc_rx_page(bp, RX_PG_RING_IDX(pg_prod));
			if (unlikely(err)) {
				/* No replacement page: give back what was
				 * attached so far and drop the packet.
				 */
				bnapi->rx_pg_cons = pg_cons;
				bnapi->rx_pg_prod = pg_prod;
				bnx2_reuse_rx_skb_pages(bp, bnapi, skb,
							pages - i);
				return err;
			}

			frag_size -= frag_len;
			skb->data_len += frag_len;
			skb->truesize += frag_len;
			skb->len += frag_len;

			pg_prod = NEXT_RX_BD(pg_prod);
			pg_cons = RX_PG_RING_IDX(NEXT_RX_BD(pg_cons));
		}
		bnapi->rx_pg_prod = pg_prod;
		bnapi->rx_pg_cons = pg_cons;
	}
	return 0;
}
2681
2682 static inline u16
2683 bnx2_get_hw_rx_cons(struct bnx2_napi *bnapi)
2684 {
2685         u16 cons = bnapi->status_blk->status_rx_quick_consumer_index0;
2686
2687         if (unlikely((cons & MAX_RX_DESC_CNT) == MAX_RX_DESC_CNT))
2688                 cons++;
2689         return cons;
2690 }
2691
/* Service received packets, up to @budget.  For each completed rx BD:
 * validate the l2_fhdr status, copy small packets into a fresh skb
 * (recycling the original buffer), hand large ones to bnx2_rx_skb(),
 * then pass the packet up via NAPI with checksum/VLAN offload results
 * applied.  Finally the ring mailbox registers are updated to tell
 * the chip how far the host has advanced.  Returns the number of
 * packets processed.
 */
static int
bnx2_rx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
{
	u16 hw_cons, sw_cons, sw_ring_cons, sw_prod, sw_ring_prod;
	struct l2_fhdr *rx_hdr;
	int rx_pkt = 0, pg_ring_used = 0;

	hw_cons = bnx2_get_hw_rx_cons(bnapi);
	sw_cons = bnapi->rx_cons;
	sw_prod = bnapi->rx_prod;

	/* Memory barrier necessary as speculative reads of the rx
	 * buffer can be ahead of the index in the status block
	 */
	rmb();
	while (sw_cons != hw_cons) {
		unsigned int len, hdr_len;
		u32 status;
		struct sw_bd *rx_buf;
		struct sk_buff *skb;
		dma_addr_t dma_addr;

		sw_ring_cons = RX_RING_IDX(sw_cons);
		sw_ring_prod = RX_RING_IDX(sw_prod);

		rx_buf = &bp->rx_buf_ring[sw_ring_cons];
		skb = rx_buf->skb;

		rx_buf->skb = NULL;

		dma_addr = pci_unmap_addr(rx_buf, mapping);

		/* Sync only the header area to the CPU; the rest is
		 * either fully unmapped later or never touched.
		 */
		pci_dma_sync_single_for_cpu(bp->pdev, dma_addr,
			bp->rx_offset + RX_COPY_THRESH, PCI_DMA_FROMDEVICE);

		/* The chip deposits an l2_fhdr ahead of the frame data. */
		rx_hdr = (struct l2_fhdr *) skb->data;
		len = rx_hdr->l2_fhdr_pkt_len;

		if ((status = rx_hdr->l2_fhdr_status) &
			(L2_FHDR_ERRORS_BAD_CRC |
			L2_FHDR_ERRORS_PHY_DECODE |
			L2_FHDR_ERRORS_ALIGNMENT |
			L2_FHDR_ERRORS_TOO_SHORT |
			L2_FHDR_ERRORS_GIANT_FRAME)) {

			/* Bad frame: recycle the buffer and drop it. */
			bnx2_reuse_rx_skb(bp, bnapi, skb, sw_ring_cons,
					  sw_ring_prod);
			goto next_rx;
		}
		hdr_len = 0;
		if (status & L2_FHDR_STATUS_SPLIT) {
			/* NOTE(review): for split frames the header length
			 * arrives in the ip_xsum field (field reuse by the
			 * firmware) -- confirm against the l2_fhdr layout.
			 */
			hdr_len = rx_hdr->l2_fhdr_ip_xsum;
			pg_ring_used = 1;
		} else if (len > bp->rx_jumbo_thresh) {
			hdr_len = bp->rx_jumbo_thresh;
			pg_ring_used = 1;
		}

		/* Strip the 4-byte FCS. */
		len -= 4;

		if (len <= bp->rx_copy_thresh) {
			struct sk_buff *new_skb;

			new_skb = netdev_alloc_skb(bp->dev, len + 2);
			if (new_skb == NULL) {
				bnx2_reuse_rx_skb(bp, bnapi, skb, sw_ring_cons,
						  sw_ring_prod);
				goto next_rx;
			}

			/* aligned copy */
			skb_copy_from_linear_data_offset(skb, bp->rx_offset - 2,
				      new_skb->data, len + 2);
			skb_reserve(new_skb, 2);
			skb_put(new_skb, len);

			/* The original buffer stays on the ring for reuse. */
			bnx2_reuse_rx_skb(bp, bnapi, skb,
				sw_ring_cons, sw_ring_prod);

			skb = new_skb;
		} else if (unlikely(bnx2_rx_skb(bp, bnapi, skb, len, hdr_len,
			   dma_addr, (sw_ring_cons << 16) | sw_ring_prod)))
			goto next_rx;

		skb->protocol = eth_type_trans(skb, bp->dev);

		/* Drop frames longer than the MTU unless they carry a
		 * VLAN ethertype (0x8100).
		 */
		if ((len > (bp->dev->mtu + ETH_HLEN)) &&
			(ntohs(skb->protocol) != 0x8100)) {

			dev_kfree_skb(skb);
			goto next_rx;

		}

		/* Report hardware checksum results for TCP/UDP frames. */
		skb->ip_summed = CHECKSUM_NONE;
		if (bp->rx_csum &&
			(status & (L2_FHDR_STATUS_TCP_SEGMENT |
			L2_FHDR_STATUS_UDP_DATAGRAM))) {

			if (likely((status & (L2_FHDR_ERRORS_TCP_XSUM |
					      L2_FHDR_ERRORS_UDP_XSUM)) == 0))
				skb->ip_summed = CHECKSUM_UNNECESSARY;
		}

#ifdef BCM_VLAN
		if ((status & L2_FHDR_STATUS_L2_VLAN_TAG) && bp->vlgrp) {
			vlan_hwaccel_receive_skb(skb, bp->vlgrp,
				rx_hdr->l2_fhdr_vlan_tag);
		}
		else
#endif
			netif_receive_skb(skb);

		bp->dev->last_rx = jiffies;
		rx_pkt++;

next_rx:
		sw_cons = NEXT_RX_BD(sw_cons);
		sw_prod = NEXT_RX_BD(sw_prod);

		if ((rx_pkt == budget))
			break;

		/* Refresh hw_cons to see if there is new work */
		if (sw_cons == hw_cons) {
			hw_cons = bnx2_get_hw_rx_cons(bnapi);
			rmb();
		}
	}
	bnapi->rx_cons = sw_cons;
	bnapi->rx_prod = sw_prod;

	/* Advance the chip-side ring indices via the context mailbox. */
	if (pg_ring_used)
		REG_WR16(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_PG_BDIDX,
			 bnapi->rx_pg_prod);

	REG_WR16(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BDIDX, sw_prod);

	REG_WR(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BSEQ, bnapi->rx_prod_bseq);

	mmiowb();

	return rx_pkt;

}
2837
/* MSI ISR - The only difference between this and the INTx ISR
 * is that the MSI interrupt is always serviced.
 */
static irqreturn_t
bnx2_msi(int irq, void *dev_instance)
{
	struct net_device *dev = dev_instance;
	struct bnx2 *bp = netdev_priv(dev);
	struct bnx2_napi *bnapi = &bp->bnx2_napi[0];

	prefetch(bnapi->status_blk);
	/* Mask further interrupts until NAPI processing completes. */
	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
		BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
		BNX2_PCICFG_INT_ACK_CMD_MASK_INT);

	/* Return here if interrupt is disabled. */
	if (unlikely(atomic_read(&bp->intr_sem) != 0))
		return IRQ_HANDLED;

	netif_rx_schedule(dev, &bnapi->napi);

	return IRQ_HANDLED;
}
2861
/* One-shot MSI ISR.  Unlike bnx2_msi(), no interrupt-mask write is
 * issued here -- NOTE(review): one-shot mode presumably auto-masks
 * further interrupts in hardware; confirm against the one-shot MSI
 * setup code.
 */
static irqreturn_t
bnx2_msi_1shot(int irq, void *dev_instance)
{
	struct net_device *dev = dev_instance;
	struct bnx2 *bp = netdev_priv(dev);
	struct bnx2_napi *bnapi = &bp->bnx2_napi[0];

	prefetch(bnapi->status_blk);

	/* Return here if interrupt is disabled. */
	if (unlikely(atomic_read(&bp->intr_sem) != 0))
		return IRQ_HANDLED;

	netif_rx_schedule(dev, &bnapi->napi);

	return IRQ_HANDLED;
}
2879
/* INTx ISR (the line may be shared): confirm the interrupt is really
 * ours, ack and mask it, then hand processing off to NAPI.
 */
static irqreturn_t
bnx2_interrupt(int irq, void *dev_instance)
{
	struct net_device *dev = dev_instance;
	struct bnx2 *bp = netdev_priv(dev);
	struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
	struct status_block *sblk = bnapi->status_blk;

	/* When using INTx, it is possible for the interrupt to arrive
	 * at the CPU before the status block posted prior to the
	 * interrupt. Reading a register will flush the status block.
	 * When using MSI, the MSI message will always complete after
	 * the status block write.
	 */
	if ((sblk->status_idx == bnapi->last_status_idx) &&
	    (REG_RD(bp, BNX2_PCICFG_MISC_STATUS) &
	     BNX2_PCICFG_MISC_STATUS_INTA_VALUE))
		return IRQ_NONE;

	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
		BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
		BNX2_PCICFG_INT_ACK_CMD_MASK_INT);

	/* Read back to deassert IRQ immediately to avoid too many
	 * spurious interrupts.
	 */
	REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD);

	/* Return here if interrupt is shared and is disabled. */
	if (unlikely(atomic_read(&bp->intr_sem) != 0))
		return IRQ_HANDLED;

	if (netif_rx_schedule_prep(dev, &bnapi->napi)) {
		/* Record the status index we are servicing so the poll
		 * loop can detect new status block updates.
		 */
		bnapi->last_status_idx = sblk->status_idx;
		__netif_rx_schedule(dev, &bnapi->napi);
	}

	return IRQ_HANDLED;
}
2919
/* MSI-X ISR for the dedicated tx vector: schedule its NAPI instance
 * (serviced by bnx2_tx_poll()).
 */
static irqreturn_t
bnx2_tx_msix(int irq, void *dev_instance)
{
	struct net_device *dev = dev_instance;
	struct bnx2 *bp = netdev_priv(dev);
	struct bnx2_napi *bnapi = &bp->bnx2_napi[BNX2_TX_VEC];

	prefetch(bnapi->status_blk_msix);

	/* Return here if interrupt is disabled. */
	if (unlikely(atomic_read(&bp->intr_sem) != 0))
		return IRQ_HANDLED;

	netif_rx_schedule(dev, &bnapi->napi);
	return IRQ_HANDLED;
}
2936
2937 #define STATUS_ATTN_EVENTS      (STATUS_ATTN_BITS_LINK_STATE | \
2938                                  STATUS_ATTN_BITS_TIMER_ABORT)
2939
2940 static inline int
2941 bnx2_has_work(struct bnx2_napi *bnapi)
2942 {
2943         struct status_block *sblk = bnapi->status_blk;
2944
2945         if ((bnx2_get_hw_rx_cons(bnapi) != bnapi->rx_cons) ||
2946             (bnx2_get_hw_tx_cons(bnapi) != bnapi->hw_tx_cons))
2947                 return 1;
2948
2949         if ((sblk->status_attn_bits & STATUS_ATTN_EVENTS) !=
2950             (sblk->status_attn_bits_ack & STATUS_ATTN_EVENTS))
2951                 return 1;
2952
2953         return 0;
2954 }
2955
/* NAPI poll handler for the tx-only MSI-X vector: reap completed tx
 * BDs until either the budget is exhausted or the hardware consumer
 * index stops advancing, then complete NAPI and re-enable this
 * vector's interrupt with the last processed status index.
 */
static int bnx2_tx_poll(struct napi_struct *napi, int budget)
{
	struct bnx2_napi *bnapi = container_of(napi, struct bnx2_napi, napi);
	struct bnx2 *bp = bnapi->bp;
	int work_done = 0;
	struct status_block_msix *sblk = bnapi->status_blk_msix;

	do {
		work_done += bnx2_tx_int(bp, bnapi, budget - work_done);
		if (unlikely(work_done >= budget))
			return work_done;

		/* Record the status index before re-checking for work so
		 * the value acked to the chip below is never newer than
		 * what was actually processed.
		 */
		bnapi->last_status_idx = sblk->status_idx;
		rmb();
	} while (bnx2_get_hw_tx_cons(bnapi) != bnapi->hw_tx_cons);

	netif_rx_complete(bp->dev, napi);
	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
	       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
	       bnapi->last_status_idx);
	return work_done;
}
2978
/* Perform one round of NAPI work: service PHY/attention events, reap
 * completed tx BDs, and receive packets up to the remaining budget.
 * Returns the accumulated rx packet count; tx completion work does
 * not count against the budget (bnx2_tx_int is called with budget 0).
 */
static int bnx2_poll_work(struct bnx2 *bp, struct bnx2_napi *bnapi,
			  int work_done, int budget)
{
	struct status_block *sblk = bnapi->status_blk;
	u32 status_attn_bits = sblk->status_attn_bits;
	u32 status_attn_bits_ack = sblk->status_attn_bits_ack;

	/* An attention event is pending when a bit differs between the
	 * raw and acknowledged attention words.
	 */
	if ((status_attn_bits & STATUS_ATTN_EVENTS) !=
	    (status_attn_bits_ack & STATUS_ATTN_EVENTS)) {

		bnx2_phy_int(bp, bnapi);

		/* This is needed to take care of transient status
		 * during link changes.
		 */
		REG_WR(bp, BNX2_HC_COMMAND,
		       bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
		REG_RD(bp, BNX2_HC_COMMAND);
	}

	if (bnx2_get_hw_tx_cons(bnapi) != bnapi->hw_tx_cons)
		bnx2_tx_int(bp, bnapi, 0);

	if (bnx2_get_hw_rx_cons(bnapi) != bnapi->rx_cons)
		work_done += bnx2_rx_int(bp, bnapi, budget - work_done);

	return work_done;
}
3007
/* Main NAPI poll handler (INTx/MSI path): loop doing rx/tx/attention
 * work until the budget is exhausted or no work remains, then
 * complete NAPI and re-enable interrupts, acking the last processed
 * status index to the chip.
 */
static int bnx2_poll(struct napi_struct *napi, int budget)
{
	struct bnx2_napi *bnapi = container_of(napi, struct bnx2_napi, napi);
	struct bnx2 *bp = bnapi->bp;
	int work_done = 0;
	struct status_block *sblk = bnapi->status_blk;

	while (1) {
		work_done = bnx2_poll_work(bp, bnapi, work_done, budget);

		if (unlikely(work_done >= budget))
			break;

		/* bnapi->last_status_idx is used below to tell the hw how
		 * much work has been processed, so we must read it before
		 * checking for more work.
		 */
		bnapi->last_status_idx = sblk->status_idx;
		rmb();
		if (likely(!bnx2_has_work(bnapi))) {
			netif_rx_complete(bp->dev, napi);
			if (likely(bp->flags & BNX2_FLAG_USING_MSI_OR_MSIX)) {
				REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
				       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
				       bnapi->last_status_idx);
				break;
			}
			/* INTx: two writes, the first with MASK_INT set --
			 * NOTE(review): presumably to update the index
			 * before unmasking; confirm against chip docs.
			 */
			REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
			       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
			       BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
			       bnapi->last_status_idx);

			REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
			       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
			       bnapi->last_status_idx);
			break;
		}
	}

	return work_done;
}
3049
/* Called with rtnl_lock from vlan functions and also netif_tx_lock
 * from set_multicast.
 *
 * Program the EMAC rx mode and the RPM sort-user0 registers from the
 * device flags (promiscuous / allmulti) and the multicast list.
 */
static void
bnx2_set_rx_mode(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	u32 rx_mode, sort_mode;
	int i;

	spin_lock_bh(&bp->phy_lock);

	rx_mode = bp->rx_mode & ~(BNX2_EMAC_RX_MODE_PROMISCUOUS |
				  BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG);
	sort_mode = 1 | BNX2_RPM_SORT_USER0_BC_EN;
#ifdef BCM_VLAN
	/* Keep VLAN tags on rx only when no vlan group is registered
	 * and ASF is not active.
	 */
	if (!bp->vlgrp && !(bp->flags & BNX2_FLAG_ASF_ENABLE))
		rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
#else
	if (!(bp->flags & BNX2_FLAG_ASF_ENABLE))
		rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
#endif
	if (dev->flags & IFF_PROMISC) {
		/* Promiscuous mode. */
		rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
		sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN |
			     BNX2_RPM_SORT_USER0_PROM_VLAN;
	}
	else if (dev->flags & IFF_ALLMULTI) {
		/* Accept all multicast: set every bit of the hash filter. */
		for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
			REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
			       0xffffffff);
		}
		sort_mode |= BNX2_RPM_SORT_USER0_MC_EN;
	}
	else {
		/* Accept one or more multicast(s). */
		struct dev_mc_list *mclist;
		u32 mc_filter[NUM_MC_HASH_REGISTERS];
		u32 regidx;
		u32 bit;
		u32 crc;

		memset(mc_filter, 0, 4 * NUM_MC_HASH_REGISTERS);

		for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
		     i++, mclist = mclist->next) {

			/* The low CRC byte selects one of 256 hash bits:
			 * bits 5-7 pick the register, bits 0-4 the bit.
			 */
			crc = ether_crc_le(ETH_ALEN, mclist->dmi_addr);
			bit = crc & 0xff;
			regidx = (bit & 0xe0) >> 5;
			bit &= 0x1f;
			mc_filter[regidx] |= (1 << bit);
		}

		for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
			REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
			       mc_filter[i]);
		}

		sort_mode |= BNX2_RPM_SORT_USER0_MC_HSH_EN;
	}

	/* Skip the register write if the mode is unchanged. */
	if (rx_mode != bp->rx_mode) {
		bp->rx_mode = rx_mode;
		REG_WR(bp, BNX2_EMAC_RX_MODE, rx_mode);
	}

	/* Disable sorting, program the new sort mode, then enable it. */
	REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
	REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode);
	REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode | BNX2_RPM_SORT_USER0_ENA);

	spin_unlock_bh(&bp->phy_lock);
}
3124
/* Load an RV2P processor's instruction memory.  @rv2p_code holds
 * little-endian 8-byte instructions (high word, low word) which are
 * staged through the INSTR_HIGH/INSTR_LOW registers and committed one
 * at a time via the processor's ADDR_CMD register.  The processor is
 * left in reset; un-stall happens later (see comment below).
 */
static void
load_rv2p_fw(struct bnx2 *bp, __le32 *rv2p_code, u32 rv2p_code_len,
	u32 rv2p_proc)
{
	int i;
	u32 val;


	for (i = 0; i < rv2p_code_len; i += 8) {
		REG_WR(bp, BNX2_RV2P_INSTR_HIGH, le32_to_cpu(*rv2p_code));
		rv2p_code++;
		REG_WR(bp, BNX2_RV2P_INSTR_LOW, le32_to_cpu(*rv2p_code));
		rv2p_code++;

		if (rv2p_proc == RV2P_PROC1) {
			/* i/8 is the instruction index being committed. */
			val = (i / 8) | BNX2_RV2P_PROC1_ADDR_CMD_RDWR;
			REG_WR(bp, BNX2_RV2P_PROC1_ADDR_CMD, val);
		}
		else {
			val = (i / 8) | BNX2_RV2P_PROC2_ADDR_CMD_RDWR;
			REG_WR(bp, BNX2_RV2P_PROC2_ADDR_CMD, val);
		}
	}

	/* Reset the processor, un-stall is done later. */
	if (rv2p_proc == RV2P_PROC1) {
		REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC1_RESET);
	}
	else {
		REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC2_RESET);
	}
}
3157
3158 static int
3159 load_cpu_fw(struct bnx2 *bp, struct cpu_reg *cpu_reg, struct fw_info *fw)
3160 {
3161         u32 offset;
3162         u32 val;
3163         int rc;
3164
3165         /* Halt the CPU. */
3166         val = bnx2_reg_rd_ind(bp, cpu_reg->mode);
3167         val |= cpu_reg->mode_value_halt;
3168         bnx2_reg_wr_ind(bp, cpu_reg->mode, val);
3169         bnx2_reg_wr_ind(bp, cpu_reg->state, cpu_reg->state_value_clear);
3170
3171         /* Load the Text area. */
3172         offset = cpu_reg->spad_base + (fw->text_addr - cpu_reg->mips_view_base);
3173         if (fw->gz_text) {
3174                 int j;
3175
3176                 rc = zlib_inflate_blob(fw->text, FW_BUF_SIZE, fw->gz_text,
3177                                        fw->gz_text_len);
3178                 if (rc < 0)
3179                         return rc;
3180
3181                 for (j = 0; j < (fw->text_len / 4); j++, offset += 4) {
3182                         bnx2_reg_wr_ind(bp, offset, le32_to_cpu(fw->text[j]));
3183                 }
3184         }
3185
3186         /* Load the Data area. */
3187         offset = cpu_reg->spad_base + (fw->data_addr - cpu_reg->mips_view_base);
3188         if (fw->data) {
3189                 int j;
3190
3191                 for (j = 0; j < (fw->data_len / 4); j++, offset += 4) {
3192                         bnx2_reg_wr_ind(bp, offset, fw->data[j]);
3193                 }
3194         }
3195
3196         /* Load the SBSS area. */
3197         offset = cpu_reg->spad_base + (fw->sbss_addr - cpu_reg->mips_view_base);
3198         if (fw->sbss_len) {
3199                 int j;
3200
3201                 for (j = 0; j < (fw->sbss_len / 4); j++, offset += 4) {
3202                         bnx2_reg_wr_ind(bp, offset, 0);
3203                 }
3204         }
3205
3206         /* Load the BSS area. */
3207         offset = cpu_reg->spad_base + (fw->bss_addr - cpu_reg->mips_view_base);
3208         if (fw->bss_len) {
3209                 int j;
3210
3211                 for (j = 0; j < (fw->bss_len/4); j++, offset += 4) {
3212                         bnx2_reg_wr_ind(bp, offset, 0);
3213                 }
3214         }
3215
3216         /* Load the Read-Only area. */
3217         offset = cpu_reg->spad_base +
3218                 (fw->rodata_addr - cpu_reg->mips_view_base);
3219         if (fw->rodata) {
3220                 int j;
3221
3222                 for (j = 0; j < (fw->rodata_len / 4); j++, offset += 4) {
3223                         bnx2_reg_wr_ind(bp, offset, fw->rodata[j]);
3224                 }
3225         }
3226
3227         /* Clear the pre-fetch instruction. */
3228         bnx2_reg_wr_ind(bp, cpu_reg->inst, 0);
3229         bnx2_reg_wr_ind(bp, cpu_reg->pc, fw->start_addr);
3230
3231         /* Start the CPU. */
3232         val = bnx2_reg_rd_ind(bp, cpu_reg->mode);
3233         val &= ~cpu_reg->mode_value_halt;
3234         bnx2_reg_wr_ind(bp, cpu_reg->state, cpu_reg->state_value_clear);
3235         bnx2_reg_wr_ind(bp, cpu_reg->mode, val);
3236
3237         return 0;
3238 }
3239
3240 static int
3241 bnx2_init_cpus(struct bnx2 *bp)
3242 {
3243         struct cpu_reg cpu_reg;
3244         struct fw_info *fw;
3245         int rc, rv2p_len;
3246         void *text, *rv2p;
3247
3248         /* Initialize the RV2P processor. */
3249         text = vmalloc(FW_BUF_SIZE);
3250         if (!text)
3251                 return -ENOMEM;
3252         if (CHIP_NUM(bp) == CHIP_NUM_5709) {
3253                 rv2p = bnx2_xi_rv2p_proc1;
3254                 rv2p_len = sizeof(bnx2_xi_rv2p_proc1);
3255         } else {
3256                 rv2p = bnx2_rv2p_proc1;
3257                 rv2p_len = sizeof(bnx2_rv2p_proc1);
3258         }
3259         rc = zlib_inflate_blob(text, FW_BUF_SIZE, rv2p, rv2p_len);
3260         if (rc < 0)
3261                 goto init_cpu_err;
3262
3263         load_rv2p_fw(bp, text, rc /* == len */, RV2P_PROC1);
3264
3265         if (CHIP_NUM(bp) == CHIP_NUM_5709) {
3266                 rv2p = bnx2_xi_rv2p_proc2;
3267                 rv2p_len = sizeof(bnx2_xi_rv2p_proc2);
3268         } else {
3269                 rv2p = bnx2_rv2p_proc2;
3270                 rv2p_len = sizeof(bnx2_rv2p_proc2);
3271         }
3272         rc = zlib_inflate_blob(text, FW_BUF_SIZE, rv2p, rv2p_len);
3273         if (rc < 0)
3274                 goto init_cpu_err;
3275
3276         load_rv2p_fw(bp, text, rc /* == len */, RV2P_PROC2);
3277
3278         /* Initialize the RX Processor. */
3279         cpu_reg.mode = BNX2_RXP_CPU_MODE;
3280         cpu_reg.mode_value_halt = BNX2_RXP_CPU_MODE_SOFT_HALT;
3281         cpu_reg.mode_value_sstep = BNX2_RXP_CPU_MODE_STEP_ENA;
3282         cpu_reg.state = BNX2_RXP_CPU_STATE;
3283         cpu_reg.state_value_clear = 0xffffff;
3284         cpu_reg.gpr0 = BNX2_RXP_CPU_REG_FILE;
3285         cpu_reg.evmask = BNX2_RXP_CPU_EVENT_MASK;
3286         cpu_reg.pc = BNX2_RXP_CPU_PROGRAM_COUNTER;
3287         cpu_reg.inst = BNX2_RXP_CPU_INSTRUCTION;
3288         cpu_reg.bp = BNX2_RXP_CPU_HW_BREAKPOINT;
3289         cpu_reg.spad_base = BNX2_RXP_SCRATCH;
3290         cpu_reg.mips_view_base = 0x8000000;
3291
3292         if (CHIP_NUM(bp) == CHIP_NUM_5709)
3293                 fw = &bnx2_rxp_fw_09;
3294         else
3295                 fw = &bnx2_rxp_fw_06;
3296
3297         fw->text = text;
3298         rc = load_cpu_fw(bp, &cpu_reg, fw);
3299         if (rc)
3300                 goto init_cpu_err;
3301
3302         /* Initialize the TX Processor. */
3303         cpu_reg.mode = BNX2_TXP_CPU_MODE;
3304         cpu_reg.mode_value_halt = BNX2_TXP_CPU_MODE_SOFT_HALT;
3305         cpu_reg.mode_value_sstep = BNX2_TXP_CPU_MODE_STEP_ENA;
3306         cpu_reg.state = BNX2_TXP_CPU_STATE;
3307         cpu_reg.state_value_clear = 0xffffff;
3308         cpu_reg.gpr0 = BNX2_TXP_CPU_REG_FILE;
3309         cpu_reg.evmask = BNX2_TXP_CPU_EVENT_MASK;
3310         cpu_reg.pc = BNX2_TXP_CPU_PROGRAM_COUNTER;
3311         cpu_reg.inst = BNX2_TXP_CPU_INSTRUCTION;
3312         cpu_reg.bp = BNX2_TXP_CPU_HW_BREAKPOINT;
3313         cpu_reg.spad_base = BNX2_TXP_SCRATCH;
3314         cpu_reg.mips_view_base = 0x8000000;
3315
3316         if (CHIP_NUM(bp) == CHIP_NUM_5709)
3317                 fw = &bnx2_txp_fw_09;
3318         else
3319                 fw = &bnx2_txp_fw_06;
3320
3321         fw->text = text;
3322         rc = load_cpu_fw(bp, &cpu_reg, fw);
3323         if (rc)
3324                 goto init_cpu_err;
3325
3326         /* Initialize the TX Patch-up Processor. */
3327         cpu_reg.mode = BNX2_TPAT_CPU_MODE;
3328         cpu_reg.mode_value_halt = BNX2_TPAT_CPU_MODE_SOFT_HALT;
3329         cpu_reg.mode_value_sstep = BNX2_TPAT_CPU_MODE_STEP_ENA;
3330         cpu_reg.state = BNX2_TPAT_CPU_STATE;
3331         cpu_reg.state_value_clear = 0xffffff;
3332         cpu_reg.gpr0 = BNX2_TPAT_CPU_REG_FILE;
3333         cpu_reg.evmask = BNX2_TPAT_CPU_EVENT_MASK;
3334         cpu_reg.pc = BNX2_TPAT_CPU_PROGRAM_COUNTER;
3335         cpu_reg.inst = BNX2_TPAT_CPU_INSTRUCTION;
3336         cpu_reg.bp = BNX2_TPAT_CPU_HW_BREAKPOINT;
3337         cpu_reg.spad_base = BNX2_TPAT_SCRATCH;
3338         cpu_reg.mips_view_base = 0x8000000;
3339
3340         if (CHIP_NUM(bp) == CHIP_NUM_5709)
3341                 fw = &bnx2_tpat_fw_09;
3342         else
3343                 fw = &bnx2_tpat_fw_06;
3344
3345         fw->text = text;
3346         rc = load_cpu_fw(bp, &cpu_reg, fw);
3347         if (rc)
3348                 goto init_cpu_err;
3349
3350         /* Initialize the Completion Processor. */
3351         cpu_reg.mode = BNX2_COM_CPU_MODE;
3352         cpu_reg.mode_value_halt = BNX2_COM_CPU_MODE_SOFT_HALT;
3353         cpu_reg.mode_value_sstep = BNX2_COM_CPU_MODE_STEP_ENA;
3354         cpu_reg.state = BNX2_COM_CPU_STATE;
3355         cpu_reg.state_value_clear = 0xffffff;
3356         cpu_reg.gpr0 = BNX2_COM_CPU_REG_FILE;
3357         cpu_reg.evmask = BNX2_COM_CPU_EVENT_MASK;
3358         cpu_reg.pc = BNX2_COM_CPU_PROGRAM_COUNTER;
3359         cpu_reg.inst = BNX2_COM_CPU_INSTRUCTION;
3360         cpu_reg.bp = BNX2_COM_CPU_HW_BREAKPOINT;
3361         cpu_reg.spad_base = BNX2_COM_SCRATCH;
3362         cpu_reg.mips_view_base = 0x8000000;
3363
3364         if (CHIP_NUM(bp) == CHIP_NUM_5709)
3365                 fw = &bnx2_com_fw_09;
3366         else
3367                 fw = &bnx2_com_fw_06;
3368
3369         fw->text = text;
3370         rc = load_cpu_fw(bp, &cpu_reg, fw);
3371         if (rc)
3372                 goto init_cpu_err;
3373
3374         /* Initialize the Command Processor. */
3375         cpu_reg.mode = BNX2_CP_CPU_MODE;
3376         cpu_reg.mode_value_halt = BNX2_CP_CPU_MODE_SOFT_HALT;
3377         cpu_reg.mode_value_sstep = BNX2_CP_CPU_MODE_STEP_ENA;
3378         cpu_reg.state = BNX2_CP_CPU_STATE;
3379         cpu_reg.state_value_clear = 0xffffff;
3380         cpu_reg.gpr0 = BNX2_CP_CPU_REG_FILE;
3381         cpu_reg.evmask = BNX2_CP_CPU_EVENT_MASK;
3382         cpu_reg.pc = BNX2_CP_CPU_PROGRAM_COUNTER;
3383         cpu_reg.inst = BNX2_CP_CPU_INSTRUCTION;
3384         cpu_reg.bp = BNX2_CP_CPU_HW_BREAKPOINT;
3385         cpu_reg.spad_base = BNX2_CP_SCRATCH;
3386         cpu_reg.mips_view_base = 0x8000000;
3387
3388         if (CHIP_NUM(bp) == CHIP_NUM_5709)
3389                 fw = &bnx2_cp_fw_09;
3390         else
3391                 fw = &bnx2_cp_fw_06;
3392
3393         fw->text = text;
3394         rc = load_cpu_fw(bp, &cpu_reg, fw);
3395
3396 init_cpu_err:
3397         vfree(text);
3398         return rc;
3399 }
3400
/* Transition the chip between PCI power states.
 *
 * PCI_D0:    wake the device (with the mandatory delay when leaving
 *            D3hot) and disarm the magic/ACPI packet wake-up modes.
 * PCI_D3hot: optionally arm Wake-on-LAN (forcing 10/100 autoneg on
 *            copper ports so the link stays up at low power), notify
 *            the firmware, then program PMCSR to enter D3hot.
 *
 * Returns 0 on success or -EINVAL for an unsupported target state.
 */
static int
bnx2_set_power_state(struct bnx2 *bp, pci_power_t state)
{
	u16 pmcsr;

	pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);

	switch (state) {
	case PCI_D0: {
		u32 val;

		/* Set the power state to D0 and clear any pending PME
		 * status in the same config write. */
		pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
			(pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
			PCI_PM_CTRL_PME_STATUS);

		if (pmcsr & PCI_PM_CTRL_STATE_MASK)
			/* delay required during transition out of D3hot */
			msleep(20);

		/* Ack/clear the wake-packet indications and disable
		 * magic-packet detection armed for D3hot. */
		val = REG_RD(bp, BNX2_EMAC_MODE);
		val |= BNX2_EMAC_MODE_MPKT_RCVD | BNX2_EMAC_MODE_ACPI_RCVD;
		val &= ~BNX2_EMAC_MODE_MPKT;
		REG_WR(bp, BNX2_EMAC_MODE, val);

		/* Disable ACPI pattern matching in the receive path. */
		val = REG_RD(bp, BNX2_RPM_CONFIG);
		val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
		REG_WR(bp, BNX2_RPM_CONFIG, val);
		break;
	}
	case PCI_D3hot: {
		int i;
		u32 val, wol_msg;

		if (bp->wol) {
			u32 advertising;
			u8 autoneg;

			/* Save autoneg settings; they are temporarily
			 * forced to 10/100 on copper for WOL and then
			 * restored below. */
			autoneg = bp->autoneg;
			advertising = bp->advertising;

			if (bp->phy_port == PORT_TP) {
				bp->autoneg = AUTONEG_SPEED;
				bp->advertising = ADVERTISED_10baseT_Half |
					ADVERTISED_10baseT_Full |
					ADVERTISED_100baseT_Half |
					ADVERTISED_100baseT_Full |
					ADVERTISED_Autoneg;
			}

			spin_lock_bh(&bp->phy_lock);
			bnx2_setup_phy(bp, bp->phy_port);
			spin_unlock_bh(&bp->phy_lock);

			bp->autoneg = autoneg;
			bp->advertising = advertising;

			bnx2_set_mac_addr(bp);

			val = REG_RD(bp, BNX2_EMAC_MODE);

			/* Enable port mode. */
			val &= ~BNX2_EMAC_MODE_PORT;
			val |= BNX2_EMAC_MODE_MPKT_RCVD |
			       BNX2_EMAC_MODE_ACPI_RCVD |
			       BNX2_EMAC_MODE_MPKT;
			if (bp->phy_port == PORT_TP)
				val |= BNX2_EMAC_MODE_PORT_MII;
			else {
				val |= BNX2_EMAC_MODE_PORT_GMII;
				if (bp->line_speed == SPEED_2500)
					val |= BNX2_EMAC_MODE_25G_MODE;
			}

			REG_WR(bp, BNX2_EMAC_MODE, val);

			/* receive all multicast */
			for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
				REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
				       0xffffffff);
			}
			REG_WR(bp, BNX2_EMAC_RX_MODE,
			       BNX2_EMAC_RX_MODE_SORT_MODE);

			/* Program RPM sort user 0: clear, load the
			 * broadcast/multicast value, then set ENA. */
			val = 1 | BNX2_RPM_SORT_USER0_BC_EN |
			      BNX2_RPM_SORT_USER0_MC_EN;
			REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
			REG_WR(bp, BNX2_RPM_SORT_USER0, val);
			REG_WR(bp, BNX2_RPM_SORT_USER0, val |
			       BNX2_RPM_SORT_USER0_ENA);

			/* Need to enable EMAC and RPM for WOL. */
			REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
			       BNX2_MISC_ENABLE_SET_BITS_RX_PARSER_MAC_ENABLE |
			       BNX2_MISC_ENABLE_SET_BITS_TX_HEADER_Q_ENABLE |
			       BNX2_MISC_ENABLE_SET_BITS_EMAC_ENABLE);

			val = REG_RD(bp, BNX2_RPM_CONFIG);
			val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
			REG_WR(bp, BNX2_RPM_CONFIG, val);

			wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
		}
		else {
			wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
		}

		/* Tell the firmware whether WOL is armed for this suspend. */
		if (!(bp->flags & BNX2_FLAG_NO_WOL))
			bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT3 | wol_msg, 0);

		pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
		/* NOTE(review): on 5706 A0/A1 the D3hot state bits are only
		 * set when WOL is enabled — presumably a chip erratum on
		 * those steppings; confirm before simplifying this branch. */
		if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
		    (CHIP_ID(bp) == CHIP_ID_5706_A1)) {

			if (bp->wol)
				pmcsr |= 3;
		}
		else {
			pmcsr |= 3;
		}
		if (bp->wol) {
			pmcsr |= PCI_PM_CTRL_PME_ENABLE;
		}
		pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
				      pmcsr);

		/* No more memory access after this point until
		 * device is brought back to D0.
		 */
		udelay(50);
		break;
	}
	default:
		return -EINVAL;
	}
	return 0;
}
3537
3538 static int
3539 bnx2_acquire_nvram_lock(struct bnx2 *bp)
3540 {
3541         u32 val;
3542         int j;
3543
3544         /* Request access to the flash interface. */
3545         REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_SET2);
3546         for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3547                 val = REG_RD(bp, BNX2_NVM_SW_ARB);
3548                 if (val & BNX2_NVM_SW_ARB_ARB_ARB2)
3549                         break;
3550
3551                 udelay(5);
3552         }
3553
3554         if (j >= NVRAM_TIMEOUT_COUNT)
3555                 return -EBUSY;
3556
3557         return 0;
3558 }
3559
3560 static int
3561 bnx2_release_nvram_lock(struct bnx2 *bp)
3562 {
3563         int j;
3564         u32 val;
3565
3566         /* Relinquish nvram interface. */
3567         REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_CLR2);
3568
3569         for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3570                 val = REG_RD(bp, BNX2_NVM_SW_ARB);
3571                 if (!(val & BNX2_NVM_SW_ARB_ARB_ARB2))
3572                         break;
3573
3574                 udelay(5);
3575         }
3576
3577         if (j >= NVRAM_TIMEOUT_COUNT)
3578                 return -EBUSY;
3579
3580         return 0;
3581 }
3582
3583
3584 static int
3585 bnx2_enable_nvram_write(struct bnx2 *bp)
3586 {
3587         u32 val;
3588
3589         val = REG_RD(bp, BNX2_MISC_CFG);
3590         REG_WR(bp, BNX2_MISC_CFG, val | BNX2_MISC_CFG_NVM_WR_EN_PCI);
3591
3592         if (bp->flash_info->flags & BNX2_NV_WREN) {
3593                 int j;
3594
3595                 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
3596                 REG_WR(bp, BNX2_NVM_COMMAND,
3597                        BNX2_NVM_COMMAND_WREN | BNX2_NVM_COMMAND_DOIT);
3598
3599                 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3600                         udelay(5);
3601
3602                         val = REG_RD(bp, BNX2_NVM_COMMAND);
3603                         if (val & BNX2_NVM_COMMAND_DONE)
3604                                 break;
3605                 }
3606
3607                 if (j >= NVRAM_TIMEOUT_COUNT)
3608                         return -EBUSY;
3609         }
3610         return 0;
3611 }
3612
3613 static void
3614 bnx2_disable_nvram_write(struct bnx2 *bp)
3615 {
3616         u32 val;
3617
3618         val = REG_RD(bp, BNX2_MISC_CFG);
3619         REG_WR(bp, BNX2_MISC_CFG, val & ~BNX2_MISC_CFG_NVM_WR_EN);
3620 }
3621
3622
3623 static void
3624 bnx2_enable_nvram_access(struct bnx2 *bp)
3625 {
3626         u32 val;
3627
3628         val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
3629         /* Enable both bits, even on read. */
3630         REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
3631                val | BNX2_NVM_ACCESS_ENABLE_EN | BNX2_NVM_ACCESS_ENABLE_WR_EN);
3632 }
3633
3634 static void
3635 bnx2_disable_nvram_access(struct bnx2 *bp)
3636 {
3637         u32 val;
3638
3639         val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
3640         /* Disable both bits, even after read. */
3641         REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
3642                 val & ~(BNX2_NVM_ACCESS_ENABLE_EN |
3643                         BNX2_NVM_ACCESS_ENABLE_WR_EN));
3644 }
3645
3646 static int
3647 bnx2_nvram_erase_page(struct bnx2 *bp, u32 offset)
3648 {
3649         u32 cmd;
3650         int j;
3651
3652         if (bp->flash_info->flags & BNX2_NV_BUFFERED)
3653                 /* Buffered flash, no erase needed */
3654                 return 0;
3655
3656         /* Build an erase command */
3657         cmd = BNX2_NVM_COMMAND_ERASE | BNX2_NVM_COMMAND_WR |
3658               BNX2_NVM_COMMAND_DOIT;
3659
3660         /* Need to clear DONE bit separately. */
3661         REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
3662
3663         /* Address of the NVRAM to read from. */
3664         REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
3665
3666         /* Issue an erase command. */
3667         REG_WR(bp, BNX2_NVM_COMMAND, cmd);
3668
3669         /* Wait for completion. */
3670         for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3671                 u32 val;
3672
3673                 udelay(5);
3674
3675                 val = REG_RD(bp, BNX2_NVM_COMMAND);
3676                 if (val & BNX2_NVM_COMMAND_DONE)
3677                         break;
3678         }
3679
3680         if (j >= NVRAM_TIMEOUT_COUNT)
3681                 return -EBUSY;
3682
3683         return 0;
3684 }
3685
3686 static int
3687 bnx2_nvram_read_dword(struct bnx2 *bp, u32 offset, u8 *ret_val, u32 cmd_flags)
3688 {
3689         u32 cmd;
3690         int j;
3691
3692         /* Build the command word. */
3693         cmd = BNX2_NVM_COMMAND_DOIT | cmd_flags;
3694
3695         /* Calculate an offset of a buffered flash, not needed for 5709. */
3696         if (bp->flash_info->flags & BNX2_NV_TRANSLATE) {
3697                 offset = ((offset / bp->flash_info->page_size) <<
3698                            bp->flash_info->page_bits) +
3699                           (offset % bp->flash_info->page_size);
3700         }
3701
3702         /* Need to clear DONE bit separately. */
3703         REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
3704
3705         /* Address of the NVRAM to read from. */
3706         REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
3707
3708         /* Issue a read command. */
3709         REG_WR(bp, BNX2_NVM_COMMAND, cmd);
3710
3711         /* Wait for completion. */
3712         for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3713                 u32 val;
3714
3715                 udelay(5);
3716
3717                 val = REG_RD(bp, BNX2_NVM_COMMAND);
3718                 if (val & BNX2_NVM_COMMAND_DONE) {
3719                         __be32 v = cpu_to_be32(REG_RD(bp, BNX2_NVM_READ));
3720                         memcpy(ret_val, &v, 4);
3721                         break;
3722                 }
3723         }
3724         if (j >= NVRAM_TIMEOUT_COUNT)
3725                 return -EBUSY;
3726
3727         return 0;
3728 }
3729
3730
3731 static int
3732 bnx2_nvram_write_dword(struct bnx2 *bp, u32 offset, u8 *val, u32 cmd_flags)
3733 {
3734         u32 cmd;
3735         __be32 val32;
3736         int j;
3737
3738         /* Build the command word. */
3739         cmd = BNX2_NVM_COMMAND_DOIT | BNX2_NVM_COMMAND_WR | cmd_flags;
3740
3741         /* Calculate an offset of a buffered flash, not needed for 5709. */
3742         if (bp->flash_info->flags & BNX2_NV_TRANSLATE) {
3743                 offset = ((offset / bp->flash_info->page_size) <<
3744                           bp->flash_info->page_bits) +
3745                          (offset % bp->flash_info->page_size);
3746         }
3747
3748         /* Need to clear DONE bit separately. */
3749         REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
3750
3751         memcpy(&val32, val, 4);
3752
3753         /* Write the data. */
3754         REG_WR(bp, BNX2_NVM_WRITE, be32_to_cpu(val32));
3755
3756         /* Address of the NVRAM to write to. */
3757         REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
3758
3759         /* Issue the write command. */
3760         REG_WR(bp, BNX2_NVM_COMMAND, cmd);
3761
3762         /* Wait for completion. */
3763         for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3764                 udelay(5);
3765
3766                 if (REG_RD(bp, BNX2_NVM_COMMAND) & BNX2_NVM_COMMAND_DONE)
3767                         break;
3768         }
3769         if (j >= NVRAM_TIMEOUT_COUNT)
3770                 return -EBUSY;
3771
3772         return 0;
3773 }
3774
/* Identify the attached flash/EEPROM and record it in bp->flash_info,
 * reconfiguring the NVM interface for the detected part when it has
 * not been set up yet.  bp->flash_size is taken from shared HW config
 * when present, otherwise from the matched flash table entry.
 * Returns 0 on success, -ENODEV if the part is not in flash_table, or
 * an error from the NVRAM lock.
 */
static int
bnx2_init_nvram(struct bnx2 *bp)
{
	u32 val;
	int j, entry_count, rc = 0;
	struct flash_spec *flash;

	/* The 5709 has a single, known flash interface; skip probing. */
	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		bp->flash_info = &flash_5709;
		goto get_flash_size;
	}

	/* Determine the selected interface. */
	val = REG_RD(bp, BNX2_NVM_CFG1);

	entry_count = ARRAY_SIZE(flash_table);

	if (val & 0x40000000) {

		/* Flash interface has been reconfigured */
		for (j = 0, flash = &flash_table[0]; j < entry_count;
		     j++, flash++) {
			/* Match on the backup-strap field of CFG1. */
			if ((val & FLASH_BACKUP_STRAP_MASK) ==
			    (flash->config1 & FLASH_BACKUP_STRAP_MASK)) {
				bp->flash_info = flash;
				break;
			}
		}
	}
	else {
		u32 mask;
		/* Not yet been reconfigured */

		/* Bit 23 selects which strap field identifies the part. */
		if (val & (1 << 23))
			mask = FLASH_BACKUP_STRAP_MASK;
		else
			mask = FLASH_STRAP_MASK;

		for (j = 0, flash = &flash_table[0]; j < entry_count;
			j++, flash++) {

			if ((val & mask) == (flash->strapping & mask)) {
				bp->flash_info = flash;

				/* Request access to the flash interface. */
				if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
					return rc;

				/* Enable access to flash interface */
				bnx2_enable_nvram_access(bp);

				/* Reconfigure the flash interface */
				REG_WR(bp, BNX2_NVM_CFG1, flash->config1);
				REG_WR(bp, BNX2_NVM_CFG2, flash->config2);
				REG_WR(bp, BNX2_NVM_CFG3, flash->config3);
				REG_WR(bp, BNX2_NVM_WRITE1, flash->write1);

				/* Disable access to flash interface */
				bnx2_disable_nvram_access(bp);
				bnx2_release_nvram_lock(bp);

				break;
			}
		}
	} /* if (val & 0x40000000) */

	/* Both loops above leave j == entry_count when no table entry
	 * matched the strapping. */
	if (j == entry_count) {
		bp->flash_info = NULL;
		printk(KERN_ALERT PFX "Unknown flash/EEPROM type.\n");
		return -ENODEV;
	}

get_flash_size:
	/* Prefer the NVM size advertised in shared HW config; fall back
	 * to the table's total size when the field is zero. */
	val = bnx2_shmem_rd(bp, BNX2_SHARED_HW_CFG_CONFIG2);
	val &= BNX2_SHARED_HW_CFG2_NVM_SIZE_MASK;
	if (val)
		bp->flash_size = val;
	else
		bp->flash_size = bp->flash_info->total_size;

	return rc;
}
3857
3858 static int
3859 bnx2_nvram_read(struct bnx2 *bp, u32 offset, u8 *ret_buf,
3860                 int buf_size)
3861 {
3862         int rc = 0;
3863         u32 cmd_flags, offset32, len32, extra;
3864
3865         if (buf_size == 0)
3866                 return 0;
3867
3868         /* Request access to the flash interface. */
3869         if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
3870                 return rc;
3871
3872         /* Enable access to flash interface */
3873         bnx2_enable_nvram_access(bp);
3874
3875         len32 = buf_size;
3876         offset32 = offset;
3877         extra = 0;
3878
3879         cmd_flags = 0;
3880
3881         if (offset32 & 3) {
3882                 u8 buf[4];
3883                 u32 pre_len;
3884
3885                 offset32 &= ~3;
3886                 pre_len = 4 - (offset & 3);
3887
3888                 if (pre_len >= len32) {
3889                         pre_len = len32;
3890                         cmd_flags = BNX2_NVM_COMMAND_FIRST |
3891                                     BNX2_NVM_COMMAND_LAST;
3892                 }
3893                 else {
3894                         cmd_flags = BNX2_NVM_COMMAND_FIRST;
3895                 }
3896
3897                 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
3898
3899                 if (rc)
3900                         return rc;
3901
3902                 memcpy(ret_buf, buf + (offset & 3), pre_len);
3903
3904                 offset32 += 4;
3905                 ret_buf += pre_len;
3906                 len32 -= pre_len;
3907         }
3908         if (len32 & 3) {
3909                 extra = 4 - (len32 & 3);
3910                 len32 = (len32 + 4) & ~3;
3911         }
3912
3913         if (len32 == 4) {
3914                 u8 buf[4];
3915
3916                 if (cmd_flags)
3917                         cmd_flags = BNX2_NVM_COMMAND_LAST;
3918                 else
3919                         cmd_flags = BNX2_NVM_COMMAND_FIRST |
3920                                     BNX2_NVM_COMMAND_LAST;
3921
3922                 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
3923
3924                 memcpy(ret_buf, buf, 4 - extra);
3925         }
3926         else if (len32 > 0) {
3927                 u8 buf[4];
3928
3929                 /* Read the first word. */
3930                 if (cmd_flags)
3931                         cmd_flags = 0;
3932                 else
3933                         cmd_flags = BNX2_NVM_COMMAND_FIRST;
3934
3935                 rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, cmd_flags);
3936
3937                 /* Advance to the next dword. */
3938                 offset32 += 4;
3939                 ret_buf += 4;
3940                 len32 -= 4;
3941
3942                 while (len32 > 4 && rc == 0) {
3943                         rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, 0);
3944
3945                         /* Advance to the next dword. */
3946                         offset32 += 4;
3947                         ret_buf += 4;
3948                         len32 -= 4;
3949                 }
3950
3951                 if (rc)
3952                         return rc;
3953
3954                 cmd_flags = BNX2_NVM_COMMAND_LAST;
3955                 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
3956
3957                 memcpy(ret_buf, buf, 4 - extra);
3958         }
3959
3960         /* Disable access to flash interface */
3961         bnx2_disable_nvram_access(bp);
3962
3963         bnx2_release_nvram_lock(bp);
3964
3965         return rc;
3966 }
3967
/* Write @buf_size bytes from @data_buf to NVRAM at byte @offset.
 *
 * Unaligned head/tail bytes are merged with the existing flash
 * contents via read-modify-write into a temporary aligned buffer.
 * Non-buffered flash is updated one page at a time: the page is read
 * out, erased, and rewritten with the new data spliced in; buffered
 * flash is written directly.  Returns 0 on success or a negative
 * errno (the nvram_write_end label frees any temporary buffers).
 */
static int
bnx2_nvram_write(struct bnx2 *bp, u32 offset, u8 *data_buf,
		int buf_size)
{
	u32 written, offset32, len32;
	u8 *buf, start[4], end[4], *align_buf = NULL, *flash_buffer = NULL;
	int rc = 0;
	int align_start, align_end;

	buf = data_buf;
	offset32 = offset;
	len32 = buf_size;
	align_start = align_end = 0;

	/* Unaligned start: fetch the dword that will be partially
	 * overwritten so its leading bytes can be preserved. */
	if ((align_start = (offset32 & 3))) {
		offset32 &= ~3;
		len32 += align_start;
		if (len32 < 4)
			len32 = 4;
		if ((rc = bnx2_nvram_read(bp, offset32, start, 4)))
			return rc;
	}

	/* Unaligned end: same for the trailing dword. */
	if (len32 & 3) {
		align_end = 4 - (len32 & 3);
		len32 += align_end;
		if ((rc = bnx2_nvram_read(bp, offset32 + len32 - 4, end, 4)))
			return rc;
	}

	/* Build an aligned copy of the data with the preserved head and
	 * tail bytes spliced in. */
	if (align_start || align_end) {
		align_buf = kmalloc(len32, GFP_KERNEL);
		if (align_buf == NULL)
			return -ENOMEM;
		if (align_start) {
			memcpy(align_buf, start, 4);
		}
		if (align_end) {
			memcpy(align_buf + len32 - 4, end, 4);
		}
		memcpy(align_buf + align_start, data_buf, buf_size);
		buf = align_buf;
	}

	/* Non-buffered flash needs a page-sized bounce buffer for the
	 * read-erase-rewrite cycle.
	 * NOTE(review): 264 is presumably the largest page size among
	 * the parts in flash_table — confirm if new parts are added. */
	if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
		flash_buffer = kmalloc(264, GFP_KERNEL);
		if (flash_buffer == NULL) {
			rc = -ENOMEM;
			goto nvram_write_end;
		}
	}

	/* Process one flash page per iteration. */
	written = 0;
	while ((written < len32) && (rc == 0)) {
		u32 page_start, page_end, data_start, data_end;
		u32 addr, cmd_flags;
		int i;

		/* Find the page_start addr */
		page_start = offset32 + written;
		page_start -= (page_start % bp->flash_info->page_size);
		/* Find the page_end addr */
		page_end = page_start + bp->flash_info->page_size;
		/* Find the data_start addr */
		data_start = (written == 0) ? offset32 : page_start;
		/* Find the data_end addr */
		data_end = (page_end > offset32 + len32) ?
			(offset32 + len32) : page_end;

		/* Request access to the flash interface. */
		if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
			goto nvram_write_end;

		/* Enable access to flash interface */
		bnx2_enable_nvram_access(bp);

		cmd_flags = BNX2_NVM_COMMAND_FIRST;
		if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
			int j;

			/* Read the whole page into the buffer
			 * (non-buffer flash only) */
			for (j = 0; j < bp->flash_info->page_size; j += 4) {
				if (j == (bp->flash_info->page_size - 4)) {
					cmd_flags |= BNX2_NVM_COMMAND_LAST;
				}
				rc = bnx2_nvram_read_dword(bp,
					page_start + j,
					&flash_buffer[j],
					cmd_flags);

				if (rc)
					goto nvram_write_end;

				cmd_flags = 0;
			}
		}

		/* Enable writes to flash interface (unlock write-protect) */
		if ((rc = bnx2_enable_nvram_write(bp)) != 0)
			goto nvram_write_end;

		/* Loop to write back the buffer data from page_start to
		 * data_start */
		i = 0;
		if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
			/* Erase the page */
			if ((rc = bnx2_nvram_erase_page(bp, page_start)) != 0)
				goto nvram_write_end;

			/* Re-enable the write again for the actual write */
			bnx2_enable_nvram_write(bp);

			for (addr = page_start; addr < data_start;
				addr += 4, i += 4) {

				rc = bnx2_nvram_write_dword(bp, addr,
					&flash_buffer[i], cmd_flags);

				if (rc != 0)
					goto nvram_write_end;

				cmd_flags = 0;
			}
		}

		/* Loop to write the new data from data_start to data_end */
		for (addr = data_start; addr < data_end; addr += 4, i += 4) {
			if ((addr == page_end - 4) ||
				((bp->flash_info->flags & BNX2_NV_BUFFERED) &&
				 (addr == data_end - 4))) {

				cmd_flags |= BNX2_NVM_COMMAND_LAST;
			}
			rc = bnx2_nvram_write_dword(bp, addr, buf,
				cmd_flags);

			if (rc != 0)
				goto nvram_write_end;

			cmd_flags = 0;
			buf += 4;
		}

		/* Loop to write back the buffer data from data_end
		 * to page_end */
		if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
			for (addr = data_end; addr < page_end;
				addr += 4, i += 4) {

				if (addr == page_end-4) {
					cmd_flags = BNX2_NVM_COMMAND_LAST;
				}
				rc = bnx2_nvram_write_dword(bp, addr,
					&flash_buffer[i], cmd_flags);

				if (rc != 0)
					goto nvram_write_end;

				cmd_flags = 0;
			}
		}

		/* Disable writes to flash interface (lock write-protect) */
		bnx2_disable_nvram_write(bp);

		/* Disable access to flash interface */
		bnx2_disable_nvram_access(bp);
		bnx2_release_nvram_lock(bp);

		/* Increment written */
		written += data_end - data_start;
	}

nvram_write_end:
	kfree(flash_buffer);
	kfree(align_buf);
	return rc;
}
4147
/* Detect bootcode support for managing the PHY remotely ("remote PHY")
 * on SerDes devices.  If supported, latch the PHY port type and current
 * link state from shared memory, and acknowledge the capability to the
 * firmware when the interface is up.  Called under bp->phy_lock (see
 * bnx2_reset_chip()).
 */
static void
bnx2_init_remote_phy(struct bnx2 *bp)
{
	u32 val;

	bp->phy_flags &= ~BNX2_PHY_FLAG_REMOTE_PHY_CAP;
	/* Remote PHY only applies to SerDes ports. */
	if (!(bp->phy_flags & BNX2_PHY_FLAG_SERDES))
		return;

	/* The capability word is only valid if it carries the signature. */
	val = bnx2_shmem_rd(bp, BNX2_FW_CAP_MB);
	if ((val & BNX2_FW_CAP_SIGNATURE_MASK) != BNX2_FW_CAP_SIGNATURE)
		return;

	if (val & BNX2_FW_CAP_REMOTE_PHY_CAPABLE) {
		bp->phy_flags |= BNX2_PHY_FLAG_REMOTE_PHY_CAP;

		/* Firmware reports whether the SerDes or copper port is
		 * active. */
		val = bnx2_shmem_rd(bp, BNX2_LINK_STATUS);
		if (val & BNX2_LINK_STATUS_SERDES_LINK)
			bp->phy_port = PORT_FIBRE;
		else
			bp->phy_port = PORT_TP;

		if (netif_running(bp->dev)) {
			u32 sig;

			/* Mirror the firmware-reported link state into the
			 * netdev carrier state. */
			if (val & BNX2_LINK_STATUS_LINK_UP) {
				bp->link_up = 1;
				netif_carrier_on(bp->dev);
			} else {
				bp->link_up = 0;
				netif_carrier_off(bp->dev);
			}
			/* Acknowledge remote PHY usage to the firmware. */
			sig = BNX2_DRV_ACK_CAP_SIGNATURE |
			      BNX2_FW_CAP_REMOTE_PHY_CAPABLE;
			bnx2_shmem_wr(bp, BNX2_DRV_ACK_CAP_MB, sig);
		}
	}
}
4186
/* Switch the GRC to separate-window mode and point windows 2 and 3 at
 * the chip's MSI-X table and pending-bit array so they can be accessed
 * through the register windows.
 */
static void
bnx2_setup_msix_tbl(struct bnx2 *bp)
{
	REG_WR(bp, BNX2_PCI_GRC_WINDOW_ADDR, BNX2_PCI_GRC_WINDOW_ADDR_SEP_WIN);

	REG_WR(bp, BNX2_PCI_GRC_WINDOW2_ADDR, BNX2_MSIX_TABLE_ADDR);
	REG_WR(bp, BNX2_PCI_GRC_WINDOW3_ADDR, BNX2_MSIX_PBA_ADDR);
}
4195
/* Soft-reset the chip and re-synchronize with the bootcode firmware.
 * @reset_code: BNX2_DRV_MSG_CODE_* reason reported to the firmware.
 * Returns 0 on success or a negative errno if the reset or the
 * firmware handshake fails.
 */
static int
bnx2_reset_chip(struct bnx2 *bp, u32 reset_code)
{
	u32 val;
	int i, rc = 0;
	u8 old_port;

	/* Wait for the current PCI transaction to complete before
	 * issuing a reset. */
	REG_WR(bp, BNX2_MISC_ENABLE_CLR_BITS,
	       BNX2_MISC_ENABLE_CLR_BITS_TX_DMA_ENABLE |
	       BNX2_MISC_ENABLE_CLR_BITS_DMA_ENGINE_ENABLE |
	       BNX2_MISC_ENABLE_CLR_BITS_RX_DMA_ENABLE |
	       BNX2_MISC_ENABLE_CLR_BITS_HOST_COALESCE_ENABLE);
	val = REG_RD(bp, BNX2_MISC_ENABLE_CLR_BITS);
	udelay(5);

	/* Wait for the firmware to tell us it is ok to issue a reset. */
	bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT0 | reset_code, 1);

	/* Deposit a driver reset signature so the firmware knows that
	 * this is a soft reset. */
	bnx2_shmem_wr(bp, BNX2_DRV_RESET_SIGNATURE,
		      BNX2_DRV_RESET_SIGNATURE_MAGIC);

	/* Do a dummy read to force the chip to complete all current transaction
	 * before we issue a reset. */
	val = REG_RD(bp, BNX2_MISC_ID);

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		/* 5709: reset through the MISC command register, then
		 * rewrite the register-window/byte-swap configuration via
		 * PCI config space. */
		REG_WR(bp, BNX2_MISC_COMMAND, BNX2_MISC_COMMAND_SW_RESET);
		REG_RD(bp, BNX2_MISC_COMMAND);
		udelay(5);

		val = BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
		      BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;

		pci_write_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG, val);

	} else {
		/* Older chips: request a core reset through the MISC
		 * config register, then poll until it clears. */
		val = BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
		      BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
		      BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;

		/* Chip reset. */
		REG_WR(bp, BNX2_PCICFG_MISC_CONFIG, val);

		/* Reading back any register after chip reset will hang the
		 * bus on 5706 A0 and A1.  The msleep below provides plenty
		 * of margin for write posting.
		 */
		if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
		    (CHIP_ID(bp) == CHIP_ID_5706_A1))
			msleep(20);

		/* Reset takes approximate 30 usec */
		for (i = 0; i < 10; i++) {
			val = REG_RD(bp, BNX2_PCICFG_MISC_CONFIG);
			if ((val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
				    BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) == 0)
				break;
			udelay(10);
		}

		if (val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
			   BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) {
			printk(KERN_ERR PFX "Chip reset did not complete\n");
			return -EBUSY;
		}
	}

	/* Make sure byte swapping is properly configured. */
	val = REG_RD(bp, BNX2_PCI_SWAP_DIAG0);
	if (val != 0x01020304) {
		printk(KERN_ERR PFX "Chip not in correct endian mode\n");
		return -ENODEV;
	}

	/* Wait for the firmware to finish its initialization. */
	rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT1 | reset_code, 0);
	if (rc)
		return rc;

	/* Re-query the remote PHY capability; if the active PHY port
	 * changed across the reset, reprogram the default remote link. */
	spin_lock_bh(&bp->phy_lock);
	old_port = bp->phy_port;
	bnx2_init_remote_phy(bp);
	if ((bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) &&
	    old_port != bp->phy_port)
		bnx2_set_default_remote_link(bp);
	spin_unlock_bh(&bp->phy_lock);

	if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
		/* Adjust the voltage regular to two steps lower.  The default
		 * of this register is 0x0000000e. */
		REG_WR(bp, BNX2_MISC_VREG_CONTROL, 0x000000fa);

		/* Remove bad rbuf memory from the free pool. */
		rc = bnx2_alloc_bad_rbuf(bp);
	}

	if (bp->flags & BNX2_FLAG_USING_MSIX)
		bnx2_setup_msix_tbl(bp);

	return rc;
}
4301
/* Program the chip after a reset: DMA/byte-swap configuration, context
 * memory, internal CPUs, MQ/RV2P/TBDR blocks, MTU and host-coalescing
 * parameters, then tell the firmware that initialization is complete.
 * Returns 0 on success or a negative errno.
 */
static int
bnx2_init_chip(struct bnx2 *bp)
{
	u32 val;
	int rc, i;

	/* Make sure the interrupt is not active. */
	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, BNX2_PCICFG_INT_ACK_CMD_MASK_INT);

	/* Build the DMA configuration: swap modes plus read/write
	 * channel counts in bits 12-15 and 16-19. */
	val = BNX2_DMA_CONFIG_DATA_BYTE_SWAP |
	      BNX2_DMA_CONFIG_DATA_WORD_SWAP |
#ifdef __BIG_ENDIAN
	      BNX2_DMA_CONFIG_CNTL_BYTE_SWAP |
#endif
	      BNX2_DMA_CONFIG_CNTL_WORD_SWAP |
	      DMA_READ_CHANS << 12 |
	      DMA_WRITE_CHANS << 16;

	val |= (0x2 << 20) | (1 << 11);

	if ((bp->flags & BNX2_FLAG_PCIX) && (bp->bus_speed_mhz == 133))
		val |= (1 << 23);

	if ((CHIP_NUM(bp) == CHIP_NUM_5706) &&
	    (CHIP_ID(bp) != CHIP_ID_5706_A0) && !(bp->flags & BNX2_FLAG_PCIX))
		val |= BNX2_DMA_CONFIG_CNTL_PING_PONG_DMA;

	REG_WR(bp, BNX2_DMA_CONFIG, val);

	if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
		val = REG_RD(bp, BNX2_TDMA_CONFIG);
		val |= BNX2_TDMA_CONFIG_ONE_DMA;
		REG_WR(bp, BNX2_TDMA_CONFIG, val);
	}

	if (bp->flags & BNX2_FLAG_PCIX) {
		u16 val16;

		/* Clear the enable-relaxed-ordering bit in the PCI-X
		 * command register. */
		pci_read_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
				     &val16);
		pci_write_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
				      val16 & ~PCI_X_CMD_ERO);
	}

	REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
	       BNX2_MISC_ENABLE_SET_BITS_HOST_COALESCE_ENABLE |
	       BNX2_MISC_ENABLE_STATUS_BITS_RX_V2P_ENABLE |
	       BNX2_MISC_ENABLE_STATUS_BITS_CONTEXT_ENABLE);

	/* Initialize context mapping and zero out the quick contexts.  The
	 * context block must have already been enabled. */
	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		rc = bnx2_init_5709_context(bp);
		if (rc)
			return rc;
	} else
		bnx2_init_context(bp);

	/* Load firmware into the on-chip processors. */
	if ((rc = bnx2_init_cpus(bp)) != 0)
		return rc;

	bnx2_init_nvram(bp);

	bnx2_set_mac_addr(bp);

	val = REG_RD(bp, BNX2_MQ_CONFIG);
	val &= ~BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE;
	val |= BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE_256;
	if (CHIP_ID(bp) == CHIP_ID_5709_A0 || CHIP_ID(bp) == CHIP_ID_5709_A1)
		val |= BNX2_MQ_CONFIG_HALT_DIS;

	REG_WR(bp, BNX2_MQ_CONFIG, val);

	/* Set the kernel bypass window of the mailbox queue. */
	val = 0x10000 + (MAX_CID_CNT * MB_KERNEL_CTX_SIZE);
	REG_WR(bp, BNX2_MQ_KNL_BYP_WIND_START, val);
	REG_WR(bp, BNX2_MQ_KNL_WIND_END, val);

	val = (BCM_PAGE_BITS - 8) << 24;
	REG_WR(bp, BNX2_RV2P_CONFIG, val);

	/* Configure page size. */
	val = REG_RD(bp, BNX2_TBDR_CONFIG);
	val &= ~BNX2_TBDR_CONFIG_PAGE_SIZE;
	val |= (BCM_PAGE_BITS - 8) << 24 | 0x40;
	REG_WR(bp, BNX2_TBDR_CONFIG, val);

	/* Seed the EMAC backoff engine from the MAC address. */
	val = bp->mac_addr[0] +
	      (bp->mac_addr[1] << 8) +
	      (bp->mac_addr[2] << 16) +
	      bp->mac_addr[3] +
	      (bp->mac_addr[4] << 8) +
	      (bp->mac_addr[5] << 16);
	REG_WR(bp, BNX2_EMAC_BACKOFF_SEED, val);

	/* Program the MTU.  Also include 4 bytes for CRC32. */
	val = bp->dev->mtu + ETH_HLEN + 4;
	if (val > (MAX_ETHERNET_PACKET_SIZE + 4))
		val |= BNX2_EMAC_RX_MTU_SIZE_JUMBO_ENA;
	REG_WR(bp, BNX2_EMAC_RX_MTU_SIZE, val);

	for (i = 0; i < BNX2_MAX_MSIX_VEC; i++)
		bp->bnx2_napi[i].last_status_idx = 0;

	bp->rx_mode = BNX2_EMAC_RX_MODE_SORT_MODE;

	/* Set up how to generate a link change interrupt. */
	REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);

	/* Give the chip the host DMA addresses of the status and
	 * statistics blocks. */
	REG_WR(bp, BNX2_HC_STATUS_ADDR_L,
	       (u64) bp->status_blk_mapping & 0xffffffff);
	REG_WR(bp, BNX2_HC_STATUS_ADDR_H, (u64) bp->status_blk_mapping >> 32);

	REG_WR(bp, BNX2_HC_STATISTICS_ADDR_L,
	       (u64) bp->stats_blk_mapping & 0xffffffff);
	REG_WR(bp, BNX2_HC_STATISTICS_ADDR_H,
	       (u64) bp->stats_blk_mapping >> 32);

	/* Host-coalescing trip points and tick values; the *_int value
	 * occupies the upper 16 bits of each register. */
	REG_WR(bp, BNX2_HC_TX_QUICK_CONS_TRIP,
	       (bp->tx_quick_cons_trip_int << 16) | bp->tx_quick_cons_trip);

	REG_WR(bp, BNX2_HC_RX_QUICK_CONS_TRIP,
	       (bp->rx_quick_cons_trip_int << 16) | bp->rx_quick_cons_trip);

	REG_WR(bp, BNX2_HC_COMP_PROD_TRIP,
	       (bp->comp_prod_trip_int << 16) | bp->comp_prod_trip);

	REG_WR(bp, BNX2_HC_TX_TICKS, (bp->tx_ticks_int << 16) | bp->tx_ticks);

	REG_WR(bp, BNX2_HC_RX_TICKS, (bp->rx_ticks_int << 16) | bp->rx_ticks);

	REG_WR(bp, BNX2_HC_COM_TICKS,
	       (bp->com_ticks_int << 16) | bp->com_ticks);

	REG_WR(bp, BNX2_HC_CMD_TICKS,
	       (bp->cmd_ticks_int << 16) | bp->cmd_ticks);

	if (CHIP_NUM(bp) == CHIP_NUM_5708)
		REG_WR(bp, BNX2_HC_STATS_TICKS, 0);
	else
		REG_WR(bp, BNX2_HC_STATS_TICKS, bp->stats_ticks);
	REG_WR(bp, BNX2_HC_STAT_COLLECT_TICKS, 0xbb8);  /* 3ms */

	if (CHIP_ID(bp) == CHIP_ID_5706_A1)
		val = BNX2_HC_CONFIG_COLLECT_STATS;
	else {
		val = BNX2_HC_CONFIG_RX_TMR_MODE | BNX2_HC_CONFIG_TX_TMR_MODE |
		      BNX2_HC_CONFIG_COLLECT_STATS;
	}

	if (bp->flags & BNX2_FLAG_USING_MSIX) {
		/* Program the per-vector status block used by the TX
		 * MSI-X vector. */
		u32 base = ((BNX2_TX_VEC - 1) * BNX2_HC_SB_CONFIG_SIZE) +
			   BNX2_HC_SB_CONFIG_1;

		REG_WR(bp, BNX2_HC_MSIX_BIT_VECTOR,
		       BNX2_HC_MSIX_BIT_VECTOR_VAL);

		REG_WR(bp, base,
			BNX2_HC_SB_CONFIG_1_TX_TMR_MODE |
			BNX2_HC_SB_CONFIG_1_ONE_SHOT);

		REG_WR(bp, base + BNX2_HC_TX_QUICK_CONS_TRIP_OFF,
			(bp->tx_quick_cons_trip_int << 16) |
			 bp->tx_quick_cons_trip);

		REG_WR(bp, base + BNX2_HC_TX_TICKS_OFF,
			(bp->tx_ticks_int << 16) | bp->tx_ticks);

		val |= BNX2_HC_CONFIG_SB_ADDR_INC_128B;
	}

	if (bp->flags & BNX2_FLAG_ONE_SHOT_MSI)
		val |= BNX2_HC_CONFIG_ONE_SHOT;

	REG_WR(bp, BNX2_HC_CONFIG, val);

	/* Clear internal stats counters. */
	REG_WR(bp, BNX2_HC_COMMAND, BNX2_HC_COMMAND_CLR_STAT_NOW);

	REG_WR(bp, BNX2_HC_ATTN_BITS_ENABLE, STATUS_ATTN_EVENTS);

	/* Initialize the receive filter. */
	bnx2_set_rx_mode(bp->dev);

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		val = REG_RD(bp, BNX2_MISC_NEW_CORE_CTL);
		val |= BNX2_MISC_NEW_CORE_CTL_DMA_ENABLE;
		REG_WR(bp, BNX2_MISC_NEW_CORE_CTL, val);
	}
	/* Tell the firmware that initialization is complete. */
	rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT2 | BNX2_DRV_MSG_CODE_RESET,
			  0);

	REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS, BNX2_MISC_ENABLE_DEFAULT);
	REG_RD(bp, BNX2_MISC_ENABLE_SET_BITS);

	udelay(20);

	bp->hc_cmd = REG_RD(bp, BNX2_HC_COMMAND);

	return rc;
}
4502
4503 static void
4504 bnx2_clear_ring_states(struct bnx2 *bp)
4505 {
4506         struct bnx2_napi *bnapi;
4507         int i;
4508
4509         for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) {
4510                 bnapi = &bp->bnx2_napi[i];
4511
4512                 bnapi->tx_cons = 0;
4513                 bnapi->hw_tx_cons = 0;
4514                 bnapi->rx_prod_bseq = 0;
4515                 bnapi->rx_prod = 0;
4516                 bnapi->rx_cons = 0;
4517                 bnapi->rx_pg_prod = 0;
4518                 bnapi->rx_pg_cons = 0;
4519         }
4520 }
4521
/* Program the TX L2 context for the given CID: the context type/size,
 * the command type, and the host address of the TX descriptor ring.
 * The 5709 uses a different set of context offsets (the _XI variants)
 * than older chips.
 */
static void
bnx2_init_tx_context(struct bnx2 *bp, u32 cid)
{
	u32 val, offset0, offset1, offset2, offset3;
	u32 cid_addr = GET_CID_ADDR(cid);

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		offset0 = BNX2_L2CTX_TYPE_XI;
		offset1 = BNX2_L2CTX_CMD_TYPE_XI;
		offset2 = BNX2_L2CTX_TBDR_BHADDR_HI_XI;
		offset3 = BNX2_L2CTX_TBDR_BHADDR_LO_XI;
	} else {
		offset0 = BNX2_L2CTX_TYPE;
		offset1 = BNX2_L2CTX_CMD_TYPE;
		offset2 = BNX2_L2CTX_TBDR_BHADDR_HI;
		offset3 = BNX2_L2CTX_TBDR_BHADDR_LO;
	}
	val = BNX2_L2CTX_TYPE_TYPE_L2 | BNX2_L2CTX_TYPE_SIZE_L2;
	bnx2_ctx_wr(bp, cid_addr, offset0, val);

	val = BNX2_L2CTX_CMD_TYPE_TYPE_L2 | (8 << 16);
	bnx2_ctx_wr(bp, cid_addr, offset1, val);

	/* High and low halves of the TX BD ring's DMA address. */
	val = (u64) bp->tx_desc_mapping >> 32;
	bnx2_ctx_wr(bp, cid_addr, offset2, val);

	val = (u64) bp->tx_desc_mapping & 0xffffffff;
	bnx2_ctx_wr(bp, cid_addr, offset3, val);
}
4551
4552 static void
4553 bnx2_init_tx_ring(struct bnx2 *bp)
4554 {
4555         struct tx_bd *txbd;
4556         u32 cid = TX_CID;
4557         struct bnx2_napi *bnapi;
4558
4559         bp->tx_vec = 0;
4560         if (bp->flags & BNX2_FLAG_USING_MSIX) {
4561                 cid = TX_TSS_CID;
4562                 bp->tx_vec = BNX2_TX_VEC;
4563                 REG_WR(bp, BNX2_TSCH_TSS_CFG, BNX2_TX_INT_NUM |
4564                        (TX_TSS_CID << 7));
4565         }
4566         bnapi = &bp->bnx2_napi[bp->tx_vec];
4567
4568         bp->tx_wake_thresh = bp->tx_ring_size / 2;
4569
4570         txbd = &bp->tx_desc_ring[MAX_TX_DESC_CNT];
4571
4572         txbd->tx_bd_haddr_hi = (u64) bp->tx_desc_mapping >> 32;
4573         txbd->tx_bd_haddr_lo = (u64) bp->tx_desc_mapping & 0xffffffff;
4574
4575         bp->tx_prod = 0;
4576         bp->tx_prod_bseq = 0;
4577
4578         bp->tx_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BIDX;
4579         bp->tx_bseq_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BSEQ;
4580
4581         bnx2_init_tx_context(bp, cid);
4582 }
4583
4584 static void
4585 bnx2_init_rxbd_rings(struct rx_bd *rx_ring[], dma_addr_t dma[], u32 buf_size,
4586                      int num_rings)
4587 {
4588         int i;
4589         struct rx_bd *rxbd;
4590
4591         for (i = 0; i < num_rings; i++) {
4592                 int j;
4593
4594                 rxbd = &rx_ring[i][0];
4595                 for (j = 0; j < MAX_RX_DESC_CNT; j++, rxbd++) {
4596                         rxbd->rx_bd_len = buf_size;
4597                         rxbd->rx_bd_flags = RX_BD_FLAGS_START | RX_BD_FLAGS_END;
4598                 }
4599                 if (i == (num_rings - 1))
4600                         j = 0;
4601                 else
4602                         j = i + 1;
4603                 rxbd->rx_bd_haddr_hi = (u64) dma[j] >> 32;
4604                 rxbd->rx_bd_haddr_lo = (u64) dma[j] & 0xffffffff;
4605         }
4606 }
4607
/* Initialize the RX BD ring (and the optional RX page ring used for
 * jumbo frames), program the RX context with the ring addresses,
 * pre-allocate receive buffers, and publish the initial producer
 * indices through the hardware mailboxes.
 */
static void
bnx2_init_rx_ring(struct bnx2 *bp)
{
	int i;
	u16 prod, ring_prod;
	u32 val, rx_cid_addr = GET_CID_ADDR(RX_CID);
	struct bnx2_napi *bnapi = &bp->bnx2_napi[0];

	bnx2_init_rxbd_rings(bp->rx_desc_ring, bp->rx_desc_mapping,
			     bp->rx_buf_use_size, bp->rx_max_ring);

	bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_PG_BUF_SIZE, 0);
	if (bp->rx_pg_ring_size) {
		/* Jumbo frames: set up the page ring and program its
		 * buffer size, key and DMA address into the context. */
		bnx2_init_rxbd_rings(bp->rx_pg_desc_ring,
				     bp->rx_pg_desc_mapping,
				     PAGE_SIZE, bp->rx_max_pg_ring);
		val = (bp->rx_buf_use_size << 16) | PAGE_SIZE;
		bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_PG_BUF_SIZE, val);
		bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_RBDC_KEY,
		       BNX2_L2CTX_RBDC_JUMBO_KEY);

		val = (u64) bp->rx_pg_desc_mapping[0] >> 32;
		bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_PG_BDHADDR_HI, val);

		val = (u64) bp->rx_pg_desc_mapping[0] & 0xffffffff;
		bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_PG_BDHADDR_LO, val);

		if (CHIP_NUM(bp) == CHIP_NUM_5709)
			REG_WR(bp, BNX2_MQ_MAP_L2_3, BNX2_MQ_MAP_L2_3_DEFAULT);
	}

	/* Program the RX context type and the main ring's DMA address. */
	val = BNX2_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE;
	val |= BNX2_L2CTX_CTX_TYPE_SIZE_L2;
	val |= 0x02 << 8;
	bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_CTX_TYPE, val);

	val = (u64) bp->rx_desc_mapping[0] >> 32;
	bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_BDHADDR_HI, val);

	val = (u64) bp->rx_desc_mapping[0] & 0xffffffff;
	bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_BDHADDR_LO, val);

	/* Pre-fill the page ring; stop early if allocation fails. */
	ring_prod = prod = bnapi->rx_pg_prod;
	for (i = 0; i < bp->rx_pg_ring_size; i++) {
		if (bnx2_alloc_rx_page(bp, ring_prod) < 0)
			break;
		prod = NEXT_RX_BD(prod);
		ring_prod = RX_PG_RING_IDX(prod);
	}
	bnapi->rx_pg_prod = prod;

	/* Pre-fill the RX ring with skbs; stop early if allocation fails. */
	ring_prod = prod = bnapi->rx_prod;
	for (i = 0; i < bp->rx_ring_size; i++) {
		if (bnx2_alloc_rx_skb(bp, bnapi, ring_prod) < 0) {
			break;
		}
		prod = NEXT_RX_BD(prod);
		ring_prod = RX_RING_IDX(prod);
	}
	bnapi->rx_prod = prod;

	/* Publish the producer indices and byte sequence to the chip. */
	REG_WR16(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_PG_BDIDX,
		 bnapi->rx_pg_prod);
	REG_WR16(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BDIDX, prod);

	REG_WR(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BSEQ, bnapi->rx_prod_bseq);
}
4675
4676 static u32 bnx2_find_max_ring(u32 ring_size, u32 max_size)
4677 {
4678         u32 max, num_rings = 1;
4679
4680         while (ring_size > MAX_RX_DESC_CNT) {
4681                 ring_size -= MAX_RX_DESC_CNT;
4682                 num_rings++;
4683         }
4684         /* round to next power of 2 */
4685         max = max_size;
4686         while ((max & num_rings) == 0)
4687                 max >>= 1;
4688
4689         if (num_rings != max)
4690                 max <<= 1;
4691
4692         return max;
4693 }
4694
/* Compute the RX buffer sizes and ring geometry for the requested ring
 * size.  If the required buffer would exceed a page (jumbo MTU) and
 * the chip supports it, reception is split between a small header
 * buffer and a separate ring of page-sized buffers.
 */
static void
bnx2_set_rx_ring_size(struct bnx2 *bp, u32 size)
{
	u32 rx_size, rx_space, jumbo_size;

	/* 8 for CRC and VLAN */
	rx_size = bp->dev->mtu + ETH_HLEN + bp->rx_offset + 8;

	/* Total skb footprint including alignment, padding and the
	 * shared info at the end of the data buffer. */
	rx_space = SKB_DATA_ALIGN(rx_size + BNX2_RX_ALIGN) + NET_SKB_PAD +
		sizeof(struct skb_shared_info);

	bp->rx_copy_thresh = RX_COPY_THRESH;
	bp->rx_pg_ring_size = 0;
	bp->rx_max_pg_ring = 0;
	bp->rx_max_pg_ring_idx = 0;
	if ((rx_space > PAGE_SIZE) && !(bp->flags & BNX2_FLAG_JUMBO_BROKEN)) {
		/* Pages needed per packet for the jumbo payload. */
		int pages = PAGE_ALIGN(bp->dev->mtu - 40) >> PAGE_SHIFT;

		jumbo_size = size * pages;
		if (jumbo_size > MAX_TOTAL_RX_PG_DESC_CNT)
			jumbo_size = MAX_TOTAL_RX_PG_DESC_CNT;

		bp->rx_pg_ring_size = jumbo_size;
		bp->rx_max_pg_ring = bnx2_find_max_ring(jumbo_size,
							MAX_RX_PG_RINGS);
		bp->rx_max_pg_ring_idx = (bp->rx_max_pg_ring * RX_DESC_CNT) - 1;
		/* The main ring then only holds the packet header. */
		rx_size = RX_COPY_THRESH + bp->rx_offset;
		bp->rx_copy_thresh = 0;
	}

	bp->rx_buf_use_size = rx_size;
	/* hw alignment */
	bp->rx_buf_size = bp->rx_buf_use_size + BNX2_RX_ALIGN;
	bp->rx_jumbo_thresh = rx_size - bp->rx_offset;
	bp->rx_ring_size = size;
	bp->rx_max_ring = bnx2_find_max_ring(size, MAX_RX_RINGS);
	bp->rx_max_ring_idx = (bp->rx_max_ring * RX_DESC_CNT) - 1;
}
4733
/* Unmap and free every skb still owned by the TX ring.  An skb may
 * span several BD slots (head + page fragments); the fragment slots
 * are unmapped and then skipped over in one step.
 */
static void
bnx2_free_tx_skbs(struct bnx2 *bp)
{
	int i;

	if (bp->tx_buf_ring == NULL)
		return;

	for (i = 0; i < TX_DESC_CNT; ) {
		struct sw_bd *tx_buf = &bp->tx_buf_ring[i];
		struct sk_buff *skb = tx_buf->skb;
		int j, last;

		if (skb == NULL) {
			i++;
			continue;
		}

		/* Unmap the linear (head) portion of the skb. */
		pci_unmap_single(bp->pdev, pci_unmap_addr(tx_buf, mapping),
			skb_headlen(skb), PCI_DMA_TODEVICE);

		tx_buf->skb = NULL;

		/* Unmap each page fragment occupying the following slots. */
		last = skb_shinfo(skb)->nr_frags;
		for (j = 0; j < last; j++) {
			tx_buf = &bp->tx_buf_ring[i + j + 1];
			pci_unmap_page(bp->pdev,
				pci_unmap_addr(tx_buf, mapping),
				skb_shinfo(skb)->frags[j].size,
				PCI_DMA_TODEVICE);
		}
		dev_kfree_skb(skb);
		/* Advance past the head slot and all fragment slots. */
		i += j + 1;
	}

}
4770
/* Unmap and free every skb in the RX ring, then release all pages in
 * the RX page ring.
 */
static void
bnx2_free_rx_skbs(struct bnx2 *bp)
{
	int i;

	if (bp->rx_buf_ring == NULL)
		return;

	for (i = 0; i < bp->rx_max_ring_idx; i++) {
		struct sw_bd *rx_buf = &bp->rx_buf_ring[i];
		struct sk_buff *skb = rx_buf->skb;

		/* Empty slots have no skb attached. */
		if (skb == NULL)
			continue;

		pci_unmap_single(bp->pdev, pci_unmap_addr(rx_buf, mapping),
			bp->rx_buf_use_size, PCI_DMA_FROMDEVICE);

		rx_buf->skb = NULL;

		dev_kfree_skb(skb);
	}
	for (i = 0; i < bp->rx_max_pg_ring_idx; i++)
		bnx2_free_rx_page(bp, i);
}
4796
/* Release all skbs held by both the TX and RX rings. */
static void
bnx2_free_skbs(struct bnx2 *bp)
{
	bnx2_free_tx_skbs(bp);
	bnx2_free_rx_skbs(bp);
}
4803
/* Reset the chip and rebuild the rings.  Skbs left on the rings are
 * always freed, even if the chip reset itself failed.  Returns 0 on
 * success or a negative errno.
 */
static int
bnx2_reset_nic(struct bnx2 *bp, u32 reset_code)
{
	int rc;

	rc = bnx2_reset_chip(bp, reset_code);
	bnx2_free_skbs(bp);
	if (rc)
		return rc;

	if ((rc = bnx2_init_chip(bp)) != 0)
		return rc;

	bnx2_clear_ring_states(bp);
	bnx2_init_tx_ring(bp);
	bnx2_init_rx_ring(bp);
	return 0;
}
4822
/* Full NIC bring-up: reset the chip and rings, then initialize the
 * PHY and link state under the phy_lock.  Returns 0 on success or a
 * negative errno.
 */
static int
bnx2_init_nic(struct bnx2 *bp)
{
	int rc;

	if ((rc = bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET)) != 0)
		return rc;

	spin_lock_bh(&bp->phy_lock);
	bnx2_init_phy(bp);
	bnx2_set_link(bp);
	spin_unlock_bh(&bp->phy_lock);
	return 0;
}
4837
4838 static int
4839 bnx2_test_registers(struct bnx2 *bp)
4840 {
4841         int ret;
4842         int i, is_5709;
4843         static const struct {
4844                 u16   offset;
4845                 u16   flags;
4846 #define BNX2_FL_NOT_5709        1
4847                 u32   rw_mask;
4848                 u32   ro_mask;
4849         } reg_tbl[] = {
4850                 { 0x006c, 0, 0x00000000, 0x0000003f },
4851                 { 0x0090, 0, 0xffffffff, 0x00000000 },
4852                 { 0x0094, 0, 0x00000000, 0x00000000 },
4853
4854                 { 0x0404, BNX2_FL_NOT_5709, 0x00003f00, 0x00000000 },
4855                 { 0x0418, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
4856                 { 0x041c, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
4857                 { 0x0420, BNX2_FL_NOT_5709, 0x00000000, 0x80ffffff },
4858                 { 0x0424, BNX2_FL_NOT_5709, 0x00000000, 0x00000000 },
4859                 { 0x0428, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
4860                 { 0x0450, BNX2_FL_NOT_5709, 0x00000000, 0x0000ffff },
4861                 { 0x0454, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
4862                 { 0x0458, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
4863
4864                 { 0x0808, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
4865                 { 0x0854, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
4866                 { 0x0868, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
4867                 { 0x086c, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
4868                 { 0x0870, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
4869                 { 0x0874, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
4870
4871                 { 0x0c00, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
4872                 { 0x0c04, BNX2_FL_NOT_5709, 0x00000000, 0x03ff0001 },
4873                 { 0x0c08, BNX2_FL_NOT_5709,  0x0f0ff073, 0x00000000 },
4874
4875                 { 0x1000, 0, 0x00000000, 0x00000001 },
4876                 { 0x1004, 0, 0x00000000, 0x000f0001 },
4877
4878                 { 0x1408, 0, 0x01c00800, 0x00000000 },
4879                 { 0x149c, 0, 0x8000ffff, 0x00000000 },
4880                 { 0x14a8, 0, 0x00000000, 0x000001ff },
4881                 { 0x14ac, 0, 0x0fffffff, 0x10000000 },
4882                 { 0x14b0, 0, 0x00000002, 0x00000001 },
4883                 { 0x14b8, 0, 0x00000000, 0x00000000 },
4884                 { 0x14c0, 0, 0x00000000, 0x00000009 },
4885                 { 0x14c4, 0, 0x00003fff, 0x00000000 },
4886                 { 0x14cc, 0, 0x00000000, 0x00000001 },
4887                 { 0x14d0, 0, 0xffffffff, 0x00000000 },
4888
4889                 { 0x1800, 0, 0x00000000, 0x00000001 },
4890                 { 0x1804, 0, 0x00000000, 0x00000003 },
4891
4892                 { 0x2800, 0, 0x00000000, 0x00000001 },
4893                 { 0x2804, 0, 0x00000000, 0x00003f01 },
4894                 { 0x2808, 0, 0x0f3f3f03, 0x00000000 },
4895                 { 0x2810, 0, 0xffff0000, 0x00000000 },
4896                 { 0x2814, 0, 0xffff0000, 0x00000000 },
4897                 { 0x2818, 0, 0xffff0000, 0x00000000 },
4898                 { 0x281c, 0, 0xffff0000, 0x00000000 },
4899                 { 0x2834, 0, 0xffffffff, 0x00000000 },
4900                 { 0x2840, 0, 0x00000000, 0xffffffff },
4901                 { 0x2844, 0, 0x00000000, 0xffffffff },
4902                 { 0x2848, 0, 0xffffffff, 0x00000000 },
4903                 { 0x284c, 0, 0xf800f800, 0x07ff07ff },
4904
4905                 { 0x2c00, 0, 0x00000000, 0x00000011 },
4906                 { 0x2c04, 0, 0x00000000, 0x00030007 },
4907
4908                 { 0x3c00, 0, 0x00000000, 0x00000001 },
4909                 { 0x3c04, 0, 0x00000000, 0x00070000 },
4910                 { 0x3c08, 0, 0x00007f71, 0x07f00000 },
4911                 { 0x3c0c, 0, 0x1f3ffffc, 0x00000000 },
4912                 { 0x3c10, 0, 0xffffffff, 0x00000000 },
4913                 { 0x3c14, 0, 0x00000000, 0xffffffff },
4914                 { 0x3c18, 0, 0x00000000, 0xffffffff },
4915                 { 0x3c1c, 0, 0xfffff000, 0x00000000 },
4916                 { 0x3c20, 0, 0xffffff00, 0x00000000 },
4917
4918                 { 0x5004, 0, 0x00000000, 0x0000007f },
4919                 { 0x5008, 0, 0x0f0007ff, 0x00000000 },
4920
4921                 { 0x5c00, 0, 0x00000000, 0x00000001 },
4922                 { 0x5c04, 0, 0x00000000, 0x0003000f },
4923                 { 0x5c08, 0, 0x00000003, 0x00000000 },
4924                 { 0x5c0c, 0, 0x0000fff8, 0x00000000 },
4925                 { 0x5c10, 0, 0x00000000, 0xffffffff },
4926                 { 0x5c80, 0, 0x00000000, 0x0f7113f1 },
4927                 { 0x5c84, 0, 0x00000000, 0x0000f333 },
4928                 { 0x5c88, 0, 0x00000000, 0x00077373 },
4929                 { 0x5c8c, 0, 0x00000000, 0x0007f737 },
4930
4931                 { 0x6808, 0, 0x0000ff7f, 0x00000000 },
4932                 { 0x680c, 0, 0xffffffff, 0x00000000 },
4933                 { 0x6810, 0, 0xffffffff, 0x00000000 },
4934                 { 0x6814, 0, 0xffffffff, 0x00000000 },
4935                 { 0x6818, 0, 0xffffffff, 0x00000000 },
4936                 { 0x681c, 0, 0xffffffff, 0x00000000 },
4937                 { 0x6820, 0, 0x00ff00ff, 0x00000000 },
4938                 { 0x6824, 0, 0x00ff00ff, 0x00000000 },
4939                 { 0x6828, 0, 0x00ff00ff, 0x00000000 },
4940                 { 0x682c, 0, 0x03ff03ff, 0x00000000 },
4941                 { 0x6830, 0, 0x03ff03ff, 0x00000000 },
4942                 { 0x6834, 0, 0x03ff03ff, 0x00000000 },
4943                 { 0x6838, 0, 0x03ff03ff, 0x00000000 },
4944                 { 0x683c, 0, 0x0000ffff, 0x00000000 },
4945                 { 0x6840, 0, 0x00000ff0, 0x00000000 },
4946                 { 0x6844, 0, 0x00ffff00, 0x00000000 },
4947                 { 0x684c, 0, 0xffffffff, 0x00000000 },
4948                 { 0x6850, 0, 0x7f7f7f7f, 0x00000000 },
4949                 { 0x6854, 0, 0x7f7f7f7f, 0x00000000 },
4950                 { 0x6858, 0, 0x7f7f7f7f, 0x00000000 },
4951                 { 0x685c, 0, 0x7f7f7f7f, 0x00000000 },
4952                 { 0x6908, 0, 0x00000000, 0x0001ff0f },
4953                 { 0x690c, 0, 0x00000000, 0x0ffe00f0 },
4954
4955                 { 0xffff, 0, 0x00000000, 0x00000000 },
4956         };
4957
4958         ret = 0;
4959         is_5709 = 0;
4960         if (CHIP_NUM(bp) == CHIP_NUM_5709)
4961                 is_5709 = 1;
4962
4963         for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
4964                 u32 offset, rw_mask, ro_mask, save_val, val;
4965                 u16 flags = reg_tbl[i].flags;
4966
4967                 if (is_5709 && (flags & BNX2_FL_NOT_5709))
4968                         continue;
4969
4970                 offset = (u32) reg_tbl[i].offset;
4971                 rw_mask = reg_tbl[i].rw_mask;
4972                 ro_mask = reg_tbl[i].ro_mask;
4973
4974                 save_val = readl(bp->regview + offset);
4975
4976                 writel(0, bp->regview + offset);
4977
4978                 val = readl(bp->regview + offset);
4979                 if ((val & rw_mask) != 0) {
4980                         goto reg_test_err;
4981                 }
4982
4983                 if ((val & ro_mask) != (save_val & ro_mask)) {
4984                         goto reg_test_err;
4985                 }
4986
4987                 writel(0xffffffff, bp->regview + offset);
4988
4989                 val = readl(bp->regview + offset);
4990                 if ((val & rw_mask) != rw_mask) {
4991                         goto reg_test_err;
4992                 }
4993
4994                 if ((val & ro_mask) != (save_val & ro_mask)) {
4995                         goto reg_test_err;
4996                 }
4997
4998                 writel(save_val, bp->regview + offset);
4999                 continue;
5000
5001 reg_test_err:
5002                 writel(save_val, bp->regview + offset);
5003                 ret = -ENODEV;
5004                 break;
5005         }
5006         return ret;
5007 }
5008
5009 static int
5010 bnx2_do_mem_test(struct bnx2 *bp, u32 start, u32 size)
5011 {
5012         static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0x55555555,
5013                 0xaaaaaaaa , 0xaa55aa55, 0x55aa55aa };
5014         int i;
5015
5016         for (i = 0; i < sizeof(test_pattern) / 4; i++) {
5017                 u32 offset;
5018
5019                 for (offset = 0; offset < size; offset += 4) {
5020
5021                         bnx2_reg_wr_ind(bp, start + offset, test_pattern[i]);
5022
5023                         if (bnx2_reg_rd_ind(bp, start + offset) !=
5024                                 test_pattern[i]) {
5025                                 return -ENODEV;
5026                         }
5027                 }
5028         }
5029         return 0;
5030 }
5031
5032 static int
5033 bnx2_test_memory(struct bnx2 *bp)
5034 {
5035         int ret = 0;
5036         int i;
5037         static struct mem_entry {
5038                 u32   offset;
5039                 u32   len;
5040         } mem_tbl_5706[] = {
5041                 { 0x60000,  0x4000 },
5042                 { 0xa0000,  0x3000 },
5043                 { 0xe0000,  0x4000 },
5044                 { 0x120000, 0x4000 },
5045                 { 0x1a0000, 0x4000 },
5046                 { 0x160000, 0x4000 },
5047                 { 0xffffffff, 0    },
5048         },
5049         mem_tbl_5709[] = {
5050                 { 0x60000,  0x4000 },
5051                 { 0xa0000,  0x3000 },
5052                 { 0xe0000,  0x4000 },
5053                 { 0x120000, 0x4000 },
5054                 { 0x1a0000, 0x4000 },
5055                 { 0xffffffff, 0    },
5056         };
5057         struct mem_entry *mem_tbl;
5058
5059         if (CHIP_NUM(bp) == CHIP_NUM_5709)
5060                 mem_tbl = mem_tbl_5709;
5061         else
5062                 mem_tbl = mem_tbl_5706;
5063
5064         for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
5065                 if ((ret = bnx2_do_mem_test(bp, mem_tbl[i].offset,
5066                         mem_tbl[i].len)) != 0) {
5067                         return ret;
5068                 }
5069         }
5070
5071         return ret;
5072 }
5073
5074 #define BNX2_MAC_LOOPBACK       0
5075 #define BNX2_PHY_LOOPBACK       1
5076
static int
bnx2_run_loopback(struct bnx2 *bp, int loopback_mode)
{
	/* Ethtool self-test helper: put the chip in MAC or PHY loopback,
	 * transmit a single test frame, and verify that it comes back on
	 * the rx ring error-free with its payload intact.  Returns 0 on
	 * success, -EINVAL for an unknown mode, -ENOMEM if no skb could
	 * be allocated, and -ENODEV on any loss or corruption.
	 */
	unsigned int pkt_size, num_pkts, i;
	struct sk_buff *skb, *rx_skb;
	unsigned char *packet;
	u16 rx_start_idx, rx_idx;
	dma_addr_t map;
	struct tx_bd *txbd;
	struct sw_bd *rx_buf;
	struct l2_fhdr *rx_hdr;
	int ret = -ENODEV;
	struct bnx2_napi *bnapi = &bp->bnx2_napi[0], *tx_napi;

	/* With MSI-X, tx completions are reported on a separate vector. */
	tx_napi = bnapi;
	if (bp->flags & BNX2_FLAG_USING_MSIX)
		tx_napi = &bp->bnx2_napi[BNX2_TX_VEC];

	if (loopback_mode == BNX2_MAC_LOOPBACK) {
		bp->loopback = MAC_LOOPBACK;
		bnx2_set_mac_loopback(bp);
	}
	else if (loopback_mode == BNX2_PHY_LOOPBACK) {
		/* A remotely managed PHY cannot be looped back locally;
		 * report success so the self-test does not fail.
		 */
		if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
			return 0;

		bp->loopback = PHY_LOOPBACK;
		bnx2_set_phy_loopback(bp);
	}
	else
		return -EINVAL;

	/* Build the test frame: our MAC as destination, zeroed source and
	 * type fields, then a counting byte pattern checked on receive.
	 */
	pkt_size = min(bp->dev->mtu + ETH_HLEN, bp->rx_jumbo_thresh - 4);
	skb = netdev_alloc_skb(bp->dev, pkt_size);
	if (!skb)
		return -ENOMEM;
	packet = skb_put(skb, pkt_size);
	memcpy(packet, bp->dev->dev_addr, 6);
	memset(packet + 6, 0x0, 8);
	for (i = 14; i < pkt_size; i++)
		packet[i] = (unsigned char) (i & 0xff);

	map = pci_map_single(bp->pdev, skb->data, pkt_size,
		PCI_DMA_TODEVICE);

	/* Force a status block update (without an interrupt) so the rx
	 * consumer snapshot taken below is current.
	 */
	REG_WR(bp, BNX2_HC_COMMAND,
	       bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);

	REG_RD(bp, BNX2_HC_COMMAND);

	udelay(5);
	rx_start_idx = bnx2_get_hw_rx_cons(bnapi);

	num_pkts = 0;

	/* Queue the frame as a single BD marked both START and END, then
	 * ring the tx doorbell (producer index + byte sequence).
	 */
	txbd = &bp->tx_desc_ring[TX_RING_IDX(bp->tx_prod)];

	txbd->tx_bd_haddr_hi = (u64) map >> 32;
	txbd->tx_bd_haddr_lo = (u64) map & 0xffffffff;
	txbd->tx_bd_mss_nbytes = pkt_size;
	txbd->tx_bd_vlan_tag_flags = TX_BD_FLAGS_START | TX_BD_FLAGS_END;

	num_pkts++;
	bp->tx_prod = NEXT_TX_BD(bp->tx_prod);
	bp->tx_prod_bseq += pkt_size;

	REG_WR16(bp, bp->tx_bidx_addr, bp->tx_prod);
	REG_WR(bp, bp->tx_bseq_addr, bp->tx_prod_bseq);

	/* Give the chip time to loop the frame back, then force another
	 * status block update to publish the new consumer indices.
	 */
	udelay(100);

	REG_WR(bp, BNX2_HC_COMMAND,
	       bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);

	REG_RD(bp, BNX2_HC_COMMAND);

	udelay(5);

	pci_unmap_single(bp->pdev, map, pkt_size, PCI_DMA_TODEVICE);
	dev_kfree_skb(skb);

	/* The transmit must have completed... */
	if (bnx2_get_hw_tx_cons(tx_napi) != bp->tx_prod)
		goto loopback_test_done;

	/* ...and exactly num_pkts frames must have arrived. */
	rx_idx = bnx2_get_hw_rx_cons(bnapi);
	if (rx_idx != rx_start_idx + num_pkts) {
		goto loopback_test_done;
	}

	rx_buf = &bp->rx_buf_ring[rx_start_idx];
	rx_skb = rx_buf->skb;

	/* The chip writes an l2_fhdr ahead of the received frame data. */
	rx_hdr = (struct l2_fhdr *) rx_skb->data;
	skb_reserve(rx_skb, bp->rx_offset);

	pci_dma_sync_single_for_cpu(bp->pdev,
		pci_unmap_addr(rx_buf, mapping),
		bp->rx_buf_size, PCI_DMA_FROMDEVICE);

	if (rx_hdr->l2_fhdr_status &
		(L2_FHDR_ERRORS_BAD_CRC |
		L2_FHDR_ERRORS_PHY_DECODE |
		L2_FHDR_ERRORS_ALIGNMENT |
		L2_FHDR_ERRORS_TOO_SHORT |
		L2_FHDR_ERRORS_GIANT_FRAME)) {

		goto loopback_test_done;
	}

	/* Length check excludes the 4-byte CRC appended by the MAC. */
	if ((rx_hdr->l2_fhdr_pkt_len - 4) != pkt_size) {
		goto loopback_test_done;
	}

	/* Verify the counting payload pattern byte-for-byte. */
	for (i = 14; i < pkt_size; i++) {
		if (*(rx_skb->data + i) != (unsigned char) (i & 0xff)) {
			goto loopback_test_done;
		}
	}

	ret = 0;

loopback_test_done:
	bp->loopback = 0;
	return ret;
}
5202
5203 #define BNX2_MAC_LOOPBACK_FAILED        1
5204 #define BNX2_PHY_LOOPBACK_FAILED        2
5205 #define BNX2_LOOPBACK_FAILED            (BNX2_MAC_LOOPBACK_FAILED |     \
5206                                          BNX2_PHY_LOOPBACK_FAILED)
5207
5208 static int
5209 bnx2_test_loopback(struct bnx2 *bp)
5210 {
5211         int rc = 0;
5212
5213         if (!netif_running(bp->dev))
5214                 return BNX2_LOOPBACK_FAILED;
5215
5216         bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET);
5217         spin_lock_bh(&bp->phy_lock);
5218         bnx2_init_phy(bp);
5219         spin_unlock_bh(&bp->phy_lock);
5220         if (bnx2_run_loopback(bp, BNX2_MAC_LOOPBACK))
5221                 rc |= BNX2_MAC_LOOPBACK_FAILED;
5222         if (bnx2_run_loopback(bp, BNX2_PHY_LOOPBACK))
5223                 rc |= BNX2_PHY_LOOPBACK_FAILED;
5224         return rc;
5225 }
5226
5227 #define NVRAM_SIZE 0x200
5228 #define CRC32_RESIDUAL 0xdebb20e3
5229
5230 static int
5231 bnx2_test_nvram(struct bnx2 *bp)
5232 {
5233         __be32 buf[NVRAM_SIZE / 4];
5234         u8 *data = (u8 *) buf;
5235         int rc = 0;
5236         u32 magic, csum;
5237
5238         if ((rc = bnx2_nvram_read(bp, 0, data, 4)) != 0)
5239                 goto test_nvram_done;
5240
5241         magic = be32_to_cpu(buf[0]);
5242         if (magic != 0x669955aa) {
5243                 rc = -ENODEV;
5244                 goto test_nvram_done;
5245         }
5246
5247         if ((rc = bnx2_nvram_read(bp, 0x100, data, NVRAM_SIZE)) != 0)
5248                 goto test_nvram_done;
5249
5250         csum = ether_crc_le(0x100, data);
5251         if (csum != CRC32_RESIDUAL) {
5252                 rc = -ENODEV;
5253                 goto test_nvram_done;
5254         }
5255
5256         csum = ether_crc_le(0x100, data + 0x100);
5257         if (csum != CRC32_RESIDUAL) {
5258                 rc = -ENODEV;
5259         }
5260
5261 test_nvram_done:
5262         return rc;
5263 }
5264
5265 static int
5266 bnx2_test_link(struct bnx2 *bp)
5267 {
5268         u32 bmsr;
5269
5270         if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
5271                 if (bp->link_up)
5272                         return 0;
5273                 return -ENODEV;
5274         }
5275         spin_lock_bh(&bp->phy_lock);
5276         bnx2_enable_bmsr1(bp);
5277         bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
5278         bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
5279         bnx2_disable_bmsr1(bp);
5280         spin_unlock_bh(&bp->phy_lock);
5281
5282         if (bmsr & BMSR_LSTATUS) {
5283                 return 0;
5284         }
5285         return -ENODEV;
5286 }
5287
5288 static int
5289 bnx2_test_intr(struct bnx2 *bp)
5290 {
5291         int i;
5292         u16 status_idx;
5293
5294         if (!netif_running(bp->dev))
5295                 return -ENODEV;
5296
5297         status_idx = REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff;
5298
5299         /* This register is not touched during run-time. */
5300         REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
5301         REG_RD(bp, BNX2_HC_COMMAND);
5302
5303         for (i = 0; i < 10; i++) {
5304                 if ((REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff) !=
5305                         status_idx) {
5306
5307                         break;
5308                 }
5309
5310                 msleep_interruptible(10);
5311         }
5312         if (i < 10)
5313                 return 0;
5314
5315         return -ENODEV;
5316 }
5317
static int
bnx2_5706_serdes_has_link(struct bnx2 *bp)
{
	/* Probe the 5706S SerDes PHY shadow registers to decide whether
	 * a usable link partner is present.  Returns 1 when a link can
	 * be parallel-detected, 0 otherwise.  Two of the registers are
	 * read twice with only the second value used — presumably to
	 * flush latched status bits; TODO confirm against the PHY
	 * documentation.
	 */
	u32 mode_ctl, an_dbg, exp;

	/* No signal detected -> definitely no link. */
	bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_MODE_CTL);
	bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &mode_ctl);

	if (!(mode_ctl & MISC_SHDW_MODE_CTL_SIG_DET))
		return 0;

	/* Autoneg debug register: bail on loss of sync or invalid RUDI. */
	bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
	bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);
	bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);

	if (an_dbg & (MISC_SHDW_AN_DBG_NOSYNC | MISC_SHDW_AN_DBG_RUDI_INVALID))
		return 0;

	bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS, MII_EXPAND_REG1);
	bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &exp);
	bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &exp);

	if (exp & MII_EXPAND_REG1_RUDI_C)	/* receiving CONFIG */
		return 0;

	return 1;
}
5345
static void
bnx2_5706_serdes_timer(struct bnx2 *bp)
{
	/* Periodic link maintenance for the 5706 SerDes PHY, called from
	 * bnx2_timer().  Handles forced-link-down recovery, parallel
	 * detection of non-autonegotiating partners, and falling back to
	 * autoneg when the partner starts negotiating.  Takes phy_lock
	 * for the duration.
	 */
	int check_link = 1;

	spin_lock(&bp->phy_lock);
	/* A previous tick forced the link down; undo that and return so
	 * the next tick can re-evaluate from a clean state.
	 */
	if (bp->phy_flags & BNX2_PHY_FLAG_FORCED_DOWN) {
		bnx2_5706s_force_link_dn(bp, 0);
		bp->phy_flags &= ~BNX2_PHY_FLAG_FORCED_DOWN;
		spin_unlock(&bp->phy_lock);
		return;
	}

	if (bp->serdes_an_pending) {
		/* Still waiting out an autoneg grace period. */
		bp->serdes_an_pending--;
		check_link = 0;
	} else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
		u32 bmcr;

		bp->current_interval = bp->timer_interval;

		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

		/* Autoneg enabled but no link: if the partner looks
		 * alive, parallel-detect by forcing 1000/full.
		 */
		if (bmcr & BMCR_ANENABLE) {
			if (bnx2_5706_serdes_has_link(bp)) {
				bmcr &= ~BMCR_ANENABLE;
				bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
				bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
				bp->phy_flags |= BNX2_PHY_FLAG_PARALLEL_DETECT;
			}
		}
	}
	else if ((bp->link_up) && (bp->autoneg & AUTONEG_SPEED) &&
		 (bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT)) {
		u32 phy2;

		check_link = 0;
		/* Vendor-specific register access (write 0x0f01 to 0x17,
		 * read 0x15).  Bit 0x20 presumably indicates the partner
		 * is now autonegotiating — TODO confirm against the PHY
		 * documentation.
		 */
		bnx2_write_phy(bp, 0x17, 0x0f01);
		bnx2_read_phy(bp, 0x15, &phy2);
		if (phy2 & 0x20) {
			u32 bmcr;

			/* Partner negotiates: re-enable autoneg and drop
			 * out of parallel-detect mode.
			 */
			bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
			bmcr |= BMCR_ANENABLE;
			bnx2_write_phy(bp, bp->mii_bmcr, bmcr);

			bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;
		}
	} else
		bp->current_interval = bp->timer_interval;

	if (bp->link_up && (bp->autoneg & AUTONEG_SPEED) && check_link) {
		u32 val;

		/* Link is up: if sync has been lost, force the link down
		 * so the FORCED_DOWN branch above restarts cleanly.
		 * Double read as above (latched bits — TODO confirm).
		 */
		bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
		bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &val);
		bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &val);

		if (val & MISC_SHDW_AN_DBG_NOSYNC) {
			bnx2_5706s_force_link_dn(bp, 1);
			bp->phy_flags |= BNX2_PHY_FLAG_FORCED_DOWN;
		}
	}
	spin_unlock(&bp->phy_lock);
}
5411
static void
bnx2_5708_serdes_timer(struct bnx2 *bp)
{
	/* Periodic link maintenance for the 5708 SerDes PHY, called from
	 * bnx2_timer().  While no link is up, alternates between forced
	 * 2.5G mode and autoneg to try to establish one.
	 */
	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
		return;

	if ((bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) == 0) {
		bp->serdes_an_pending = 0;
		return;
	}

	spin_lock(&bp->phy_lock);
	if (bp->serdes_an_pending)
		/* Grace period after re-enabling autoneg; count it down. */
		bp->serdes_an_pending--;
	else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
		u32 bmcr;

		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
		if (bmcr & BMCR_ANENABLE) {
			/* Autoneg did not produce a link: try forcing
			 * 2.5G and shorten the re-check interval.
			 */
			bnx2_enable_forced_2g5(bp);
			bp->current_interval = SERDES_FORCED_TIMEOUT;
		} else {
			/* Forced mode failed too: return to autoneg and
			 * give it two timer ticks before re-evaluating.
			 */
			bnx2_disable_forced_2g5(bp);
			bp->serdes_an_pending = 2;
			bp->current_interval = bp->timer_interval;
		}

	} else
		bp->current_interval = bp->timer_interval;

	spin_unlock(&bp->phy_lock);
}
5444
static void
bnx2_timer(unsigned long data)
{
	/* Periodic maintenance timer (period bp->current_interval):
	 * firmware heartbeat, FW rx-drop statistic refresh, a 5708
	 * statistics workaround, and the SerDes link state machines.
	 */
	struct bnx2 *bp = (struct bnx2 *) data;

	if (!netif_running(bp->dev))
		return;

	/* Interrupts are masked (reset in progress); skip the work but
	 * keep the timer alive.
	 */
	if (atomic_read(&bp->intr_sem) != 0)
		goto bnx2_restart_timer;

	bnx2_send_heart_beat(bp);

	bp->stats_blk->stat_FwRxDrop =
		bnx2_reg_rd_ind(bp, BNX2_FW_RX_DROP_COUNT);

	/* workaround occasional corrupted counters */
	if (CHIP_NUM(bp) == CHIP_NUM_5708 && bp->stats_ticks)
		REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd |
					    BNX2_HC_COMMAND_STATS_NOW);

	/* Each SerDes variant has its own link maintenance routine. */
	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
		if (CHIP_NUM(bp) == CHIP_NUM_5706)
			bnx2_5706_serdes_timer(bp);
		else
			bnx2_5708_serdes_timer(bp);
	}

bnx2_restart_timer:
	mod_timer(&bp->timer, jiffies + bp->current_interval);
}
5476
5477 static int
5478 bnx2_request_irq(struct bnx2 *bp)
5479 {
5480         struct net_device *dev = bp->dev;
5481         unsigned long flags;
5482         struct bnx2_irq *irq;
5483         int rc = 0, i;
5484
5485         if (bp->flags & BNX2_FLAG_USING_MSI_OR_MSIX)
5486                 flags = 0;
5487         else
5488                 flags = IRQF_SHARED;
5489
5490         for (i = 0; i < bp->irq_nvecs; i++) {
5491                 irq = &bp->irq_tbl[i];
5492                 rc = request_irq(irq->vector, irq->handler, flags, irq->name,
5493                                  dev);
5494                 if (rc)
5495                         break;
5496                 irq->requested = 1;
5497         }
5498         return rc;
5499 }
5500
5501 static void
5502 bnx2_free_irq(struct bnx2 *bp)
5503 {
5504         struct net_device *dev = bp->dev;
5505         struct bnx2_irq *irq;
5506         int i;
5507
5508         for (i = 0; i < bp->irq_nvecs; i++) {
5509                 irq = &bp->irq_tbl[i];
5510                 if (irq->requested)
5511                         free_irq(irq->vector, dev);
5512                 irq->requested = 0;
5513         }
5514         if (bp->flags & BNX2_FLAG_USING_MSI)
5515                 pci_disable_msi(bp->pdev);
5516         else if (bp->flags & BNX2_FLAG_USING_MSIX)
5517                 pci_disable_msix(bp->pdev);
5518
5519         bp->flags &= ~(BNX2_FLAG_USING_MSI_OR_MSIX | BNX2_FLAG_ONE_SHOT_MSI);
5520 }
5521
5522 static void
5523 bnx2_enable_msix(struct bnx2 *bp)
5524 {
5525         int i, rc;
5526         struct msix_entry msix_ent[BNX2_MAX_MSIX_VEC];
5527
5528         bnx2_setup_msix_tbl(bp);
5529         REG_WR(bp, BNX2_PCI_MSIX_CONTROL, BNX2_MAX_MSIX_HW_VEC - 1);
5530         REG_WR(bp, BNX2_PCI_MSIX_TBL_OFF_BIR, BNX2_PCI_GRC_WINDOW2_BASE);
5531         REG_WR(bp, BNX2_PCI_MSIX_PBA_OFF_BIT, BNX2_PCI_GRC_WINDOW3_BASE);
5532
5533         for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) {
5534                 msix_ent[i].entry = i;
5535                 msix_ent[i].vector = 0;
5536         }
5537
5538         rc = pci_enable_msix(bp->pdev, msix_ent, BNX2_MAX_MSIX_VEC);
5539         if (rc != 0)
5540                 return;
5541
5542         bp->irq_tbl[BNX2_BASE_VEC].handler = bnx2_msi_1shot;
5543         bp->irq_tbl[BNX2_TX_VEC].handler = bnx2_tx_msix;
5544
5545         strcpy(bp->irq_tbl[BNX2_BASE_VEC].name, bp->dev->name);
5546         strcat(bp->irq_tbl[BNX2_BASE_VEC].name, "-base");
5547         strcpy(bp->irq_tbl[BNX2_TX_VEC].name, bp->dev->name);
5548         strcat(bp->irq_tbl[BNX2_TX_VEC].name, "-tx");
5549
5550         bp->irq_nvecs = BNX2_MAX_MSIX_VEC;
5551         bp->flags |= BNX2_FLAG_USING_MSIX | BNX2_FLAG_ONE_SHOT_MSI;
5552         for (i = 0; i < BNX2_MAX_MSIX_VEC; i++)
5553                 bp->irq_tbl[i].vector = msix_ent[i].vector;
5554 }
5555
5556 static void
5557 bnx2_setup_int_mode(struct bnx2 *bp, int dis_msi)
5558 {
5559         bp->irq_tbl[0].handler = bnx2_interrupt;
5560         strcpy(bp->irq_tbl[0].name, bp->dev->name);
5561         bp->irq_nvecs = 1;
5562         bp->irq_tbl[0].vector = bp->pdev->irq;
5563
5564         if ((bp->flags & BNX2_FLAG_MSIX_CAP) && !dis_msi)
5565                 bnx2_enable_msix(bp);
5566
5567         if ((bp->flags & BNX2_FLAG_MSI_CAP) && !dis_msi &&
5568             !(bp->flags & BNX2_FLAG_USING_MSIX)) {
5569                 if (pci_enable_msi(bp->pdev) == 0) {
5570                         bp->flags |= BNX2_FLAG_USING_MSI;
5571                         if (CHIP_NUM(bp) == CHIP_NUM_5709) {
5572                                 bp->flags |= BNX2_FLAG_ONE_SHOT_MSI;
5573                                 bp->irq_tbl[0].handler = bnx2_msi_1shot;
5574                         } else
5575                                 bp->irq_tbl[0].handler = bnx2_msi;
5576
5577                         bp->irq_tbl[0].vector = bp->pdev->irq;
5578                 }
5579         }
5580 }
5581
5582 /* Called with rtnl_lock */
static int
bnx2_open(struct net_device *dev)
{
	/* ndo_open: bring the interface up.  Allocates ring memory,
	 * selects the interrupt mode (INTx/MSI/MSI-X), initializes the
	 * hardware and, when MSI was chosen, verifies that interrupts
	 * are actually delivered before committing to that mode.
	 * Called with rtnl_lock held (see comment above).
	 */
	struct bnx2 *bp = netdev_priv(dev);
	int rc;

	netif_carrier_off(dev);

	bnx2_set_power_state(bp, PCI_D0);
	bnx2_disable_int(bp);

	rc = bnx2_alloc_mem(bp);
	if (rc)
		return rc;

	bnx2_setup_int_mode(bp, disable_msi);
	bnx2_napi_enable(bp);
	rc = bnx2_request_irq(bp);

	/* Error paths unwind only what has been set up so far. */
	if (rc) {
		bnx2_napi_disable(bp);
		bnx2_free_mem(bp);
		return rc;
	}

	rc = bnx2_init_nic(bp);

	if (rc) {
		bnx2_napi_disable(bp);
		bnx2_free_irq(bp);
		bnx2_free_skbs(bp);
		bnx2_free_mem(bp);
		return rc;
	}

	mod_timer(&bp->timer, jiffies + bp->current_interval);

	atomic_set(&bp->intr_sem, 0);

	bnx2_enable_int(bp);

	if (bp->flags & BNX2_FLAG_USING_MSI) {
		/* Test MSI to make sure it is working
		 * If MSI test fails, go back to INTx mode
		 */
		if (bnx2_test_intr(bp) != 0) {
			printk(KERN_WARNING PFX "%s: No interrupt was generated"
			       " using MSI, switching to INTx mode. Please"
			       " report this failure to the PCI maintainer"
			       " and include system chipset information.\n",
			       bp->dev->name);

			bnx2_disable_int(bp);
			bnx2_free_irq(bp);

			/* dis_msi = 1 forces legacy INTx selection. */
			bnx2_setup_int_mode(bp, 1);

			rc = bnx2_init_nic(bp);

			if (!rc)
				rc = bnx2_request_irq(bp);

			if (rc) {
				bnx2_napi_disable(bp);
				bnx2_free_skbs(bp);
				bnx2_free_mem(bp);
				del_timer_sync(&bp->timer);
				return rc;
			}
			bnx2_enable_int(bp);
		}
	}
	if (bp->flags & BNX2_FLAG_USING_MSI)
		printk(KERN_INFO PFX "%s: using MSI\n", dev->name);
	else if (bp->flags & BNX2_FLAG_USING_MSIX)
		printk(KERN_INFO PFX "%s: using MSIX\n", dev->name);

	netif_start_queue(dev);

	return 0;
}
5664
static void
bnx2_reset_task(struct work_struct *work)
{
	/* Workqueue handler (scheduled from bnx2_tx_timeout()): quiesce
	 * the interface and fully reinitialize the NIC from process
	 * context.
	 */
	struct bnx2 *bp = container_of(work, struct bnx2, reset_task);

	if (!netif_running(bp->dev))
		return;

	bp->in_reset_task = 1;
	bnx2_netif_stop(bp);

	/* NOTE(review): the return value of bnx2_init_nic() is ignored;
	 * if init fails the device is restarted anyway — worth checking
	 * whether a failure path is needed here.
	 */
	bnx2_init_nic(bp);

	/* intr_sem = 1 holds off the interrupt path until
	 * bnx2_netif_start() runs (presumably it re-enables interrupts
	 * — confirm in bnx2_netif_start()).
	 */
	atomic_set(&bp->intr_sem, 1);
	bnx2_netif_start(bp);
	bp->in_reset_task = 0;
}
5682
static void
bnx2_tx_timeout(struct net_device *dev)
{
	/* Net stack callback invoked when a TX queue stalls (TX_TIMEOUT).
	 * Defers the heavy reset work to bnx2_reset_task() in process
	 * context via the shared workqueue.
	 */
	struct bnx2 *bp = netdev_priv(dev);

	/* This allows the netif to be shutdown gracefully before resetting */
	schedule_work(&bp->reset_task);
}
5691
5692 #ifdef BCM_VLAN
5693 /* Called with rtnl_lock */
static void
bnx2_vlan_rx_register(struct net_device *dev, struct vlan_group *vlgrp)
{
	/* VLAN acceleration hook: record the new vlan_group and
	 * reprogram the rx filters.  Traffic is quiesced around the
	 * update so the rx path never sees a half-updated state.
	 */
	struct bnx2 *bp = netdev_priv(dev);

	bnx2_netif_stop(bp);

	bp->vlgrp = vlgrp;
	bnx2_set_rx_mode(dev);

	bnx2_netif_start(bp);
}
5706 #endif
5707
5708 /* Called with netif_tx_lock.
5709  * bnx2_tx_int() runs without netif_tx_lock unless it needs to call
5710  * netif_wake_queue().
5711  */
static int
bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	/* Hard-start transmit.  Builds one BD for the linear part of the
	 * skb plus one per page fragment, encodes checksum/VLAN/TSO
	 * options into the first BD's flag word, then rings the tx
	 * doorbell registers.  Returns NETDEV_TX_OK or NETDEV_TX_BUSY.
	 */
	struct bnx2 *bp = netdev_priv(dev);
	dma_addr_t mapping;
	struct tx_bd *txbd;
	struct sw_bd *tx_buf;
	u32 len, vlan_tag_flags, last_frag, mss;
	u16 prod, ring_prod;
	int i;
	struct bnx2_napi *bnapi = &bp->bnx2_napi[bp->tx_vec];

	/* The queue should have been stopped before the ring filled up;
	 * reaching this with too few free BDs indicates a driver bug.
	 */
	if (unlikely(bnx2_tx_avail(bp, bnapi) <
	    (skb_shinfo(skb)->nr_frags + 1))) {
		netif_stop_queue(dev);
		printk(KERN_ERR PFX "%s: BUG! Tx ring full when queue awake!\n",
			dev->name);

		return NETDEV_TX_BUSY;
	}
	len = skb_headlen(skb);
	prod = bp->tx_prod;
	ring_prod = TX_RING_IDX(prod);

	vlan_tag_flags = 0;
	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		vlan_tag_flags |= TX_BD_FLAGS_TCP_UDP_CKSUM;
	}

	/* VLAN tag rides in the upper 16 bits of the flag word. */
	if (bp->vlgrp && vlan_tx_tag_present(skb)) {
		vlan_tag_flags |=
			(TX_BD_FLAGS_VLAN_TAG | (vlan_tx_tag_get(skb) << 16));
	}
	if ((mss = skb_shinfo(skb)->gso_size)) {
		u32 tcp_opt_len, ip_tcp_len;
		struct iphdr *iph;

		vlan_tag_flags |= TX_BD_FLAGS_SW_LSO;

		tcp_opt_len = tcp_optlen(skb);

		if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) {
			/* TSO over IPv6: encode the TCP header offset
			 * (relative to the end of a bare IPv6 header)
			 * into the BD flag/mss bit fields.
			 */
			u32 tcp_off = skb_transport_offset(skb) -
				      sizeof(struct ipv6hdr) - ETH_HLEN;

			vlan_tag_flags |= ((tcp_opt_len >> 2) << 8) |
					  TX_BD_FLAGS_SW_FLAGS;
			if (likely(tcp_off == 0))
				vlan_tag_flags &= ~TX_BD_FLAGS_TCP6_OFF0_MSK;
			else {
				tcp_off >>= 3;
				vlan_tag_flags |= ((tcp_off & 0x3) <<
						   TX_BD_FLAGS_TCP6_OFF0_SHL) |
						  ((tcp_off & 0x10) <<
						   TX_BD_FLAGS_TCP6_OFF4_SHL);
				mss |= (tcp_off & 0xc) << TX_BD_TCP6_OFF2_SHL;
			}
		} else {
			/* TSO over IPv4: the IP/TCP headers are modified
			 * below, so a cloned header block must be
			 * unshared first; drop the packet on failure.
			 */
			if (skb_header_cloned(skb) &&
			    pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
				dev_kfree_skb(skb);
				return NETDEV_TX_OK;
			}

			ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);

			iph = ip_hdr(skb);
			iph->check = 0;
			iph->tot_len = htons(mss + ip_tcp_len + tcp_opt_len);
			/* Seed the TCP checksum with the pseudo-header
			 * (length 0); hardware completes it per segment.
			 */
			tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
								 iph->daddr, 0,
								 IPPROTO_TCP,
								 0);
			/* Combined IP + TCP option length, in words. */
			if (tcp_opt_len || (iph->ihl > 5)) {
				vlan_tag_flags |= ((iph->ihl - 5) +
						   (tcp_opt_len >> 2)) << 8;
			}
		}
	} else
		mss = 0;

	mapping = pci_map_single(bp->pdev, skb->data, len, PCI_DMA_TODEVICE);

	tx_buf = &bp->tx_buf_ring[ring_prod];
	tx_buf->skb = skb;
	pci_unmap_addr_set(tx_buf, mapping, mapping);

	/* First BD: linear data, flagged START. */
	txbd = &bp->tx_desc_ring[ring_prod];

	txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
	txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
	txbd->tx_bd_mss_nbytes = len | (mss << 16);
	txbd->tx_bd_vlan_tag_flags = vlan_tag_flags | TX_BD_FLAGS_START;

	last_frag = skb_shinfo(skb)->nr_frags;

	/* One BD per page fragment. */
	for (i = 0; i < last_frag; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		prod = NEXT_TX_BD(prod);
		ring_prod = TX_RING_IDX(prod);
		txbd = &bp->tx_desc_ring[ring_prod];

		len = frag->size;
		mapping = pci_map_page(bp->pdev, frag->page, frag->page_offset,
			len, PCI_DMA_TODEVICE);
		pci_unmap_addr_set(&bp->tx_buf_ring[ring_prod],
				mapping, mapping);

		txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
		txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
		txbd->tx_bd_mss_nbytes = len | (mss << 16);
		txbd->tx_bd_vlan_tag_flags = vlan_tag_flags;

	}
	/* Mark the last BD of the chain. */
	txbd->tx_bd_vlan_tag_flags |= TX_BD_FLAGS_END;

	prod = NEXT_TX_BD(prod);
	bp->tx_prod_bseq += skb->len;

	/* Ring the doorbell: new producer index and byte sequence. */
	REG_WR16(bp, bp->tx_bidx_addr, prod);
	REG_WR(bp, bp->tx_bseq_addr, bp->tx_prod_bseq);

	mmiowb();

	bp->tx_prod = prod;
	dev->trans_start = jiffies;

	if (unlikely(bnx2_tx_avail(bp, bnapi) <= MAX_SKB_FRAGS)) {
		netif_stop_queue(dev);
		/* Re-check after stopping: a tx completion may have
		 * freed enough BDs in the meantime to wake right back up.
		 */
		if (bnx2_tx_avail(bp, bnapi) > bp->tx_wake_thresh)
			netif_wake_queue(dev);
	}

	return NETDEV_TX_OK;
}
5848
/* Called with rtnl_lock */
static int
bnx2_close(struct net_device *dev)
{
        struct bnx2 *bp = netdev_priv(dev);
        u32 reset_code;

        /* Calling flush_scheduled_work() may deadlock because
         * linkwatch_event() may be on the workqueue and it will try to get
         * the rtnl_lock which we are holding.
         */
        while (bp->in_reset_task)
                msleep(1);

        /* Quiesce interrupts, NAPI polling and the maintenance timer
         * before tearing anything down.
         */
        bnx2_disable_int_sync(bp);
        bnx2_napi_disable(bp);
        del_timer_sync(&bp->timer);
        /* Pick the reset code passed to the chip according to the
         * Wake-on-LAN configuration.
         */
        if (bp->flags & BNX2_FLAG_NO_WOL)
                reset_code = BNX2_DRV_MSG_CODE_UNLOAD_LNK_DN;
        else if (bp->wol)
                reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
        else
                reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
        bnx2_reset_chip(bp, reset_code);
        bnx2_free_irq(bp);
        bnx2_free_skbs(bp);
        bnx2_free_mem(bp);
        bp->link_up = 0;
        netif_carrier_off(bp->dev);
        /* Leave the device in a low power state until the next open. */
        bnx2_set_power_state(bp, PCI_D3hot);
        return 0;
}
5881
/* Fold a 64-bit hardware counter (a <name>_hi / <name>_lo pair) into an
 * unsigned long.  The whole expansion is parenthesized so the macro can
 * appear inside larger expressions (sums, multiplications) without
 * operator-precedence surprises; the original expansion ended in a bare
 * `+ lo` term that would bind incorrectly under e.g. multiplication.
 */
#define GET_NET_STATS64(ctr)                                    \
        ((unsigned long) ((unsigned long) (ctr##_hi) << 32) +  \
         (unsigned long) (ctr##_lo))

/* On 32-bit hosts only the low 32 bits of the counter fit. */
#define GET_NET_STATS32(ctr)            \
        (ctr##_lo)

#if (BITS_PER_LONG == 64)
#define GET_NET_STATS   GET_NET_STATS64
#else
#define GET_NET_STATS   GET_NET_STATS32
#endif
5894
5895 static struct net_device_stats *
5896 bnx2_get_stats(struct net_device *dev)
5897 {
5898         struct bnx2 *bp = netdev_priv(dev);
5899         struct statistics_block *stats_blk = bp->stats_blk;
5900         struct net_device_stats *net_stats = &bp->net_stats;
5901
5902         if (bp->stats_blk == NULL) {
5903                 return net_stats;
5904         }
5905         net_stats->rx_packets =
5906                 GET_NET_STATS(stats_blk->stat_IfHCInUcastPkts) +
5907                 GET_NET_STATS(stats_blk->stat_IfHCInMulticastPkts) +
5908                 GET_NET_STATS(stats_blk->stat_IfHCInBroadcastPkts);
5909
5910         net_stats->tx_packets =
5911                 GET_NET_STATS(stats_blk->stat_IfHCOutUcastPkts) +
5912                 GET_NET_STATS(stats_blk->stat_IfHCOutMulticastPkts) +
5913                 GET_NET_STATS(stats_blk->stat_IfHCOutBroadcastPkts);
5914
5915         net_stats->rx_bytes =
5916                 GET_NET_STATS(stats_blk->stat_IfHCInOctets);
5917
5918         net_stats->tx_bytes =
5919                 GET_NET_STATS(stats_blk->stat_IfHCOutOctets);
5920
5921         net_stats->multicast =
5922                 GET_NET_STATS(stats_blk->stat_IfHCOutMulticastPkts);
5923
5924         net_stats->collisions =
5925                 (unsigned long) stats_blk->stat_EtherStatsCollisions;
5926
5927         net_stats->rx_length_errors =
5928                 (unsigned long) (stats_blk->stat_EtherStatsUndersizePkts +
5929                 stats_blk->stat_EtherStatsOverrsizePkts);
5930
5931         net_stats->rx_over_errors =
5932                 (unsigned long) stats_blk->stat_IfInMBUFDiscards;
5933
5934         net_stats->rx_frame_errors =
5935                 (unsigned long) stats_blk->stat_Dot3StatsAlignmentErrors;
5936
5937         net_stats->rx_crc_errors =
5938                 (unsigned long) stats_blk->stat_Dot3StatsFCSErrors;
5939
5940         net_stats->rx_errors = net_stats->rx_length_errors +
5941                 net_stats->rx_over_errors + net_stats->rx_frame_errors +
5942                 net_stats->rx_crc_errors;
5943
5944         net_stats->tx_aborted_errors =
5945                 (unsigned long) (stats_blk->stat_Dot3StatsExcessiveCollisions +
5946                 stats_blk->stat_Dot3StatsLateCollisions);
5947
5948         if ((CHIP_NUM(bp) == CHIP_NUM_5706) ||
5949             (CHIP_ID(bp) == CHIP_ID_5708_A0))
5950                 net_stats->tx_carrier_errors = 0;
5951         else {
5952                 net_stats->tx_carrier_errors =
5953                         (unsigned long)
5954                         stats_blk->stat_Dot3StatsCarrierSenseErrors;
5955         }
5956
5957         net_stats->tx_errors =
5958                 (unsigned long)
5959                 stats_blk->stat_emac_tx_stat_dot3statsinternalmactransmiterrors
5960                 +
5961                 net_stats->tx_aborted_errors +
5962                 net_stats->tx_carrier_errors;
5963
5964         net_stats->rx_missed_errors =
5965                 (unsigned long) (stats_blk->stat_IfInMBUFDiscards +
5966                 stats_blk->stat_FwRxDrop);
5967
5968         return net_stats;
5969 }
5970
5971 /* All ethtool functions called with rtnl_lock */
5972
5973 static int
5974 bnx2_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
5975 {
5976         struct bnx2 *bp = netdev_priv(dev);
5977         int support_serdes = 0, support_copper = 0;
5978
5979         cmd->supported = SUPPORTED_Autoneg;
5980         if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
5981                 support_serdes = 1;
5982                 support_copper = 1;
5983         } else if (bp->phy_port == PORT_FIBRE)
5984                 support_serdes = 1;
5985         else
5986                 support_copper = 1;
5987
5988         if (support_serdes) {
5989                 cmd->supported |= SUPPORTED_1000baseT_Full |
5990                         SUPPORTED_FIBRE;
5991                 if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE)
5992                         cmd->supported |= SUPPORTED_2500baseX_Full;
5993
5994         }
5995         if (support_copper) {
5996                 cmd->supported |= SUPPORTED_10baseT_Half |
5997                         SUPPORTED_10baseT_Full |
5998                         SUPPORTED_100baseT_Half |
5999                         SUPPORTED_100baseT_Full |
6000                         SUPPORTED_1000baseT_Full |
6001                         SUPPORTED_TP;
6002
6003         }
6004
6005         spin_lock_bh(&bp->phy_lock);
6006         cmd->port = bp->phy_port;
6007         cmd->advertising = bp->advertising;
6008
6009         if (bp->autoneg & AUTONEG_SPEED) {
6010                 cmd->autoneg = AUTONEG_ENABLE;
6011         }
6012         else {
6013                 cmd->autoneg = AUTONEG_DISABLE;
6014         }
6015
6016         if (netif_carrier_ok(dev)) {
6017                 cmd->speed = bp->line_speed;
6018                 cmd->duplex = bp->duplex;
6019         }
6020         else {
6021                 cmd->speed = -1;
6022                 cmd->duplex = -1;
6023         }
6024         spin_unlock_bh(&bp->phy_lock);
6025
6026         cmd->transceiver = XCVR_INTERNAL;
6027         cmd->phy_address = bp->phy_addr;
6028
6029         return 0;
6030 }
6031
/* ethtool set_settings handler.  Validates the requested port/autoneg/
 * speed/duplex combination against the PHY's capabilities, then
 * reprograms the PHY.  Called with rtnl_lock held (see comment above);
 * the phy lock is held for the whole operation.  Returns 0 on success
 * or -EINVAL on an unsupported combination.
 */
static int
bnx2_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
        struct bnx2 *bp = netdev_priv(dev);
        u8 autoneg = bp->autoneg;
        u8 req_duplex = bp->req_duplex;
        u16 req_line_speed = bp->req_line_speed;
        u32 advertising = bp->advertising;
        int err = -EINVAL;

        spin_lock_bh(&bp->phy_lock);

        if (cmd->port != PORT_TP && cmd->port != PORT_FIBRE)
                goto err_out_unlock;

        /* Switching the port type is only possible on remote-PHY
         * capable devices.
         */
        if (cmd->port != bp->phy_port &&
            !(bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP))
                goto err_out_unlock;

        if (cmd->autoneg == AUTONEG_ENABLE) {
                autoneg |= AUTONEG_SPEED;

                cmd->advertising &= ETHTOOL_ALL_COPPER_SPEED;

                /* allow advertising 1 speed */
                if ((cmd->advertising == ADVERTISED_10baseT_Half) ||
                        (cmd->advertising == ADVERTISED_10baseT_Full) ||
                        (cmd->advertising == ADVERTISED_100baseT_Half) ||
                        (cmd->advertising == ADVERTISED_100baseT_Full)) {

                        /* 10/100 modes are rejected on a fibre port. */
                        if (cmd->port == PORT_FIBRE)
                                goto err_out_unlock;

                        advertising = cmd->advertising;

                } else if (cmd->advertising == ADVERTISED_2500baseX_Full) {
                        /* 2.5G needs a capable PHY and is rejected on TP. */
                        if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) ||
                            (cmd->port == PORT_TP))
                                goto err_out_unlock;
                } else if (cmd->advertising == ADVERTISED_1000baseT_Full)
                        advertising = cmd->advertising;
                else if (cmd->advertising == ADVERTISED_1000baseT_Half)
                        goto err_out_unlock;
                else {
                        /* No single valid speed requested: advertise
                         * everything the port type supports.
                         */
                        if (cmd->port == PORT_FIBRE)
                                advertising = ETHTOOL_ALL_FIBRE_SPEED;
                        else
                                advertising = ETHTOOL_ALL_COPPER_SPEED;
                }
                advertising |= ADVERTISED_Autoneg;
        }
        else {
                /* Forced speed/duplex. */
                if (cmd->port == PORT_FIBRE) {
                        /* Fibre only supports full duplex 1G/2.5G. */
                        if ((cmd->speed != SPEED_1000 &&
                             cmd->speed != SPEED_2500) ||
                            (cmd->duplex != DUPLEX_FULL))
                                goto err_out_unlock;

                        if (cmd->speed == SPEED_2500 &&
                            !(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
                                goto err_out_unlock;
                }
                /* Forcing gigabit speeds is rejected on copper. */
                else if (cmd->speed == SPEED_1000 || cmd->speed == SPEED_2500)
                        goto err_out_unlock;

                autoneg &= ~AUTONEG_SPEED;
                req_line_speed = cmd->speed;
                req_duplex = cmd->duplex;
                advertising = 0;
        }

        /* Validation passed: commit the new settings and program the PHY. */
        bp->autoneg = autoneg;
        bp->advertising = advertising;
        bp->req_line_speed = req_line_speed;
        bp->req_duplex = req_duplex;

        err = bnx2_setup_phy(bp, cmd->port);

err_out_unlock:
        spin_unlock_bh(&bp->phy_lock);

        return err;
}
6115
6116 static void
6117 bnx2_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
6118 {
6119         struct bnx2 *bp = netdev_priv(dev);
6120
6121         strcpy(info->driver, DRV_MODULE_NAME);
6122         strcpy(info->version, DRV_MODULE_VERSION);
6123         strcpy(info->bus_info, pci_name(bp->pdev));
6124         strcpy(info->fw_version, bp->fw_version);
6125 }
6126
/* Size in bytes of the register dump produced by bnx2_get_regs(). */
#define BNX2_REGDUMP_LEN                (32 * 1024)

/* ethtool get_regs_len handler: tells the core how large a buffer to
 * allocate before calling bnx2_get_regs().
 */
static int
bnx2_get_regs_len(struct net_device *dev)
{
        return BNX2_REGDUMP_LEN;
}
6134
6135 static void
6136 bnx2_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *_p)
6137 {
6138         u32 *p = _p, i, offset;
6139         u8 *orig_p = _p;
6140         struct bnx2 *bp = netdev_priv(dev);
6141         u32 reg_boundaries[] = { 0x0000, 0x0098, 0x0400, 0x045c,
6142                                  0x0800, 0x0880, 0x0c00, 0x0c10,
6143                                  0x0c30, 0x0d08, 0x1000, 0x101c,
6144                                  0x1040, 0x1048, 0x1080, 0x10a4,
6145                                  0x1400, 0x1490, 0x1498, 0x14f0,
6146                                  0x1500, 0x155c, 0x1580, 0x15dc,
6147                                  0x1600, 0x1658, 0x1680, 0x16d8,
6148                                  0x1800, 0x1820, 0x1840, 0x1854,
6149                                  0x1880, 0x1894, 0x1900, 0x1984,
6150                                  0x1c00, 0x1c0c, 0x1c40, 0x1c54,
6151                                  0x1c80, 0x1c94, 0x1d00, 0x1d84,
6152                                  0x2000, 0x2030, 0x23c0, 0x2400,
6153                                  0x2800, 0x2820, 0x2830, 0x2850,
6154                                  0x2b40, 0x2c10, 0x2fc0, 0x3058,
6155                                  0x3c00, 0x3c94, 0x4000, 0x4010,
6156                                  0x4080, 0x4090, 0x43c0, 0x4458,
6157                                  0x4c00, 0x4c18, 0x4c40, 0x4c54,
6158                                  0x4fc0, 0x5010, 0x53c0, 0x5444,
6159                                  0x5c00, 0x5c18, 0x5c80, 0x5c90,
6160                                  0x5fc0, 0x6000, 0x6400, 0x6428,
6161                                  0x6800, 0x6848, 0x684c, 0x6860,
6162                                  0x6888, 0x6910, 0x8000 };
6163
6164         regs->version = 0;
6165
6166         memset(p, 0, BNX2_REGDUMP_LEN);
6167
6168         if (!netif_running(bp->dev))
6169                 return;
6170
6171         i = 0;
6172         offset = reg_boundaries[0];
6173         p += offset;
6174         while (offset < BNX2_REGDUMP_LEN) {
6175                 *p++ = REG_RD(bp, offset);
6176                 offset += 4;
6177                 if (offset == reg_boundaries[i + 1]) {
6178                         offset = reg_boundaries[i + 2];
6179                         p = (u32 *) (orig_p + offset);
6180                         i += 2;
6181                 }
6182         }
6183 }
6184
6185 static void
6186 bnx2_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
6187 {
6188         struct bnx2 *bp = netdev_priv(dev);
6189
6190         if (bp->flags & BNX2_FLAG_NO_WOL) {
6191                 wol->supported = 0;
6192                 wol->wolopts = 0;
6193         }
6194         else {
6195                 wol->supported = WAKE_MAGIC;
6196                 if (bp->wol)
6197                         wol->wolopts = WAKE_MAGIC;
6198                 else
6199                         wol->wolopts = 0;
6200         }
6201         memset(&wol->sopass, 0, sizeof(wol->sopass));
6202 }
6203
6204 static int
6205 bnx2_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
6206 {
6207         struct bnx2 *bp = netdev_priv(dev);
6208
6209         if (wol->wolopts & ~WAKE_MAGIC)
6210                 return -EINVAL;
6211
6212         if (wol->wolopts & WAKE_MAGIC) {
6213                 if (bp->flags & BNX2_FLAG_NO_WOL)
6214                         return -EINVAL;
6215
6216                 bp->wol = 1;
6217         }
6218         else {
6219                 bp->wol = 0;
6220         }
6221         return 0;
6222 }
6223
/* ethtool nway_reset handler: restart autonegotiation.  Only valid
 * when speed autonegotiation is enabled.
 */
static int
bnx2_nway_reset(struct net_device *dev)
{
        struct bnx2 *bp = netdev_priv(dev);
        u32 bmcr;

        if (!(bp->autoneg & AUTONEG_SPEED)) {
                return -EINVAL;
        }

        spin_lock_bh(&bp->phy_lock);

        /* A remote PHY is restarted through the firmware interface. */
        if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
                int rc;

                rc = bnx2_setup_remote_phy(bp, bp->phy_port);
                spin_unlock_bh(&bp->phy_lock);
                return rc;
        }

        /* Force a link down visible on the other side */
        if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
                bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
                /* Drop the lock while sleeping so the 20ms link-down
                 * window does not stall other phy users.
                 */
                spin_unlock_bh(&bp->phy_lock);

                msleep(20);

                spin_lock_bh(&bp->phy_lock);

                /* Arm the timer that supervises serdes autonegotiation. */
                bp->current_interval = SERDES_AN_TIMEOUT;
                bp->serdes_an_pending = 1;
                mod_timer(&bp->timer, jiffies + bp->current_interval);
        }

        /* Clear loopback and restart autonegotiation. */
        bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
        bmcr &= ~BMCR_LOOPBACK;
        bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART | BMCR_ANENABLE);

        spin_unlock_bh(&bp->phy_lock);

        return 0;
}
6266
6267 static int
6268 bnx2_get_eeprom_len(struct net_device *dev)
6269 {
6270         struct bnx2 *bp = netdev_priv(dev);
6271
6272         if (bp->flash_info == NULL)
6273                 return 0;
6274
6275         return (int) bp->flash_size;
6276 }
6277
6278 static int
6279 bnx2_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
6280                 u8 *eebuf)
6281 {
6282         struct bnx2 *bp = netdev_priv(dev);
6283         int rc;
6284
6285         /* parameters already validated in ethtool_get_eeprom */
6286
6287         rc = bnx2_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
6288
6289         return rc;
6290 }
6291
6292 static int
6293 bnx2_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
6294                 u8 *eebuf)
6295 {
6296         struct bnx2 *bp = netdev_priv(dev);
6297         int rc;
6298
6299         /* parameters already validated in ethtool_set_eeprom */
6300
6301         rc = bnx2_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
6302
6303         return rc;
6304 }
6305
/* ethtool get_coalesce handler: report the currently configured
 * interrupt coalescing parameters from the driver's cached copies.
 */
static int
bnx2_get_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
{
        struct bnx2 *bp = netdev_priv(dev);

        memset(coal, 0, sizeof(struct ethtool_coalesce));

        /* Receive side: tick counts and frame-count trip points. */
        coal->rx_coalesce_usecs = bp->rx_ticks;
        coal->rx_max_coalesced_frames = bp->rx_quick_cons_trip;
        coal->rx_coalesce_usecs_irq = bp->rx_ticks_int;
        coal->rx_max_coalesced_frames_irq = bp->rx_quick_cons_trip_int;

        /* Transmit side. */
        coal->tx_coalesce_usecs = bp->tx_ticks;
        coal->tx_max_coalesced_frames = bp->tx_quick_cons_trip;
        coal->tx_coalesce_usecs_irq = bp->tx_ticks_int;
        coal->tx_max_coalesced_frames_irq = bp->tx_quick_cons_trip_int;

        coal->stats_block_coalesce_usecs = bp->stats_ticks;

        return 0;
}
6327
6328 static int
6329 bnx2_set_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
6330 {
6331         struct bnx2 *bp = netdev_priv(dev);
6332
6333         bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
6334         if (bp->rx_ticks > 0x3ff) bp->rx_ticks = 0x3ff;
6335
6336         bp->rx_quick_cons_trip = (u16) coal->rx_max_coalesced_frames;
6337         if (bp->rx_quick_cons_trip > 0xff) bp->rx_quick_cons_trip = 0xff;
6338
6339         bp->rx_ticks_int = (u16) coal->rx_coalesce_usecs_irq;
6340         if (bp->rx_ticks_int > 0x3ff) bp->rx_ticks_int = 0x3ff;
6341
6342         bp->rx_quick_cons_trip_int = (u16) coal->rx_max_coalesced_frames_irq;
6343         if (bp->rx_quick_cons_trip_int > 0xff)
6344                 bp->rx_quick_cons_trip_int = 0xff;
6345
6346         bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
6347         if (bp->tx_ticks > 0x3ff) bp->tx_ticks = 0x3ff;
6348
6349         bp->tx_quick_cons_trip = (u16) coal->tx_max_coalesced_frames;
6350         if (bp->tx_quick_cons_trip > 0xff) bp->tx_quick_cons_trip = 0xff;
6351
6352         bp->tx_ticks_int = (u16) coal->tx_coalesce_usecs_irq;
6353         if (bp->tx_ticks_int > 0x3ff) bp->tx_ticks_int = 0x3ff;
6354
6355         bp->tx_quick_cons_trip_int = (u16) coal->tx_max_coalesced_frames_irq;
6356         if (bp->tx_quick_cons_trip_int > 0xff) bp->tx_quick_cons_trip_int =
6357                 0xff;
6358
6359         bp->stats_ticks = coal->stats_block_coalesce_usecs;
6360         if (CHIP_NUM(bp) == CHIP_NUM_5708) {
6361                 if (bp->stats_ticks != 0 && bp->stats_ticks != USEC_PER_SEC)
6362                         bp->stats_ticks = USEC_PER_SEC;
6363         }
6364         if (bp->stats_ticks > BNX2_HC_STATS_TICKS_HC_STAT_TICKS)
6365                 bp->stats_ticks = BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
6366         bp->stats_ticks &= BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
6367
6368         if (netif_running(bp->dev)) {
6369                 bnx2_netif_stop(bp);
6370                 bnx2_init_nic(bp);
6371                 bnx2_netif_start(bp);
6372         }
6373
6374         return 0;
6375 }
6376
6377 static void
6378 bnx2_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
6379 {
6380         struct bnx2 *bp = netdev_priv(dev);
6381
6382         ering->rx_max_pending = MAX_TOTAL_RX_DESC_CNT;
6383         ering->rx_mini_max_pending = 0;
6384         ering->rx_jumbo_max_pending = MAX_TOTAL_RX_PG_DESC_CNT;
6385
6386         ering->rx_pending = bp->rx_ring_size;
6387         ering->rx_mini_pending = 0;
6388         ering->rx_jumbo_pending = bp->rx_pg_ring_size;
6389
6390         ering->tx_max_pending = MAX_TX_DESC_CNT;
6391         ering->tx_pending = bp->tx_ring_size;
6392 }
6393
/* Resize the RX and TX rings.  If the interface is running, the NIC is
 * quiesced and its buffers and DMA memory are released before the new
 * sizes are recorded, then everything is re-allocated and restarted.
 */
static int
bnx2_change_ring_size(struct bnx2 *bp, u32 rx, u32 tx)
{
        if (netif_running(bp->dev)) {
                bnx2_netif_stop(bp);
                bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
                bnx2_free_skbs(bp);
                bnx2_free_mem(bp);
        }

        bnx2_set_rx_ring_size(bp, rx);
        bp->tx_ring_size = tx;

        if (netif_running(bp->dev)) {
                int rc;

                rc = bnx2_alloc_mem(bp);
                /* NOTE(review): on allocation failure the device is left
                 * stopped with its memory freed and only rc is returned;
                 * verify that callers cope with this half-torn-down state.
                 */
                if (rc)
                        return rc;
                bnx2_init_nic(bp);
                bnx2_netif_start(bp);
        }
        return 0;
}
6418
6419 static int
6420 bnx2_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
6421 {
6422         struct bnx2 *bp = netdev_priv(dev);
6423         int rc;
6424
6425         if ((ering->rx_pending > MAX_TOTAL_RX_DESC_CNT) ||
6426                 (ering->tx_pending > MAX_TX_DESC_CNT) ||
6427                 (ering->tx_pending <= MAX_SKB_FRAGS)) {
6428
6429                 return -EINVAL;
6430         }
6431         rc = bnx2_change_ring_size(bp, ering->rx_pending, ering->tx_pending);
6432         return rc;
6433 }
6434
6435 static void
6436 bnx2_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
6437 {
6438         struct bnx2 *bp = netdev_priv(dev);
6439
6440         epause->autoneg = ((bp->autoneg & AUTONEG_FLOW_CTRL) != 0);
6441         epause->rx_pause = ((bp->flow_ctrl & FLOW_CTRL_RX) != 0);
6442         epause->tx_pause = ((bp->flow_ctrl & FLOW_CTRL_TX) != 0);
6443 }
6444
6445 static int
6446 bnx2_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
6447 {
6448         struct bnx2 *bp = netdev_priv(dev);
6449
6450         bp->req_flow_ctrl = 0;
6451         if (epause->rx_pause)
6452                 bp->req_flow_ctrl |= FLOW_CTRL_RX;
6453         if (epause->tx_pause)
6454                 bp->req_flow_ctrl |= FLOW_CTRL_TX;
6455
6456         if (epause->autoneg) {
6457                 bp->autoneg |= AUTONEG_FLOW_CTRL;
6458         }
6459         else {
6460                 bp->autoneg &= ~AUTONEG_FLOW_CTRL;
6461         }
6462
6463         spin_lock_bh(&bp->phy_lock);
6464
6465         bnx2_setup_phy(bp, bp->phy_port);
6466
6467         spin_unlock_bh(&bp->phy_lock);
6468
6469         return 0;
6470 }
6471
/* ethtool get_rx_csum handler: report whether RX checksum offload is
 * enabled.
 */
static u32
bnx2_get_rx_csum(struct net_device *dev)
{
        struct bnx2 *bp = netdev_priv(dev);

        return bp->rx_csum;
}
6479
/* ethtool set_rx_csum handler: record the requested RX checksum
 * offload setting.
 */
static int
bnx2_set_rx_csum(struct net_device *dev, u32 data)
{
        struct bnx2 *bp = netdev_priv(dev);

        bp->rx_csum = data;
        return 0;
}
6488
6489 static int
6490 bnx2_set_tso(struct net_device *dev, u32 data)
6491 {
6492         struct bnx2 *bp = netdev_priv(dev);
6493
6494         if (data) {
6495                 dev->features |= NETIF_F_TSO | NETIF_F_TSO_ECN;
6496                 if (CHIP_NUM(bp) == CHIP_NUM_5709)
6497                         dev->features |= NETIF_F_TSO6;
6498         } else
6499                 dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6 |
6500                                    NETIF_F_TSO_ECN);
6501         return 0;
6502 }
6503
#define BNX2_NUM_STATS 46

/* ethtool statistics names, index-aligned with bnx2_stats_offset_arr
 * and the per-chip length arrays below.
 */
static struct {
        char string[ETH_GSTRING_LEN];
} bnx2_stats_str_arr[BNX2_NUM_STATS] = {
        { "rx_bytes" },
        { "rx_error_bytes" },
        { "tx_bytes" },
        { "tx_error_bytes" },
        { "rx_ucast_packets" },
        { "rx_mcast_packets" },
        { "rx_bcast_packets" },
        { "tx_ucast_packets" },
        { "tx_mcast_packets" },
        { "tx_bcast_packets" },
        { "tx_mac_errors" },
        { "tx_carrier_errors" },
        { "rx_crc_errors" },
        { "rx_align_errors" },
        { "tx_single_collisions" },
        { "tx_multi_collisions" },
        { "tx_deferred" },
        { "tx_excess_collisions" },
        { "tx_late_collisions" },
        { "tx_total_collisions" },
        { "rx_fragments" },
        { "rx_jabbers" },
        { "rx_undersize_packets" },
        { "rx_oversize_packets" },
        { "rx_64_byte_packets" },
        { "rx_65_to_127_byte_packets" },
        { "rx_128_to_255_byte_packets" },
        { "rx_256_to_511_byte_packets" },
        { "rx_512_to_1023_byte_packets" },
        { "rx_1024_to_1522_byte_packets" },
        { "rx_1523_to_9022_byte_packets" },
        { "tx_64_byte_packets" },
        { "tx_65_to_127_byte_packets" },
        { "tx_128_to_255_byte_packets" },
        { "tx_256_to_511_byte_packets" },
        { "tx_512_to_1023_byte_packets" },
        { "tx_1024_to_1522_byte_packets" },
        { "tx_1523_to_9022_byte_packets" },
        { "rx_xon_frames" },
        { "rx_xoff_frames" },
        { "tx_xon_frames" },
        { "tx_xoff_frames" },
        { "rx_mac_ctrl_frames" },
        { "rx_filtered_packets" },
        { "rx_discards" },
        { "rx_fw_discards" },
};
6556
/* u32-word offset of a counter inside struct statistics_block. */
#define STATS_OFFSET32(offset_name) (offsetof(struct statistics_block, offset_name) / 4)

/* Per-statistic word offsets, index-aligned with bnx2_stats_str_arr.
 * 64-bit counters point at their _hi word; the length arrays below
 * give each entry's width.
 */
static const unsigned long bnx2_stats_offset_arr[BNX2_NUM_STATS] = {
    STATS_OFFSET32(stat_IfHCInOctets_hi),
    STATS_OFFSET32(stat_IfHCInBadOctets_hi),
    STATS_OFFSET32(stat_IfHCOutOctets_hi),
    STATS_OFFSET32(stat_IfHCOutBadOctets_hi),
    STATS_OFFSET32(stat_IfHCInUcastPkts_hi),
    STATS_OFFSET32(stat_IfHCInMulticastPkts_hi),
    STATS_OFFSET32(stat_IfHCInBroadcastPkts_hi),
    STATS_OFFSET32(stat_IfHCOutUcastPkts_hi),
    STATS_OFFSET32(stat_IfHCOutMulticastPkts_hi),
    STATS_OFFSET32(stat_IfHCOutBroadcastPkts_hi),
    STATS_OFFSET32(stat_emac_tx_stat_dot3statsinternalmactransmiterrors),
    STATS_OFFSET32(stat_Dot3StatsCarrierSenseErrors),
    STATS_OFFSET32(stat_Dot3StatsFCSErrors),
    STATS_OFFSET32(stat_Dot3StatsAlignmentErrors),
    STATS_OFFSET32(stat_Dot3StatsSingleCollisionFrames),
    STATS_OFFSET32(stat_Dot3StatsMultipleCollisionFrames),
    STATS_OFFSET32(stat_Dot3StatsDeferredTransmissions),
    STATS_OFFSET32(stat_Dot3StatsExcessiveCollisions),
    STATS_OFFSET32(stat_Dot3StatsLateCollisions),
    STATS_OFFSET32(stat_EtherStatsCollisions),
    STATS_OFFSET32(stat_EtherStatsFragments),
    STATS_OFFSET32(stat_EtherStatsJabbers),
    STATS_OFFSET32(stat_EtherStatsUndersizePkts),
    STATS_OFFSET32(stat_EtherStatsOverrsizePkts),
    STATS_OFFSET32(stat_EtherStatsPktsRx64Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx65Octetsto127Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx128Octetsto255Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx256Octetsto511Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx512Octetsto1023Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx1024Octetsto1522Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx1523Octetsto9022Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx64Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx65Octetsto127Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx128Octetsto255Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx256Octetsto511Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx512Octetsto1023Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx1024Octetsto1522Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx1523Octetsto9022Octets),
    STATS_OFFSET32(stat_XonPauseFramesReceived),
    STATS_OFFSET32(stat_XoffPauseFramesReceived),
    STATS_OFFSET32(stat_OutXonSent),
    STATS_OFFSET32(stat_OutXoffSent),
    STATS_OFFSET32(stat_MacControlFramesReceived),
    STATS_OFFSET32(stat_IfInFramesL2FilterDiscards),
    STATS_OFFSET32(stat_IfInMBUFDiscards),
    STATS_OFFSET32(stat_FwRxDrop),
};

/* stat_IfHCInBadOctets and stat_Dot3StatsCarrierSenseErrors are
 * skipped because of errata.
 */
/* Per-counter widths in bytes: 8 = 64-bit, 4 = 32-bit, 0 = skipped. */
static u8 bnx2_5706_stats_len_arr[BNX2_NUM_STATS] = {
        8,0,8,8,8,8,8,8,8,8,
        4,0,4,4,4,4,4,4,4,4,
        4,4,4,4,4,4,4,4,4,4,
        4,4,4,4,4,4,4,4,4,4,
        4,4,4,4,4,4,
};

static u8 bnx2_5708_stats_len_arr[BNX2_NUM_STATS] = {
        8,0,8,8,8,8,8,8,8,8,
        4,4,4,4,4,4,4,4,4,4,
        4,4,4,4,4,4,4,4,4,4,
        4,4,4,4,4,4,4,4,4,4,
        4,4,4,4,4,4,
};
6626
#define BNX2_NUM_TESTS 6

/* ethtool self-test names; indices match the buf[] result slots
 * filled in by bnx2_self_test().
 */
static struct {
        char string[ETH_GSTRING_LEN];
} bnx2_tests_str_arr[BNX2_NUM_TESTS] = {
        { "register_test (offline)" },
        { "memory_test (offline)" },
        { "loopback_test (offline)" },
        { "nvram_test (online)" },
        { "interrupt_test (online)" },
        { "link_test (online)" },
};
6639
6640 static int
6641 bnx2_get_sset_count(struct net_device *dev, int sset)
6642 {
6643         switch (sset) {
6644         case ETH_SS_TEST:
6645                 return BNX2_NUM_TESTS;
6646         case ETH_SS_STATS:
6647                 return BNX2_NUM_STATS;
6648         default:
6649                 return -EOPNOTSUPP;
6650         }
6651 }
6652
/* ethtool self_test handler.  buf[] gets one result slot per test in
 * bnx2_tests_str_arr order (non-zero = failed).  The offline tests
 * quiesce the NIC and reinitialize it afterwards when the interface
 * was running.
 */
static void
bnx2_self_test(struct net_device *dev, struct ethtool_test *etest, u64 *buf)
{
        struct bnx2 *bp = netdev_priv(dev);

        memset(buf, 0, sizeof(u64) * BNX2_NUM_TESTS);
        if (etest->flags & ETH_TEST_FL_OFFLINE) {
                int i;

                /* Quiesce the NIC and run the destructive tests. */
                bnx2_netif_stop(bp);
                bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_DIAG);
                bnx2_free_skbs(bp);

                if (bnx2_test_registers(bp) != 0) {
                        buf[0] = 1;
                        etest->flags |= ETH_TEST_FL_FAILED;
                }
                if (bnx2_test_memory(bp) != 0) {
                        buf[1] = 1;
                        etest->flags |= ETH_TEST_FL_FAILED;
                }
                if ((buf[2] = bnx2_test_loopback(bp)) != 0)
                        etest->flags |= ETH_TEST_FL_FAILED;

                /* Bring the device back to its pre-test state. */
                if (!netif_running(bp->dev)) {
                        bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
                }
                else {
                        bnx2_init_nic(bp);
                        bnx2_netif_start(bp);
                }

                /* wait for link up */
                for (i = 0; i < 7; i++) {
                        if (bp->link_up)
                                break;
                        msleep_interruptible(1000);
                }
        }

        /* Online tests: safe to run while the interface is up. */
        if (bnx2_test_nvram(bp) != 0) {
                buf[3] = 1;
                etest->flags |= ETH_TEST_FL_FAILED;
        }
        if (bnx2_test_intr(bp) != 0) {
                buf[4] = 1;
                etest->flags |= ETH_TEST_FL_FAILED;
        }

        if (bnx2_test_link(bp) != 0) {
                buf[5] = 1;
                etest->flags |= ETH_TEST_FL_FAILED;

        }
}
6708
6709 static void
6710 bnx2_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
6711 {
6712         switch (stringset) {
6713         case ETH_SS_STATS:
6714                 memcpy(buf, bnx2_stats_str_arr,
6715                         sizeof(bnx2_stats_str_arr));
6716                 break;
6717         case ETH_SS_TEST:
6718                 memcpy(buf, bnx2_tests_str_arr,
6719                         sizeof(bnx2_tests_str_arr));
6720                 break;
6721         }
6722 }
6723
6724 static void
6725 bnx2_get_ethtool_stats(struct net_device *dev,
6726                 struct ethtool_stats *stats, u64 *buf)
6727 {
6728         struct bnx2 *bp = netdev_priv(dev);
6729         int i;
6730         u32 *hw_stats = (u32 *) bp->stats_blk;
6731         u8 *stats_len_arr = NULL;
6732
6733         if (hw_stats == NULL) {
6734                 memset(buf, 0, sizeof(u64) * BNX2_NUM_STATS);
6735                 return;
6736         }
6737
6738         if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
6739             (CHIP_ID(bp) == CHIP_ID_5706_A1) ||
6740             (CHIP_ID(bp) == CHIP_ID_5706_A2) ||
6741             (CHIP_ID(bp) == CHIP_ID_5708_A0))
6742                 stats_len_arr = bnx2_5706_stats_len_arr;
6743         else
6744                 stats_len_arr = bnx2_5708_stats_len_arr;
6745
6746         for (i = 0; i < BNX2_NUM_STATS; i++) {
6747                 if (stats_len_arr[i] == 0) {
6748                         /* skip this counter */
6749                         buf[i] = 0;
6750                         continue;
6751                 }
6752                 if (stats_len_arr[i] == 4) {
6753                         /* 4-byte counter */
6754                         buf[i] = (u64)
6755                                 *(hw_stats + bnx2_stats_offset_arr[i]);
6756                         continue;
6757                 }
6758                 /* 8-byte counter */
6759                 buf[i] = (((u64) *(hw_stats +
6760                                         bnx2_stats_offset_arr[i])) << 32) +
6761                                 *(hw_stats + bnx2_stats_offset_arr[i] + 1);
6762         }
6763 }
6764
6765 static int
6766 bnx2_phys_id(struct net_device *dev, u32 data)
6767 {
6768         struct bnx2 *bp = netdev_priv(dev);
6769         int i;
6770         u32 save;
6771
6772         if (data == 0)
6773                 data = 2;
6774
6775         save = REG_RD(bp, BNX2_MISC_CFG);
6776         REG_WR(bp, BNX2_MISC_CFG, BNX2_MISC_CFG_LEDMODE_MAC);
6777
6778         for (i = 0; i < (data * 2); i++) {
6779                 if ((i % 2) == 0) {
6780                         REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE);
6781                 }
6782                 else {
6783                         REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE |
6784                                 BNX2_EMAC_LED_1000MB_OVERRIDE |
6785                                 BNX2_EMAC_LED_100MB_OVERRIDE |
6786                                 BNX2_EMAC_LED_10MB_OVERRIDE |
6787                                 BNX2_EMAC_LED_TRAFFIC_OVERRIDE |
6788                                 BNX2_EMAC_LED_TRAFFIC);
6789                 }
6790                 msleep_interruptible(500);
6791                 if (signal_pending(current))
6792                         break;
6793         }
6794         REG_WR(bp, BNX2_EMAC_LED, 0);
6795         REG_WR(bp, BNX2_MISC_CFG, save);
6796         return 0;
6797 }
6798
6799 static int
6800 bnx2_set_tx_csum(struct net_device *dev, u32 data)
6801 {
6802         struct bnx2 *bp = netdev_priv(dev);
6803
6804         if (CHIP_NUM(bp) == CHIP_NUM_5709)
6805                 return (ethtool_op_set_tx_ipv6_csum(dev, data));
6806         else
6807                 return (ethtool_op_set_tx_csum(dev, data));
6808 }
6809
/* ethtool operations table; hooks not listed fall back to the ethtool
 * core defaults.
 */
static const struct ethtool_ops bnx2_ethtool_ops = {
	.get_settings		= bnx2_get_settings,
	.set_settings		= bnx2_set_settings,
	.get_drvinfo		= bnx2_get_drvinfo,
	.get_regs_len		= bnx2_get_regs_len,
	.get_regs		= bnx2_get_regs,
	.get_wol		= bnx2_get_wol,
	.set_wol		= bnx2_set_wol,
	.nway_reset		= bnx2_nway_reset,
	.get_link		= ethtool_op_get_link,
	.get_eeprom_len		= bnx2_get_eeprom_len,
	.get_eeprom		= bnx2_get_eeprom,
	.set_eeprom		= bnx2_set_eeprom,
	.get_coalesce		= bnx2_get_coalesce,
	.set_coalesce		= bnx2_set_coalesce,
	.get_ringparam		= bnx2_get_ringparam,
	.set_ringparam		= bnx2_set_ringparam,
	.get_pauseparam		= bnx2_get_pauseparam,
	.set_pauseparam		= bnx2_set_pauseparam,
	.get_rx_csum		= bnx2_get_rx_csum,
	.set_rx_csum		= bnx2_set_rx_csum,
	.set_tx_csum		= bnx2_set_tx_csum,
	.set_sg			= ethtool_op_set_sg,
	.set_tso		= bnx2_set_tso,
	.self_test		= bnx2_self_test,
	.get_strings		= bnx2_get_strings,
	.phys_id		= bnx2_phys_id,
	.get_ethtool_stats	= bnx2_get_ethtool_stats,
	.get_sset_count		= bnx2_get_sset_count,
};
6840
/* MII ioctl handler (SIOCGMIIPHY / SIOCGMIIREG / SIOCSMIIREG).
 * Called with rtnl_lock held.  PHY register access is refused when the
 * PHY is owned by remote (management) firmware, and requires the
 * interface to be up so the MDIO bus is usable.
 */
static int
bnx2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct mii_ioctl_data *data = if_mii(ifr);
	struct bnx2 *bp = netdev_priv(dev);
	int err;

	switch(cmd) {
	case SIOCGMIIPHY:
		data->phy_id = bp->phy_addr;

		/* fallthru */
	case SIOCGMIIREG: {
		u32 mii_regval;

		if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
			return -EOPNOTSUPP;

		if (!netif_running(dev))
			return -EAGAIN;

		/* phy_lock serializes MDIO access with the link poll. */
		spin_lock_bh(&bp->phy_lock);
		err = bnx2_read_phy(bp, data->reg_num & 0x1f, &mii_regval);
		spin_unlock_bh(&bp->phy_lock);

		data->val_out = mii_regval;

		return err;
	}

	case SIOCSMIIREG:
		/* Writing PHY registers is privileged. */
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;

		if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
			return -EOPNOTSUPP;

		if (!netif_running(dev))
			return -EAGAIN;

		spin_lock_bh(&bp->phy_lock);
		err = bnx2_write_phy(bp, data->reg_num & 0x1f, data->val_in);
		spin_unlock_bh(&bp->phy_lock);

		return err;

	default:
		/* do nothing */
		break;
	}
	return -EOPNOTSUPP;
}
6894
6895 /* Called with rtnl_lock */
6896 static int
6897 bnx2_change_mac_addr(struct net_device *dev, void *p)
6898 {
6899         struct sockaddr *addr = p;
6900         struct bnx2 *bp = netdev_priv(dev);
6901
6902         if (!is_valid_ether_addr(addr->sa_data))
6903                 return -EINVAL;
6904
6905         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
6906         if (netif_running(dev))
6907                 bnx2_set_mac_addr(bp);
6908
6909         return 0;
6910 }
6911
6912 /* Called with rtnl_lock */
6913 static int
6914 bnx2_change_mtu(struct net_device *dev, int new_mtu)
6915 {
6916         struct bnx2 *bp = netdev_priv(dev);
6917
6918         if (((new_mtu + ETH_HLEN) > MAX_ETHERNET_JUMBO_PACKET_SIZE) ||
6919                 ((new_mtu + ETH_HLEN) < MIN_ETHERNET_PACKET_SIZE))
6920                 return -EINVAL;
6921
6922         dev->mtu = new_mtu;
6923         return (bnx2_change_ring_size(bp, bp->rx_ring_size, bp->tx_ring_size));
6924 }
6925
#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
/* netpoll hook: run the interrupt handler with the device IRQ masked so
 * netconsole and friends can poll the NIC from atomic context.
 */
static void
poll_bnx2(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);

	disable_irq(bp->pdev->irq);
	bnx2_interrupt(bp->pdev->irq, dev);
	enable_irq(bp->pdev->irq);
}
#endif
6937
6938 static void __devinit
6939 bnx2_get_5709_media(struct bnx2 *bp)
6940 {
6941         u32 val = REG_RD(bp, BNX2_MISC_DUAL_MEDIA_CTRL);
6942         u32 bond_id = val & BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID;
6943         u32 strap;
6944
6945         if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_C)
6946                 return;
6947         else if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_S) {
6948                 bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
6949                 return;
6950         }
6951
6952         if (val & BNX2_MISC_DUAL_MEDIA_CTRL_STRAP_OVERRIDE)
6953                 strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL) >> 21;
6954         else
6955                 strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL_STRAP) >> 8;
6956
6957         if (PCI_FUNC(bp->pdev->devfn) == 0) {
6958                 switch (strap) {
6959                 case 0x4:
6960                 case 0x5:
6961                 case 0x6:
6962                         bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
6963                         return;
6964                 }
6965         } else {
6966                 switch (strap) {
6967                 case 0x1:
6968                 case 0x2:
6969                 case 0x4:
6970                         bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
6971                         return;
6972                 }
6973         }
6974 }
6975
/* Detect the PCI/PCI-X bus type, width and clock speed from the chip's
 * misc status and clock control registers, recording the results in
 * bp->flags and bp->bus_speed_mhz for the probe banner.
 */
static void __devinit
bnx2_get_pci_speed(struct bnx2 *bp)
{
	u32 reg;

	reg = REG_RD(bp, BNX2_PCICFG_MISC_STATUS);
	if (reg & BNX2_PCICFG_MISC_STATUS_PCIX_DET) {
		u32 clkreg;

		bp->flags |= BNX2_FLAG_PCIX;

		/* Decode the detected PCI-X clock into a nominal MHz. */
		clkreg = REG_RD(bp, BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS);

		clkreg &= BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET;
		switch (clkreg) {
		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_133MHZ:
			bp->bus_speed_mhz = 133;
			break;

		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_95MHZ:
			bp->bus_speed_mhz = 100;
			break;

		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_66MHZ:
		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_80MHZ:
			bp->bus_speed_mhz = 66;
			break;

		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_48MHZ:
		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_55MHZ:
			bp->bus_speed_mhz = 50;
			break;

		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_LOW:
		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_32MHZ:
		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_38MHZ:
			bp->bus_speed_mhz = 33;
			break;
		}
	}
	else {
		/* Conventional PCI: only the M66EN pin distinguishes speeds. */
		if (reg & BNX2_PCICFG_MISC_STATUS_M66EN)
			bp->bus_speed_mhz = 66;
		else
			bp->bus_speed_mhz = 33;
	}

	if (reg & BNX2_PCICFG_MISC_STATUS_32BIT_DET)
		bp->flags |= BNX2_FLAG_PCI_32BIT;

}
7027
7028 static int __devinit
7029 bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
7030 {
7031         struct bnx2 *bp;
7032         unsigned long mem_len;
7033         int rc, i, j;
7034         u32 reg;
7035         u64 dma_mask, persist_dma_mask;
7036
7037         SET_NETDEV_DEV(dev, &pdev->dev);
7038         bp = netdev_priv(dev);
7039
7040         bp->flags = 0;
7041         bp->phy_flags = 0;
7042
7043         /* enable device (incl. PCI PM wakeup), and bus-mastering */
7044         rc = pci_enable_device(pdev);
7045         if (rc) {
7046                 dev_err(&pdev->dev, "Cannot enable PCI device, aborting.\n");
7047                 goto err_out;
7048         }
7049
7050         if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
7051                 dev_err(&pdev->dev,
7052                         "Cannot find PCI device base address, aborting.\n");
7053                 rc = -ENODEV;
7054                 goto err_out_disable;
7055         }
7056
7057         rc = pci_request_regions(pdev, DRV_MODULE_NAME);
7058         if (rc) {
7059                 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting.\n");
7060                 goto err_out_disable;
7061         }
7062
7063         pci_set_master(pdev);
7064
7065         bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
7066         if (bp->pm_cap == 0) {
7067                 dev_err(&pdev->dev,
7068                         "Cannot find power management capability, aborting.\n");
7069                 rc = -EIO;
7070                 goto err_out_release;
7071         }
7072
7073         bp->dev = dev;
7074         bp->pdev = pdev;
7075
7076         spin_lock_init(&bp->phy_lock);
7077         spin_lock_init(&bp->indirect_lock);
7078         INIT_WORK(&bp->reset_task, bnx2_reset_task);
7079
7080         dev->base_addr = dev->mem_start = pci_resource_start(pdev, 0);
7081         mem_len = MB_GET_CID_ADDR(TX_TSS_CID + 1);
7082         dev->mem_end = dev->mem_start + mem_len;
7083         dev->irq = pdev->irq;
7084
7085         bp->regview = ioremap_nocache(dev->base_addr, mem_len);
7086
7087         if (!bp->regview) {
7088                 dev_err(&pdev->dev, "Cannot map register space, aborting.\n");
7089                 rc = -ENOMEM;
7090                 goto err_out_release;
7091         }
7092
7093         /* Configure byte swap and enable write to the reg_window registers.
7094          * Rely on CPU to do target byte swapping on big endian systems
7095          * The chip's target access swapping will not swap all accesses
7096          */
7097         pci_write_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG,
7098                                BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
7099                                BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP);
7100
7101         bnx2_set_power_state(bp, PCI_D0);
7102
7103         bp->chip_id = REG_RD(bp, BNX2_MISC_ID);
7104
7105         if (CHIP_NUM(bp) == CHIP_NUM_5709) {
7106                 if (pci_find_capability(pdev, PCI_CAP_ID_EXP) == 0) {
7107                         dev_err(&pdev->dev,
7108                                 "Cannot find PCIE capability, aborting.\n");
7109                         rc = -EIO;
7110                         goto err_out_unmap;
7111                 }
7112                 bp->flags |= BNX2_FLAG_PCIE;
7113                 if (CHIP_REV(bp) == CHIP_REV_Ax)
7114                         bp->flags |= BNX2_FLAG_JUMBO_BROKEN;
7115         } else {
7116                 bp->pcix_cap = pci_find_capability(pdev, PCI_CAP_ID_PCIX);
7117                 if (bp->pcix_cap == 0) {
7118                         dev_err(&pdev->dev,
7119                                 "Cannot find PCIX capability, aborting.\n");
7120                         rc = -EIO;
7121                         goto err_out_unmap;
7122                 }
7123         }
7124
7125         if (CHIP_NUM(bp) == CHIP_NUM_5709 && CHIP_REV(bp) != CHIP_REV_Ax) {
7126                 if (pci_find_capability(pdev, PCI_CAP_ID_MSIX))
7127                         bp->flags |= BNX2_FLAG_MSIX_CAP;
7128         }
7129
7130         if (CHIP_ID(bp) != CHIP_ID_5706_A0 && CHIP_ID(bp) != CHIP_ID_5706_A1) {
7131                 if (pci_find_capability(pdev, PCI_CAP_ID_MSI))
7132                         bp->flags |= BNX2_FLAG_MSI_CAP;
7133         }
7134
7135         /* 5708 cannot support DMA addresses > 40-bit.  */
7136         if (CHIP_NUM(bp) == CHIP_NUM_5708)
7137                 persist_dma_mask = dma_mask = DMA_40BIT_MASK;
7138         else
7139                 persist_dma_mask = dma_mask = DMA_64BIT_MASK;
7140
7141         /* Configure DMA attributes. */
7142         if (pci_set_dma_mask(pdev, dma_mask) == 0) {
7143                 dev->features |= NETIF_F_HIGHDMA;
7144                 rc = pci_set_consistent_dma_mask(pdev, persist_dma_mask);
7145                 if (rc) {
7146                         dev_err(&pdev->dev,
7147                                 "pci_set_consistent_dma_mask failed, aborting.\n");
7148                         goto err_out_unmap;
7149                 }
7150         } else if ((rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK)) != 0) {
7151                 dev_err(&pdev->dev, "System does not support DMA, aborting.\n");
7152                 goto err_out_unmap;
7153         }
7154
7155         if (!(bp->flags & BNX2_FLAG_PCIE))
7156                 bnx2_get_pci_speed(bp);
7157
7158         /* 5706A0 may falsely detect SERR and PERR. */
7159         if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
7160                 reg = REG_RD(bp, PCI_COMMAND);
7161                 reg &= ~(PCI_COMMAND_SERR | PCI_COMMAND_PARITY);
7162                 REG_WR(bp, PCI_COMMAND, reg);
7163         }
7164         else if ((CHIP_ID(bp) == CHIP_ID_5706_A1) &&
7165                 !(bp->flags & BNX2_FLAG_PCIX)) {
7166
7167                 dev_err(&pdev->dev,
7168                         "5706 A1 can only be used in a PCIX bus, aborting.\n");
7169                 goto err_out_unmap;
7170         }
7171
7172         bnx2_init_nvram(bp);
7173
7174         reg = bnx2_reg_rd_ind(bp, BNX2_SHM_HDR_SIGNATURE);
7175
7176         if ((reg & BNX2_SHM_HDR_SIGNATURE_SIG_MASK) ==
7177             BNX2_SHM_HDR_SIGNATURE_SIG) {
7178                 u32 off = PCI_FUNC(pdev->devfn) << 2;
7179
7180                 bp->shmem_base = bnx2_reg_rd_ind(bp, BNX2_SHM_HDR_ADDR_0 + off);
7181         } else
7182                 bp->shmem_base = HOST_VIEW_SHMEM_BASE;
7183
7184         /* Get the permanent MAC address.  First we need to make sure the
7185          * firmware is actually running.
7186          */
7187         reg = bnx2_shmem_rd(bp, BNX2_DEV_INFO_SIGNATURE);
7188
7189         if ((reg & BNX2_DEV_INFO_SIGNATURE_MAGIC_MASK) !=
7190             BNX2_DEV_INFO_SIGNATURE_MAGIC) {
7191                 dev_err(&pdev->dev, "Firmware not running, aborting.\n");
7192                 rc = -ENODEV;
7193                 goto err_out_unmap;
7194         }
7195
7196         reg = bnx2_shmem_rd(bp, BNX2_DEV_INFO_BC_REV);
7197         for (i = 0, j = 0; i < 3; i++) {
7198                 u8 num, k, skip0;
7199
7200                 num = (u8) (reg >> (24 - (i * 8)));
7201                 for (k = 100, skip0 = 1; k >= 1; num %= k, k /= 10) {
7202                         if (num >= k || !skip0 || k == 1) {
7203                                 bp->fw_version[j++] = (num / k) + '0';
7204                                 skip0 = 0;
7205                         }
7206                 }
7207                 if (i != 2)
7208                         bp->fw_version[j++] = '.';
7209         }
7210         reg = bnx2_shmem_rd(bp, BNX2_PORT_FEATURE);
7211         if (reg & BNX2_PORT_FEATURE_WOL_ENABLED)
7212                 bp->wol = 1;
7213
7214         if (reg & BNX2_PORT_FEATURE_ASF_ENABLED) {
7215                 bp->flags |= BNX2_FLAG_ASF_ENABLE;
7216
7217                 for (i = 0; i < 30; i++) {
7218                         reg = bnx2_shmem_rd(bp, BNX2_BC_STATE_CONDITION);
7219                         if (reg & BNX2_CONDITION_MFW_RUN_MASK)
7220                                 break;
7221                         msleep(10);
7222                 }
7223         }
7224         reg = bnx2_shmem_rd(bp, BNX2_BC_STATE_CONDITION);
7225         reg &= BNX2_CONDITION_MFW_RUN_MASK;
7226         if (reg != BNX2_CONDITION_MFW_RUN_UNKNOWN &&
7227             reg != BNX2_CONDITION_MFW_RUN_NONE) {
7228                 int i;
7229                 u32 addr = bnx2_shmem_rd(bp, BNX2_MFW_VER_PTR);
7230
7231                 bp->fw_version[j++] = ' ';
7232                 for (i = 0; i < 3; i++) {
7233                         reg = bnx2_reg_rd_ind(bp, addr + i * 4);
7234                         reg = swab32(reg);
7235                         memcpy(&bp->fw_version[j], &reg, 4);
7236                         j += 4;
7237                 }
7238         }
7239
7240         reg = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_MAC_UPPER);
7241         bp->mac_addr[0] = (u8) (reg >> 8);
7242         bp->mac_addr[1] = (u8) reg;
7243
7244         reg = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_MAC_LOWER);
7245         bp->mac_addr[2] = (u8) (reg >> 24);
7246         bp->mac_addr[3] = (u8) (reg >> 16);
7247         bp->mac_addr[4] = (u8) (reg >> 8);
7248         bp->mac_addr[5] = (u8) reg;
7249
7250         bp->rx_offset = sizeof(struct l2_fhdr) + 2;
7251
7252         bp->tx_ring_size = MAX_TX_DESC_CNT;
7253         bnx2_set_rx_ring_size(bp, 255);
7254
7255         bp->rx_csum = 1;
7256
7257         bp->tx_quick_cons_trip_int = 20;
7258         bp->tx_quick_cons_trip = 20;
7259         bp->tx_ticks_int = 80;
7260         bp->tx_ticks = 80;
7261
7262         bp->rx_quick_cons_trip_int = 6;
7263         bp->rx_quick_cons_trip = 6;
7264         bp->rx_ticks_int = 18;
7265         bp->rx_ticks = 18;
7266
7267         bp->stats_ticks = USEC_PER_SEC & BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
7268
7269         bp->timer_interval =  HZ;
7270         bp->current_interval =  HZ;
7271
7272         bp->phy_addr = 1;
7273
7274         /* Disable WOL support if we are running on a SERDES chip. */
7275         if (CHIP_NUM(bp) == CHIP_NUM_5709)
7276                 bnx2_get_5709_media(bp);
7277         else if (CHIP_BOND_ID(bp) & CHIP_BOND_ID_SERDES_BIT)
7278                 bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
7279
7280         bp->phy_port = PORT_TP;
7281         if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
7282                 bp->phy_port = PORT_FIBRE;
7283                 reg = bnx2_shmem_rd(bp, BNX2_SHARED_HW_CFG_CONFIG);
7284                 if (!(reg & BNX2_SHARED_HW_CFG_GIG_LINK_ON_VAUX)) {
7285                         bp->flags |= BNX2_FLAG_NO_WOL;
7286                         bp->wol = 0;
7287                 }
7288                 if (CHIP_NUM(bp) != CHIP_NUM_5706) {
7289                         bp->phy_addr = 2;
7290                         if (reg & BNX2_SHARED_HW_CFG_PHY_2_5G)
7291                                 bp->phy_flags |= BNX2_PHY_FLAG_2_5G_CAPABLE;
7292                 }
7293                 bnx2_init_remote_phy(bp);
7294
7295         } else if (CHIP_NUM(bp) == CHIP_NUM_5706 ||
7296                    CHIP_NUM(bp) == CHIP_NUM_5708)
7297                 bp->phy_flags |= BNX2_PHY_FLAG_CRC_FIX;
7298         else if (CHIP_NUM(bp) == CHIP_NUM_5709 &&
7299                  (CHIP_REV(bp) == CHIP_REV_Ax ||
7300                   CHIP_REV(bp) == CHIP_REV_Bx))
7301                 bp->phy_flags |= BNX2_PHY_FLAG_DIS_EARLY_DAC;
7302
7303         if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
7304             (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
7305             (CHIP_ID(bp) == CHIP_ID_5708_B1)) {
7306                 bp->flags |= BNX2_FLAG_NO_WOL;
7307                 bp->wol = 0;
7308         }
7309
7310         if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
7311                 bp->tx_quick_cons_trip_int =
7312                         bp->tx_quick_cons_trip;
7313                 bp->tx_ticks_int = bp->tx_ticks;
7314                 bp->rx_quick_cons_trip_int =
7315                         bp->rx_quick_cons_trip;
7316                 bp->rx_ticks_int = bp->rx_ticks;
7317                 bp->comp_prod_trip_int = bp->comp_prod_trip;
7318                 bp->com_ticks_int = bp->com_ticks;
7319                 bp->cmd_ticks_int = bp->cmd_ticks;
7320         }
7321
7322         /* Disable MSI on 5706 if AMD 8132 bridge is found.
7323          *
7324          * MSI is defined to be 32-bit write.  The 5706 does 64-bit MSI writes
7325          * with byte enables disabled on the unused 32-bit word.  This is legal
7326          * but causes problems on the AMD 8132 which will eventually stop
7327          * responding after a while.
7328          *
7329          * AMD believes this incompatibility is unique to the 5706, and
7330          * prefers to locally disable MSI rather than globally disabling it.
7331          */
7332         if (CHIP_NUM(bp) == CHIP_NUM_5706 && disable_msi == 0) {
7333                 struct pci_dev *amd_8132 = NULL;
7334
7335                 while ((amd_8132 = pci_get_device(PCI_VENDOR_ID_AMD,
7336                                                   PCI_DEVICE_ID_AMD_8132_BRIDGE,
7337                                                   amd_8132))) {
7338
7339                         if (amd_8132->revision >= 0x10 &&
7340                             amd_8132->revision <= 0x13) {
7341                                 disable_msi = 1;
7342                                 pci_dev_put(amd_8132);
7343                                 break;
7344                         }
7345                 }
7346         }
7347
7348         bnx2_set_default_link(bp);
7349         bp->req_flow_ctrl = FLOW_CTRL_RX | FLOW_CTRL_TX;
7350
7351         init_timer(&bp->timer);
7352         bp->timer.expires = RUN_AT(bp->timer_interval);
7353         bp->timer.data = (unsigned long) bp;
7354         bp->timer.function = bnx2_timer;
7355
7356         return 0;
7357
7358 err_out_unmap:
7359         if (bp->regview) {
7360                 iounmap(bp->regview);
7361                 bp->regview = NULL;
7362         }
7363
7364 err_out_release:
7365         pci_release_regions(pdev);
7366
7367 err_out_disable:
7368         pci_disable_device(pdev);
7369         pci_set_drvdata(pdev, NULL);
7370
7371 err_out:
7372         return rc;
7373 }
7374
7375 static char * __devinit
7376 bnx2_bus_string(struct bnx2 *bp, char *str)
7377 {
7378         char *s = str;
7379
7380         if (bp->flags & BNX2_FLAG_PCIE) {
7381                 s += sprintf(s, "PCI Express");
7382         } else {
7383                 s += sprintf(s, "PCI");
7384                 if (bp->flags & BNX2_FLAG_PCIX)
7385                         s += sprintf(s, "-X");
7386                 if (bp->flags & BNX2_FLAG_PCI_32BIT)
7387                         s += sprintf(s, " 32-bit");
7388                 else
7389                         s += sprintf(s, " 64-bit");
7390                 s += sprintf(s, " %dMHz", bp->bus_speed_mhz);
7391         }
7392         return str;
7393 }
7394
7395 static void __devinit
7396 bnx2_init_napi(struct bnx2 *bp)
7397 {
7398         int i;
7399         struct bnx2_napi *bnapi;
7400
7401         for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) {
7402                 bnapi = &bp->bnx2_napi[i];
7403                 bnapi->bp = bp;
7404         }
7405         netif_napi_add(bp->dev, &bp->bnx2_napi[0].napi, bnx2_poll, 64);
7406         netif_napi_add(bp->dev, &bp->bnx2_napi[BNX2_TX_VEC].napi, bnx2_tx_poll,
7407                        64);
7408 }
7409
/* PCI probe entry point: allocate the net_device, initialize the board,
 * wire up the netdev/ethtool operations and feature flags, and register
 * the interface.  On any failure all resources are released in reverse
 * order of acquisition.
 */
static int __devinit
bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	static int version_printed = 0;
	struct net_device *dev = NULL;
	struct bnx2 *bp;
	int rc;
	char str[40];
	DECLARE_MAC_BUF(mac);

	/* Print the driver banner only once, on the first probe. */
	if (version_printed++ == 0)
		printk(KERN_INFO "%s", version);

	/* dev zeroed in init_etherdev */
	dev = alloc_etherdev(sizeof(*bp));

	if (!dev)
		return -ENOMEM;

	rc = bnx2_init_board(pdev, dev);
	if (rc < 0) {
		free_netdev(dev);
		return rc;
	}

	dev->open = bnx2_open;
	dev->hard_start_xmit = bnx2_start_xmit;
	dev->stop = bnx2_close;
	dev->get_stats = bnx2_get_stats;
	dev->set_multicast_list = bnx2_set_rx_mode;
	dev->do_ioctl = bnx2_ioctl;
	dev->set_mac_address = bnx2_change_mac_addr;
	dev->change_mtu = bnx2_change_mtu;
	dev->tx_timeout = bnx2_tx_timeout;
	dev->watchdog_timeo = TX_TIMEOUT;
#ifdef BCM_VLAN
	dev->vlan_rx_register = bnx2_vlan_rx_register;
#endif
	dev->ethtool_ops = &bnx2_ethtool_ops;

	bp = netdev_priv(dev);
	bnx2_init_napi(bp);

#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
	dev->poll_controller = poll_bnx2;
#endif

	pci_set_drvdata(pdev, dev);

	/* Permanent MAC was read from shared memory by bnx2_init_board(). */
	memcpy(dev->dev_addr, bp->mac_addr, 6);
	memcpy(dev->perm_addr, bp->mac_addr, 6);
	bp->name = board_info[ent->driver_data].name;

	dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		dev->features |= NETIF_F_IPV6_CSUM;

#ifdef BCM_VLAN
	dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
#endif
	dev->features |= NETIF_F_TSO | NETIF_F_TSO_ECN;
	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		dev->features |= NETIF_F_TSO6;

	if ((rc = register_netdev(dev))) {
		dev_err(&pdev->dev, "Cannot register net device\n");
		/* Unwind everything bnx2_init_board() acquired. */
		if (bp->regview)
			iounmap(bp->regview);
		pci_release_regions(pdev);
		pci_disable_device(pdev);
		pci_set_drvdata(pdev, NULL);
		free_netdev(dev);
		return rc;
	}

	printk(KERN_INFO "%s: %s (%c%d) %s found at mem %lx, "
		"IRQ %d, node addr %s\n",
		dev->name,
		bp->name,
		((CHIP_ID(bp) & 0xf000) >> 12) + 'A',
		((CHIP_ID(bp) & 0x0ff0) >> 4),
		bnx2_bus_string(bp, str),
		dev->base_addr,
		bp->pdev->irq, print_mac(mac, dev->dev_addr));

	return 0;
}
7497
/* PCI remove callback: tear down one bnx2 device.  The order matters:
 * stop deferred work first, then pull the netdev out of the stack,
 * and only then release the MMIO mapping and PCI resources.
 */
static void __devexit
bnx2_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);

	/* Wait for any pending reset/link task so it cannot run against
	 * a half-torn-down device.
	 */
	flush_scheduled_work();

	unregister_netdev(dev);

	/* regview is non-NULL only if probe successfully ioremapped BAR0 */
	if (bp->regview)
		iounmap(bp->regview);

	free_netdev(dev);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
}
7516
/* PCI suspend callback: quiesce the NIC and drop it into the power
 * state chosen by the PCI core.  Tells the bootcode (via a reset
 * code) whether wake-on-LAN should remain armed.
 */
static int
bnx2_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);
	u32 reset_code;

	/* PCI register 4 needs to be saved whether netif_running() or not.
	 * MSI address and data need to be saved if using MSI and
	 * netif_running().
	 */
	pci_save_state(pdev);
	if (!netif_running(dev))
		return 0;

	/* Stop deferred work and traffic before touching the hardware. */
	flush_scheduled_work();
	bnx2_netif_stop(bp);
	netif_device_detach(dev);
	del_timer_sync(&bp->timer);
	/* Pick the unload message for the firmware: no-WoL-capable parts
	 * report link down; otherwise honor the user's wol setting.
	 */
	if (bp->flags & BNX2_FLAG_NO_WOL)
		reset_code = BNX2_DRV_MSG_CODE_UNLOAD_LNK_DN;
	else if (bp->wol)
		reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
	else
		reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
	bnx2_reset_chip(bp, reset_code);
	bnx2_free_skbs(bp);
	bnx2_set_power_state(bp, pci_choose_state(pdev, state));
	return 0;
}
7547
/* PCI resume callback: restore PCI config space, bring the chip back
 * to D0, and re-initialize it if the interface was up at suspend.
 */
static int
bnx2_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);

	pci_restore_state(pdev);
	if (!netif_running(dev))
		return 0;

	bnx2_set_power_state(bp, PCI_D0);
	netif_device_attach(dev);
	/* NOTE(review): bnx2_init_nic()'s return value is ignored here, so
	 * a failed re-init leaves the device attached but dead — confirm
	 * whether the error should be propagated.
	 */
	bnx2_init_nic(bp);
	bnx2_netif_start(bp);
	return 0;
}
7564
/* Driver descriptor handed to the PCI core; the core calls probe for
 * every ID in bnx2_pci_tbl and routes PM events to suspend/resume.
 */
static struct pci_driver bnx2_pci_driver = {
	.name		= DRV_MODULE_NAME,
	.id_table	= bnx2_pci_tbl,
	.probe		= bnx2_init_one,
	.remove		= __devexit_p(bnx2_remove_one),
	.suspend	= bnx2_suspend,
	.resume		= bnx2_resume,
};
7573
7574 static int __init bnx2_init(void)
7575 {
7576         return pci_register_driver(&bnx2_pci_driver);
7577 }
7578
7579 static void __exit bnx2_cleanup(void)
7580 {
7581         pci_unregister_driver(&bnx2_pci_driver);
7582 }
7583
/* Wire the entry/exit functions into the module load/unload machinery. */
module_init(bnx2_init);
module_exit(bnx2_cleanup);


