[BNX2]: Fix TSO problem with small MSS.
[sfrench/cifs-2.6.git] / drivers / net / bnx2.c
1 /* bnx2.c: Broadcom NX2 network driver.
2  *
3  * Copyright (c) 2004-2007 Broadcom Corporation
4  *
5  * This program is free software; you can redistribute it and/or modify
6  * it under the terms of the GNU General Public License as published by
7  * the Free Software Foundation.
8  *
9  * Written by: Michael Chan  (mchan@broadcom.com)
10  */
11
12
13 #include <linux/module.h>
14 #include <linux/moduleparam.h>
15
16 #include <linux/kernel.h>
17 #include <linux/timer.h>
18 #include <linux/errno.h>
19 #include <linux/ioport.h>
20 #include <linux/slab.h>
21 #include <linux/vmalloc.h>
22 #include <linux/interrupt.h>
23 #include <linux/pci.h>
24 #include <linux/init.h>
25 #include <linux/netdevice.h>
26 #include <linux/etherdevice.h>
27 #include <linux/skbuff.h>
28 #include <linux/dma-mapping.h>
29 #include <asm/bitops.h>
30 #include <asm/io.h>
31 #include <asm/irq.h>
32 #include <linux/delay.h>
33 #include <asm/byteorder.h>
34 #include <asm/page.h>
35 #include <linux/time.h>
36 #include <linux/ethtool.h>
37 #include <linux/mii.h>
38 #ifdef NETIF_F_HW_VLAN_TX
39 #include <linux/if_vlan.h>
40 #define BCM_VLAN 1
41 #endif
42 #include <net/ip.h>
43 #include <net/tcp.h>
44 #include <net/checksum.h>
45 #include <linux/workqueue.h>
46 #include <linux/crc32.h>
47 #include <linux/prefetch.h>
48 #include <linux/cache.h>
49 #include <linux/zlib.h>
50
51 #include "bnx2.h"
52 #include "bnx2_fw.h"
53 #include "bnx2_fw2.h"
54
55 #define DRV_MODULE_NAME         "bnx2"
56 #define PFX DRV_MODULE_NAME     ": "
57 #define DRV_MODULE_VERSION      "1.5.10"
58 #define DRV_MODULE_RELDATE      "May 1, 2007"
59
60 #define RUN_AT(x) (jiffies + (x))
61
62 /* Time in jiffies before concluding the transmitter is hung. */
63 #define TX_TIMEOUT  (5*HZ)
64
65 static const char version[] __devinitdata =
66         "Broadcom NetXtreme II Gigabit Ethernet Driver " DRV_MODULE_NAME " v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
67
68 MODULE_AUTHOR("Michael Chan <mchan@broadcom.com>");
69 MODULE_DESCRIPTION("Broadcom NetXtreme II BCM5706/5708 Driver");
70 MODULE_LICENSE("GPL");
71 MODULE_VERSION(DRV_MODULE_VERSION);
72
73 static int disable_msi = 0;
74
75 module_param(disable_msi, int, 0);
76 MODULE_PARM_DESC(disable_msi, "Disable Message Signaled Interrupt (MSI)");
77
78 typedef enum {
79         BCM5706 = 0,
80         NC370T,
81         NC370I,
82         BCM5706S,
83         NC370F,
84         BCM5708,
85         BCM5708S,
86         BCM5709,
87         BCM5709S,
88 } board_t;
89
90 /* indexed by board_t, above */
91 static const struct {
92         char *name;
93 } board_info[] __devinitdata = {
94         { "Broadcom NetXtreme II BCM5706 1000Base-T" },
95         { "HP NC370T Multifunction Gigabit Server Adapter" },
96         { "HP NC370i Multifunction Gigabit Server Adapter" },
97         { "Broadcom NetXtreme II BCM5706 1000Base-SX" },
98         { "HP NC370F Multifunction Gigabit Server Adapter" },
99         { "Broadcom NetXtreme II BCM5708 1000Base-T" },
100         { "Broadcom NetXtreme II BCM5708 1000Base-SX" },
101         { "Broadcom NetXtreme II BCM5709 1000Base-T" },
102         { "Broadcom NetXtreme II BCM5709 1000Base-SX" },
103         };
104
105 static struct pci_device_id bnx2_pci_tbl[] = {
106         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
107           PCI_VENDOR_ID_HP, 0x3101, 0, 0, NC370T },
108         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
109           PCI_VENDOR_ID_HP, 0x3106, 0, 0, NC370I },
110         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
111           PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706 },
112         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708,
113           PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708 },
114         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
115           PCI_VENDOR_ID_HP, 0x3102, 0, 0, NC370F },
116         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
117           PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706S },
118         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708S,
119           PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708S },
120         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709,
121           PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709 },
122         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709S,
123           PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709S },
124         { 0, }
125 };
126
127 static struct flash_spec flash_table[] =
128 {
129         /* Slow EEPROM */
130         {0x00000000, 0x40830380, 0x009f0081, 0xa184a053, 0xaf000400,
131          1, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
132          SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
133          "EEPROM - slow"},
134         /* Expansion entry 0001 */
135         {0x08000002, 0x4b808201, 0x00050081, 0x03840253, 0xaf020406,
136          0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
137          SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
138          "Entry 0001"},
139         /* Saifun SA25F010 (non-buffered flash) */
140         /* strap, cfg1, & write1 need updates */
141         {0x04000001, 0x47808201, 0x00050081, 0x03840253, 0xaf020406,
142          0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
143          SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*2,
144          "Non-buffered flash (128kB)"},
145         /* Saifun SA25F020 (non-buffered flash) */
146         /* strap, cfg1, & write1 need updates */
147         {0x0c000003, 0x4f808201, 0x00050081, 0x03840253, 0xaf020406,
148          0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
149          SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*4,
150          "Non-buffered flash (256kB)"},
151         /* Expansion entry 0100 */
152         {0x11000000, 0x53808201, 0x00050081, 0x03840253, 0xaf020406,
153          0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
154          SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
155          "Entry 0100"},
156         /* Entry 0101: ST M45PE10 (non-buffered flash, TetonII B0) */
157         {0x19000002, 0x5b808201, 0x000500db, 0x03840253, 0xaf020406,
158          0, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
159          ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*2,
160          "Entry 0101: ST M45PE10 (128kB non-bufferred)"},
161         /* Entry 0110: ST M45PE20 (non-buffered flash)*/
162         {0x15000001, 0x57808201, 0x000500db, 0x03840253, 0xaf020406,
163          0, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
164          ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*4,
165          "Entry 0110: ST M45PE20 (256kB non-bufferred)"},
166         /* Saifun SA25F005 (non-buffered flash) */
167         /* strap, cfg1, & write1 need updates */
168         {0x1d000003, 0x5f808201, 0x00050081, 0x03840253, 0xaf020406,
169          0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
170          SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE,
171          "Non-buffered flash (64kB)"},
172         /* Fast EEPROM */
173         {0x22000000, 0x62808380, 0x009f0081, 0xa184a053, 0xaf000400,
174          1, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
175          SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
176          "EEPROM - fast"},
177         /* Expansion entry 1001 */
178         {0x2a000002, 0x6b808201, 0x00050081, 0x03840253, 0xaf020406,
179          0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
180          SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
181          "Entry 1001"},
182         /* Expansion entry 1010 */
183         {0x26000001, 0x67808201, 0x00050081, 0x03840253, 0xaf020406,
184          0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
185          SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
186          "Entry 1010"},
187         /* ATMEL AT45DB011B (buffered flash) */
188         {0x2e000003, 0x6e808273, 0x00570081, 0x68848353, 0xaf000400,
189          1, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
190          BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE,
191          "Buffered flash (128kB)"},
192         /* Expansion entry 1100 */
193         {0x33000000, 0x73808201, 0x00050081, 0x03840253, 0xaf020406,
194          0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
195          SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
196          "Entry 1100"},
197         /* Expansion entry 1101 */
198         {0x3b000002, 0x7b808201, 0x00050081, 0x03840253, 0xaf020406,
199          0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
200          SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
201          "Entry 1101"},
202         /* Ateml Expansion entry 1110 */
203         {0x37000001, 0x76808273, 0x00570081, 0x68848353, 0xaf000400,
204          1, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
205          BUFFERED_FLASH_BYTE_ADDR_MASK, 0,
206          "Entry 1110 (Atmel)"},
207         /* ATMEL AT45DB021B (buffered flash) */
208         {0x3f000003, 0x7e808273, 0x00570081, 0x68848353, 0xaf000400,
209          1, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
210          BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE*2,
211          "Buffered flash (256kB)"},
212 };
213
214 MODULE_DEVICE_TABLE(pci, bnx2_pci_tbl);
215
216 static inline u32 bnx2_tx_avail(struct bnx2 *bp)
217 {
218         u32 diff;
219
220         smp_mb();
221
222         /* The ring uses 256 indices for 255 entries, one of them
223          * needs to be skipped.
224          */
225         diff = bp->tx_prod - bp->tx_cons;
226         if (unlikely(diff >= TX_DESC_CNT)) {
227                 diff &= 0xffff;
228                 if (diff == TX_DESC_CNT)
229                         diff = MAX_TX_DESC_CNT;
230         }
231         return (bp->tx_ring_size - diff);
232 }
233
/* Read a device register indirectly through the PCI config window.
 * indirect_lock serializes the address/data register pair against
 * concurrent indirect accesses.
 */
static u32
bnx2_reg_rd_ind(struct bnx2 *bp, u32 offset)
{
	u32 val;

	spin_lock_bh(&bp->indirect_lock);
	REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
	val = REG_RD(bp, BNX2_PCICFG_REG_WINDOW);
	spin_unlock_bh(&bp->indirect_lock);
	return val;
}
245
/* Write a device register indirectly through the PCI config window.
 * indirect_lock serializes the address/data register pair against
 * concurrent indirect accesses.
 */
static void
bnx2_reg_wr_ind(struct bnx2 *bp, u32 offset, u32 val)
{
	spin_lock_bh(&bp->indirect_lock);
	REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
	REG_WR(bp, BNX2_PCICFG_REG_WINDOW, val);
	spin_unlock_bh(&bp->indirect_lock);
}
254
255 static void
256 bnx2_ctx_wr(struct bnx2 *bp, u32 cid_addr, u32 offset, u32 val)
257 {
258         offset += cid_addr;
259         spin_lock_bh(&bp->indirect_lock);
260         if (CHIP_NUM(bp) == CHIP_NUM_5709) {
261                 int i;
262
263                 REG_WR(bp, BNX2_CTX_CTX_DATA, val);
264                 REG_WR(bp, BNX2_CTX_CTX_CTRL,
265                        offset | BNX2_CTX_CTX_CTRL_WRITE_REQ);
266                 for (i = 0; i < 5; i++) {
267                         u32 val;
268                         val = REG_RD(bp, BNX2_CTX_CTX_CTRL);
269                         if ((val & BNX2_CTX_CTX_CTRL_WRITE_REQ) == 0)
270                                 break;
271                         udelay(5);
272                 }
273         } else {
274                 REG_WR(bp, BNX2_CTX_DATA_ADR, offset);
275                 REG_WR(bp, BNX2_CTX_DATA, val);
276         }
277         spin_unlock_bh(&bp->indirect_lock);
278 }
279
/* Read PHY register 'reg' over the EMAC MDIO interface into *val.
 * If the chip is auto-polling the PHY, polling is suspended around
 * the manual access and restored afterwards.  Returns 0 on success
 * or -EBUSY if the MDIO transaction never completes (*val set to 0).
 */
static int
bnx2_read_phy(struct bnx2 *bp, u32 reg, u32 *val)
{
	u32 val1;
	int i, ret;

	if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		/* Read back to flush the posted write before delaying. */
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);

		udelay(40);
	}

	/* Compose and fire the MDIO read command. */
	val1 = (bp->phy_addr << 21) | (reg << 16) |
		BNX2_EMAC_MDIO_COMM_COMMAND_READ | BNX2_EMAC_MDIO_COMM_DISEXT |
		BNX2_EMAC_MDIO_COMM_START_BUSY;
	REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);

	/* Poll for completion: up to 50 iterations of 10us each. */
	for (i = 0; i < 50; i++) {
		udelay(10);

		val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
		if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
			udelay(5);

			/* Re-read to pick up the returned data bits. */
			val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
			val1 &= BNX2_EMAC_MDIO_COMM_DATA;

			break;
		}
	}

	if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY) {
		/* Transaction timed out. */
		*val = 0x0;
		ret = -EBUSY;
	}
	else {
		*val = val1;
		ret = 0;
	}

	if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
		/* Restore hardware auto-polling. */
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);

		udelay(40);
	}

	return ret;
}
336
/* Write 'val' into PHY register 'reg' over the EMAC MDIO interface.
 * If the chip is auto-polling the PHY, polling is suspended around
 * the manual access and restored afterwards.  Returns 0 on success
 * or -EBUSY if the MDIO transaction never completes.
 */
static int
bnx2_write_phy(struct bnx2 *bp, u32 reg, u32 val)
{
	u32 val1;
	int i, ret;

	if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		/* Read back to flush the posted write before delaying. */
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);

		udelay(40);
	}

	/* Compose and fire the MDIO write command. */
	val1 = (bp->phy_addr << 21) | (reg << 16) | val |
		BNX2_EMAC_MDIO_COMM_COMMAND_WRITE |
		BNX2_EMAC_MDIO_COMM_START_BUSY | BNX2_EMAC_MDIO_COMM_DISEXT;
	REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);

	/* Poll for completion: up to 50 iterations of 10us each. */
	for (i = 0; i < 50; i++) {
		udelay(10);

		val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
		if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
			udelay(5);
			break;
		}
	}

	if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)
		ret = -EBUSY;
	else
		ret = 0;

	if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
		/* Restore hardware auto-polling. */
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);

		udelay(40);
	}

	return ret;
}
385
/* Mask chip interrupts; the trailing read flushes the posted write. */
static void
bnx2_disable_int(struct bnx2 *bp)
{
	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
	       BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
	REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD);
}
393
/* Unmask chip interrupts.  The first INT_ACK write acknowledges up to
 * last_status_idx while still masked, the second unmasks at the same
 * index.  The final HC_COMMAND write (COAL_NOW, judging by the bit
 * name) asks the coalescing block to fire immediately if events are
 * already pending -- NOTE(review): confirm against chip docs.
 */
static void
bnx2_enable_int(struct bnx2 *bp)
{
	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
	       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
	       BNX2_PCICFG_INT_ACK_CMD_MASK_INT | bp->last_status_idx);

	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
	       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID | bp->last_status_idx);

	REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
}
406
/* Disable interrupts and wait for any in-flight handler to finish.
 * intr_sem is incremented first so paths that check it treat
 * interrupts as disabled until bnx2_netif_start() decrements it back.
 */
static void
bnx2_disable_int_sync(struct bnx2 *bp)
{
	atomic_inc(&bp->intr_sem);
	bnx2_disable_int(bp);
	synchronize_irq(bp->pdev->irq);
}
414
/* Quiesce the interface: mask interrupts, stop the poll handler and
 * the tx queue.  trans_start is refreshed so the stopped queue does
 * not trip the tx watchdog while we hold it down.
 */
static void
bnx2_netif_stop(struct bnx2 *bp)
{
	bnx2_disable_int_sync(bp);
	if (netif_running(bp->dev)) {
		netif_poll_disable(bp->dev);
		netif_tx_disable(bp->dev);
		bp->dev->trans_start = jiffies; /* prevent tx timeout */
	}
}
425
426 static void
427 bnx2_netif_start(struct bnx2 *bp)
428 {
429         if (atomic_dec_and_test(&bp->intr_sem)) {
430                 if (netif_running(bp->dev)) {
431                         netif_wake_queue(bp->dev);
432                         netif_poll_enable(bp->dev);
433                         bnx2_enable_int(bp);
434                 }
435         }
436 }
437
438 static void
439 bnx2_free_mem(struct bnx2 *bp)
440 {
441         int i;
442
443         for (i = 0; i < bp->ctx_pages; i++) {
444                 if (bp->ctx_blk[i]) {
445                         pci_free_consistent(bp->pdev, BCM_PAGE_SIZE,
446                                             bp->ctx_blk[i],
447                                             bp->ctx_blk_mapping[i]);
448                         bp->ctx_blk[i] = NULL;
449                 }
450         }
451         if (bp->status_blk) {
452                 pci_free_consistent(bp->pdev, bp->status_stats_size,
453                                     bp->status_blk, bp->status_blk_mapping);
454                 bp->status_blk = NULL;
455                 bp->stats_blk = NULL;
456         }
457         if (bp->tx_desc_ring) {
458                 pci_free_consistent(bp->pdev,
459                                     sizeof(struct tx_bd) * TX_DESC_CNT,
460                                     bp->tx_desc_ring, bp->tx_desc_mapping);
461                 bp->tx_desc_ring = NULL;
462         }
463         kfree(bp->tx_buf_ring);
464         bp->tx_buf_ring = NULL;
465         for (i = 0; i < bp->rx_max_ring; i++) {
466                 if (bp->rx_desc_ring[i])
467                         pci_free_consistent(bp->pdev,
468                                             sizeof(struct rx_bd) * RX_DESC_CNT,
469                                             bp->rx_desc_ring[i],
470                                             bp->rx_desc_mapping[i]);
471                 bp->rx_desc_ring[i] = NULL;
472         }
473         vfree(bp->rx_buf_ring);
474         bp->rx_buf_ring = NULL;
475 }
476
/* Allocate all host memory used by the driver: tx/rx software rings,
 * coherent tx/rx descriptor rings, a combined status+statistics block
 * and (5709 only) context memory pages.  On any failure all partial
 * allocations are released via bnx2_free_mem().
 * Returns 0 on success or -ENOMEM.
 */
static int
bnx2_alloc_mem(struct bnx2 *bp)
{
	int i, status_blk_size;

	bp->tx_buf_ring = kzalloc(sizeof(struct sw_bd) * TX_DESC_CNT,
				  GFP_KERNEL);
	if (bp->tx_buf_ring == NULL)
		return -ENOMEM;

	bp->tx_desc_ring = pci_alloc_consistent(bp->pdev,
						sizeof(struct tx_bd) *
						TX_DESC_CNT,
						&bp->tx_desc_mapping);
	if (bp->tx_desc_ring == NULL)
		goto alloc_mem_err;

	/* The rx software ring can span several ring pages, so it is
	 * taken from vmalloc rather than kmalloc.
	 */
	bp->rx_buf_ring = vmalloc(sizeof(struct sw_bd) * RX_DESC_CNT *
				  bp->rx_max_ring);
	if (bp->rx_buf_ring == NULL)
		goto alloc_mem_err;

	memset(bp->rx_buf_ring, 0, sizeof(struct sw_bd) * RX_DESC_CNT *
				   bp->rx_max_ring);

	for (i = 0; i < bp->rx_max_ring; i++) {
		bp->rx_desc_ring[i] =
			pci_alloc_consistent(bp->pdev,
					     sizeof(struct rx_bd) * RX_DESC_CNT,
					     &bp->rx_desc_mapping[i]);
		if (bp->rx_desc_ring[i] == NULL)
			goto alloc_mem_err;

	}

	/* Combine status and statistics blocks into one allocation. */
	status_blk_size = L1_CACHE_ALIGN(sizeof(struct status_block));
	bp->status_stats_size = status_blk_size +
				sizeof(struct statistics_block);

	bp->status_blk = pci_alloc_consistent(bp->pdev, bp->status_stats_size,
					      &bp->status_blk_mapping);
	if (bp->status_blk == NULL)
		goto alloc_mem_err;

	memset(bp->status_blk, 0, bp->status_stats_size);

	/* The stats block sits right after the cache-aligned status
	 * block; derive both its virtual and DMA addresses from it.
	 */
	bp->stats_blk = (void *) ((unsigned long) bp->status_blk +
				  status_blk_size);

	bp->stats_blk_mapping = bp->status_blk_mapping + status_blk_size;

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		/* 8KB (0x2000) of context memory, split into
		 * page-sized coherent chunks.
		 */
		bp->ctx_pages = 0x2000 / BCM_PAGE_SIZE;
		if (bp->ctx_pages == 0)
			bp->ctx_pages = 1;
		for (i = 0; i < bp->ctx_pages; i++) {
			bp->ctx_blk[i] = pci_alloc_consistent(bp->pdev,
						BCM_PAGE_SIZE,
						&bp->ctx_blk_mapping[i]);
			if (bp->ctx_blk[i] == NULL)
				goto alloc_mem_err;
		}
	}
	return 0;

alloc_mem_err:
	bnx2_free_mem(bp);
	return -ENOMEM;
}
547
/* Translate the driver's current link state into the firmware's
 * BNX2_LINK_STATUS encoding and post it to shared memory.
 */
static void
bnx2_report_fw_link(struct bnx2 *bp)
{
	u32 fw_link_status = 0;

	if (bp->link_up) {
		u32 bmsr;

		switch (bp->line_speed) {
		case SPEED_10:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_10HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_10FULL;
			break;
		case SPEED_100:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_100HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_100FULL;
			break;
		case SPEED_1000:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_1000HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_1000FULL;
			break;
		case SPEED_2500:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_2500HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_2500FULL;
			break;
		}

		fw_link_status |= BNX2_LINK_STATUS_LINK_UP;

		if (bp->autoneg) {
			fw_link_status |= BNX2_LINK_STATUS_AN_ENABLED;

			/* MII BMSR latches status bits; read twice so
			 * the second read reflects the current state.
			 */
			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);

			if (!(bmsr & BMSR_ANEGCOMPLETE) ||
			    bp->phy_flags & PHY_PARALLEL_DETECT_FLAG)
				fw_link_status |= BNX2_LINK_STATUS_PARALLEL_DET;
			else
				fw_link_status |= BNX2_LINK_STATUS_AN_COMPLETE;
		}
	}
	else
		fw_link_status = BNX2_LINK_STATUS_LINK_DOWN;

	REG_WR_IND(bp, bp->shmem_base + BNX2_LINK_STATUS, fw_link_status);
}
603
604 static void
605 bnx2_report_link(struct bnx2 *bp)
606 {
607         if (bp->link_up) {
608                 netif_carrier_on(bp->dev);
609                 printk(KERN_INFO PFX "%s NIC Link is Up, ", bp->dev->name);
610
611                 printk("%d Mbps ", bp->line_speed);
612
613                 if (bp->duplex == DUPLEX_FULL)
614                         printk("full duplex");
615                 else
616                         printk("half duplex");
617
618                 if (bp->flow_ctrl) {
619                         if (bp->flow_ctrl & FLOW_CTRL_RX) {
620                                 printk(", receive ");
621                                 if (bp->flow_ctrl & FLOW_CTRL_TX)
622                                         printk("& transmit ");
623                         }
624                         else {
625                                 printk(", transmit ");
626                         }
627                         printk("flow control ON");
628                 }
629                 printk("\n");
630         }
631         else {
632                 netif_carrier_off(bp->dev);
633                 printk(KERN_ERR PFX "%s NIC Link is Down\n", bp->dev->name);
634         }
635
636         bnx2_report_fw_link(bp);
637 }
638
/* Derive bp->flow_ctrl (FLOW_CTRL_TX/RX bits) from either the forced
 * configuration or the pause abilities negotiated with the link
 * partner.  Flow control is only ever enabled in full duplex.
 */
static void
bnx2_resolve_flow_ctrl(struct bnx2 *bp)
{
	u32 local_adv, remote_adv;

	bp->flow_ctrl = 0;
	if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
		(AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {

		/* Not fully autonegotiated: honor the forced setting. */
		if (bp->duplex == DUPLEX_FULL) {
			bp->flow_ctrl = bp->req_flow_ctrl;
		}
		return;
	}

	if (bp->duplex != DUPLEX_FULL) {
		return;
	}

	/* The 5708 SerDes block reports the resolved pause result
	 * directly in its 1000X status register.
	 */
	if ((bp->phy_flags & PHY_SERDES_FLAG) &&
	    (CHIP_NUM(bp) == CHIP_NUM_5708)) {
		u32 val;

		bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
		if (val & BCM5708S_1000X_STAT1_TX_PAUSE)
			bp->flow_ctrl |= FLOW_CTRL_TX;
		if (val & BCM5708S_1000X_STAT1_RX_PAUSE)
			bp->flow_ctrl |= FLOW_CTRL_RX;
		return;
	}

	bnx2_read_phy(bp, bp->mii_adv, &local_adv);
	bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);

	if (bp->phy_flags & PHY_SERDES_FLAG) {
		/* Re-encode the 1000BASE-X pause bits in the copper
		 * ADVERTISE_PAUSE_* positions so the resolution logic
		 * below can be shared between SerDes and copper.
		 */
		u32 new_local_adv = 0;
		u32 new_remote_adv = 0;

		if (local_adv & ADVERTISE_1000XPAUSE)
			new_local_adv |= ADVERTISE_PAUSE_CAP;
		if (local_adv & ADVERTISE_1000XPSE_ASYM)
			new_local_adv |= ADVERTISE_PAUSE_ASYM;
		if (remote_adv & ADVERTISE_1000XPAUSE)
			new_remote_adv |= ADVERTISE_PAUSE_CAP;
		if (remote_adv & ADVERTISE_1000XPSE_ASYM)
			new_remote_adv |= ADVERTISE_PAUSE_ASYM;

		local_adv = new_local_adv;
		remote_adv = new_remote_adv;
	}

	/* See Table 28B-3 of 802.3ab-1999 spec. */
	if (local_adv & ADVERTISE_PAUSE_CAP) {
		if(local_adv & ADVERTISE_PAUSE_ASYM) {
			if (remote_adv & ADVERTISE_PAUSE_CAP) {
				bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
			}
			else if (remote_adv & ADVERTISE_PAUSE_ASYM) {
				bp->flow_ctrl = FLOW_CTRL_RX;
			}
		}
		else {
			if (remote_adv & ADVERTISE_PAUSE_CAP) {
				bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
			}
		}
	}
	else if (local_adv & ADVERTISE_PAUSE_ASYM) {
		if ((remote_adv & ADVERTISE_PAUSE_CAP) &&
			(remote_adv & ADVERTISE_PAUSE_ASYM)) {

			bp->flow_ctrl = FLOW_CTRL_TX;
		}
	}
}
714
715 static int
716 bnx2_5709s_linkup(struct bnx2 *bp)
717 {
718         u32 val, speed;
719
720         bp->link_up = 1;
721
722         bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_GP_STATUS);
723         bnx2_read_phy(bp, MII_BNX2_GP_TOP_AN_STATUS1, &val);
724         bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
725
726         if ((bp->autoneg & AUTONEG_SPEED) == 0) {
727                 bp->line_speed = bp->req_line_speed;
728                 bp->duplex = bp->req_duplex;
729                 return 0;
730         }
731         speed = val & MII_BNX2_GP_TOP_AN_SPEED_MSK;
732         switch (speed) {
733                 case MII_BNX2_GP_TOP_AN_SPEED_10:
734                         bp->line_speed = SPEED_10;
735                         break;
736                 case MII_BNX2_GP_TOP_AN_SPEED_100:
737                         bp->line_speed = SPEED_100;
738                         break;
739                 case MII_BNX2_GP_TOP_AN_SPEED_1G:
740                 case MII_BNX2_GP_TOP_AN_SPEED_1GKV:
741                         bp->line_speed = SPEED_1000;
742                         break;
743                 case MII_BNX2_GP_TOP_AN_SPEED_2_5G:
744                         bp->line_speed = SPEED_2500;
745                         break;
746         }
747         if (val & MII_BNX2_GP_TOP_AN_FD)
748                 bp->duplex = DUPLEX_FULL;
749         else
750                 bp->duplex = DUPLEX_HALF;
751         return 0;
752 }
753
754 static int
755 bnx2_5708s_linkup(struct bnx2 *bp)
756 {
757         u32 val;
758
759         bp->link_up = 1;
760         bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
761         switch (val & BCM5708S_1000X_STAT1_SPEED_MASK) {
762                 case BCM5708S_1000X_STAT1_SPEED_10:
763                         bp->line_speed = SPEED_10;
764                         break;
765                 case BCM5708S_1000X_STAT1_SPEED_100:
766                         bp->line_speed = SPEED_100;
767                         break;
768                 case BCM5708S_1000X_STAT1_SPEED_1G:
769                         bp->line_speed = SPEED_1000;
770                         break;
771                 case BCM5708S_1000X_STAT1_SPEED_2G5:
772                         bp->line_speed = SPEED_2500;
773                         break;
774         }
775         if (val & BCM5708S_1000X_STAT1_FD)
776                 bp->duplex = DUPLEX_FULL;
777         else
778                 bp->duplex = DUPLEX_HALF;
779
780         return 0;
781 }
782
783 static int
784 bnx2_5706s_linkup(struct bnx2 *bp)
785 {
786         u32 bmcr, local_adv, remote_adv, common;
787
788         bp->link_up = 1;
789         bp->line_speed = SPEED_1000;
790
791         bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
792         if (bmcr & BMCR_FULLDPLX) {
793                 bp->duplex = DUPLEX_FULL;
794         }
795         else {
796                 bp->duplex = DUPLEX_HALF;
797         }
798
799         if (!(bmcr & BMCR_ANENABLE)) {
800                 return 0;
801         }
802
803         bnx2_read_phy(bp, bp->mii_adv, &local_adv);
804         bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);
805
806         common = local_adv & remote_adv;
807         if (common & (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL)) {
808
809                 if (common & ADVERTISE_1000XFULL) {
810                         bp->duplex = DUPLEX_FULL;
811                 }
812                 else {
813                         bp->duplex = DUPLEX_HALF;
814                 }
815         }
816
817         return 0;
818 }
819
/* Determine bp->line_speed/duplex for a copper PHY with link up.
 * With autoneg enabled, the highest common denominator of the local
 * and partner advertisements wins (1000, then 100, then 10); without
 * autoneg, speed and duplex are taken directly from BMCR.
 * Always returns 0 (link_up may be cleared if no common mode exists).
 */
static int
bnx2_copper_linkup(struct bnx2 *bp)
{
	u32 bmcr;

	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
	if (bmcr & BMCR_ANENABLE) {
		u32 local_adv, remote_adv, common;

		/* Gigabit abilities: local in MII_CTRL1000, partner
		 * in MII_STAT1000.
		 */
		bnx2_read_phy(bp, MII_CTRL1000, &local_adv);
		bnx2_read_phy(bp, MII_STAT1000, &remote_adv);

		/* The partner's 1000 ability bits in STAT1000 sit two
		 * bit positions above the corresponding CTRL1000
		 * advertisement bits; shift to align before masking.
		 */
		common = local_adv & (remote_adv >> 2);
		if (common & ADVERTISE_1000FULL) {
			bp->line_speed = SPEED_1000;
			bp->duplex = DUPLEX_FULL;
		}
		else if (common & ADVERTISE_1000HALF) {
			bp->line_speed = SPEED_1000;
			bp->duplex = DUPLEX_HALF;
		}
		else {
			/* No gigabit match: fall back to 100/10 from
			 * the base-page advertisement registers.
			 */
			bnx2_read_phy(bp, bp->mii_adv, &local_adv);
			bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);

			common = local_adv & remote_adv;
			if (common & ADVERTISE_100FULL) {
				bp->line_speed = SPEED_100;
				bp->duplex = DUPLEX_FULL;
			}
			else if (common & ADVERTISE_100HALF) {
				bp->line_speed = SPEED_100;
				bp->duplex = DUPLEX_HALF;
			}
			else if (common & ADVERTISE_10FULL) {
				bp->line_speed = SPEED_10;
				bp->duplex = DUPLEX_FULL;
			}
			else if (common & ADVERTISE_10HALF) {
				bp->line_speed = SPEED_10;
				bp->duplex = DUPLEX_HALF;
			}
			else {
				/* Nothing in common: report no link. */
				bp->line_speed = 0;
				bp->link_up = 0;
			}
		}
	}
	else {
		/* Forced mode: decode BMCR directly. */
		if (bmcr & BMCR_SPEED100) {
			bp->line_speed = SPEED_100;
		}
		else {
			bp->line_speed = SPEED_10;
		}
		if (bmcr & BMCR_FULLDPLX) {
			bp->duplex = DUPLEX_FULL;
		}
		else {
			bp->duplex = DUPLEX_HALF;
		}
	}

	return 0;
}
885
/* Program the EMAC to match the resolved link state: inter-packet gap,
 * port mode (MII/GMII/2.5G), duplex, and RX/TX pause.  Expects
 * bp->link_up, bp->line_speed, bp->duplex and bp->flow_ctrl to be set
 * already.  Always returns 0.
 */
static int
bnx2_set_mac_link(struct bnx2 *bp)
{
	u32 val;

	/* Default TX length parameters; 1G half-duplex needs a
	 * different setting (second write below).
	 */
	REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x2620);
	if (bp->link_up && (bp->line_speed == SPEED_1000) &&
		(bp->duplex == DUPLEX_HALF)) {
		REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x26ff);
	}

	/* Configure the EMAC mode register. */
	val = REG_RD(bp, BNX2_EMAC_MODE);

	val &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
		BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
		BNX2_EMAC_MODE_25G_MODE);

	if (bp->link_up) {
		switch (bp->line_speed) {
			case SPEED_10:
				/* The 5706 has no dedicated 10M port
				 * mode; it falls through to plain MII.
				 */
				if (CHIP_NUM(bp) != CHIP_NUM_5706) {
					val |= BNX2_EMAC_MODE_PORT_MII_10M;
					break;
				}
				/* fall through */
			case SPEED_100:
				val |= BNX2_EMAC_MODE_PORT_MII;
				break;
			case SPEED_2500:
				/* 2.5G is GMII plus the 25G mode bit. */
				val |= BNX2_EMAC_MODE_25G_MODE;
				/* fall through */
			case SPEED_1000:
				val |= BNX2_EMAC_MODE_PORT_GMII;
				break;
		}
	}
	else {
		/* No link: park the port in GMII mode. */
		val |= BNX2_EMAC_MODE_PORT_GMII;
	}

	/* Set the MAC to operate in the appropriate duplex mode. */
	if (bp->duplex == DUPLEX_HALF)
		val |= BNX2_EMAC_MODE_HALF_DUPLEX;
	REG_WR(bp, BNX2_EMAC_MODE, val);

	/* Enable/disable rx PAUSE. */
	bp->rx_mode &= ~BNX2_EMAC_RX_MODE_FLOW_EN;

	if (bp->flow_ctrl & FLOW_CTRL_RX)
		bp->rx_mode |= BNX2_EMAC_RX_MODE_FLOW_EN;
	REG_WR(bp, BNX2_EMAC_RX_MODE, bp->rx_mode);

	/* Enable/disable tx PAUSE. */
	val = REG_RD(bp, BNX2_EMAC_TX_MODE);
	val &= ~BNX2_EMAC_TX_MODE_FLOW_EN;

	if (bp->flow_ctrl & FLOW_CTRL_TX)
		val |= BNX2_EMAC_TX_MODE_FLOW_EN;
	REG_WR(bp, BNX2_EMAC_TX_MODE, val);

	/* Acknowledge the interrupt. */
	REG_WR(bp, BNX2_EMAC_STATUS, BNX2_EMAC_STATUS_LINK_CHANGE);

	return 0;
}
952
953 static void
954 bnx2_enable_bmsr1(struct bnx2 *bp)
955 {
956         if ((bp->phy_flags & PHY_SERDES_FLAG) &&
957             (CHIP_NUM(bp) == CHIP_NUM_5709))
958                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
959                                MII_BNX2_BLK_ADDR_GP_STATUS);
960 }
961
962 static void
963 bnx2_disable_bmsr1(struct bnx2 *bp)
964 {
965         if ((bp->phy_flags & PHY_SERDES_FLAG) &&
966             (CHIP_NUM(bp) == CHIP_NUM_5709))
967                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
968                                MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
969 }
970
971 static int
972 bnx2_test_and_enable_2g5(struct bnx2 *bp)
973 {
974         u32 up1;
975         int ret = 1;
976
977         if (!(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
978                 return 0;
979
980         if (bp->autoneg & AUTONEG_SPEED)
981                 bp->advertising |= ADVERTISED_2500baseX_Full;
982
983         if (CHIP_NUM(bp) == CHIP_NUM_5709)
984                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
985
986         bnx2_read_phy(bp, bp->mii_up1, &up1);
987         if (!(up1 & BCM5708S_UP1_2G5)) {
988                 up1 |= BCM5708S_UP1_2G5;
989                 bnx2_write_phy(bp, bp->mii_up1, up1);
990                 ret = 0;
991         }
992
993         if (CHIP_NUM(bp) == CHIP_NUM_5709)
994                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
995                                MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
996
997         return ret;
998 }
999
1000 static int
1001 bnx2_test_and_disable_2g5(struct bnx2 *bp)
1002 {
1003         u32 up1;
1004         int ret = 0;
1005
1006         if (!(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
1007                 return 0;
1008
1009         if (CHIP_NUM(bp) == CHIP_NUM_5709)
1010                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
1011
1012         bnx2_read_phy(bp, bp->mii_up1, &up1);
1013         if (up1 & BCM5708S_UP1_2G5) {
1014                 up1 &= ~BCM5708S_UP1_2G5;
1015                 bnx2_write_phy(bp, bp->mii_up1, up1);
1016                 ret = 1;
1017         }
1018
1019         if (CHIP_NUM(bp) == CHIP_NUM_5709)
1020                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1021                                MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1022
1023         return ret;
1024 }
1025
1026 static void
1027 bnx2_enable_forced_2g5(struct bnx2 *bp)
1028 {
1029         u32 bmcr;
1030
1031         if (!(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
1032                 return;
1033
1034         if (CHIP_NUM(bp) == CHIP_NUM_5709) {
1035                 u32 val;
1036
1037                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1038                                MII_BNX2_BLK_ADDR_SERDES_DIG);
1039                 bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val);
1040                 val &= ~MII_BNX2_SD_MISC1_FORCE_MSK;
1041                 val |= MII_BNX2_SD_MISC1_FORCE | MII_BNX2_SD_MISC1_FORCE_2_5G;
1042                 bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);
1043
1044                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1045                                MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1046                 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1047
1048         } else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
1049                 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1050                 bmcr |= BCM5708S_BMCR_FORCE_2500;
1051         }
1052
1053         if (bp->autoneg & AUTONEG_SPEED) {
1054                 bmcr &= ~BMCR_ANENABLE;
1055                 if (bp->req_duplex == DUPLEX_FULL)
1056                         bmcr |= BMCR_FULLDPLX;
1057         }
1058         bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
1059 }
1060
1061 static void
1062 bnx2_disable_forced_2g5(struct bnx2 *bp)
1063 {
1064         u32 bmcr;
1065
1066         if (!(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
1067                 return;
1068
1069         if (CHIP_NUM(bp) == CHIP_NUM_5709) {
1070                 u32 val;
1071
1072                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1073                                MII_BNX2_BLK_ADDR_SERDES_DIG);
1074                 bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val);
1075                 val &= ~MII_BNX2_SD_MISC1_FORCE;
1076                 bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);
1077
1078                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1079                                MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1080                 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1081
1082         } else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
1083                 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1084                 bmcr &= ~BCM5708S_BMCR_FORCE_2500;
1085         }
1086
1087         if (bp->autoneg & AUTONEG_SPEED)
1088                 bmcr |= BMCR_SPEED1000 | BMCR_ANENABLE | BMCR_ANRESTART;
1089         bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
1090 }
1091
/* Re-evaluate PHY link state and reprogram the MAC to match.
 * Resolves speed/duplex via the chip-specific linkup helper, resolves
 * flow control, reports link transitions, and finishes with
 * bnx2_set_mac_link().  Always returns 0.
 */
static int
bnx2_set_link(struct bnx2 *bp)
{
	u32 bmsr;
	u8 link_up;

	/* In loopback the link is forced up; nothing to resolve. */
	if (bp->loopback == MAC_LOOPBACK || bp->loopback == PHY_LOOPBACK) {
		bp->link_up = 1;
		return 0;
	}

	/* Remember previous state so only real transitions get reported. */
	link_up = bp->link_up;

	/* The 5709 SerDes keeps its status register in another page, so
	 * bracket the reads with enable/disable.  The deliberate double
	 * read flushes the latched link-down bit (per IEEE 802.3 MII
	 * behavior) so the second read reflects current state.
	 */
	bnx2_enable_bmsr1(bp);
	bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
	bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
	bnx2_disable_bmsr1(bp);

	/* 5706 SerDes: override the PHY's view with the EMAC link bit. */
	if ((bp->phy_flags & PHY_SERDES_FLAG) &&
	    (CHIP_NUM(bp) == CHIP_NUM_5706)) {
		u32 val;

		val = REG_RD(bp, BNX2_EMAC_STATUS);
		if (val & BNX2_EMAC_STATUS_LINK)
			bmsr |= BMSR_LSTATUS;
		else
			bmsr &= ~BMSR_LSTATUS;
	}

	if (bmsr & BMSR_LSTATUS) {
		bp->link_up = 1;

		/* Resolve speed/duplex with the chip-specific helper. */
		if (bp->phy_flags & PHY_SERDES_FLAG) {
			if (CHIP_NUM(bp) == CHIP_NUM_5706)
				bnx2_5706s_linkup(bp);
			else if (CHIP_NUM(bp) == CHIP_NUM_5708)
				bnx2_5708s_linkup(bp);
			else if (CHIP_NUM(bp) == CHIP_NUM_5709)
				bnx2_5709s_linkup(bp);
		}
		else {
			bnx2_copper_linkup(bp);
		}
		bnx2_resolve_flow_ctrl(bp);
	}
	else {
		/* Link lost: undo forced 2.5G so autoneg can proceed. */
		if ((bp->phy_flags & PHY_SERDES_FLAG) &&
		    (bp->autoneg & AUTONEG_SPEED))
			bnx2_disable_forced_2g5(bp);

		bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG;
		bp->link_up = 0;
	}

	if (bp->link_up != link_up) {
		bnx2_report_link(bp);
	}

	bnx2_set_mac_link(bp);

	return 0;
}
1154
1155 static int
1156 bnx2_reset_phy(struct bnx2 *bp)
1157 {
1158         int i;
1159         u32 reg;
1160
1161         bnx2_write_phy(bp, bp->mii_bmcr, BMCR_RESET);
1162
1163 #define PHY_RESET_MAX_WAIT 100
1164         for (i = 0; i < PHY_RESET_MAX_WAIT; i++) {
1165                 udelay(10);
1166
1167                 bnx2_read_phy(bp, bp->mii_bmcr, &reg);
1168                 if (!(reg & BMCR_RESET)) {
1169                         udelay(20);
1170                         break;
1171                 }
1172         }
1173         if (i == PHY_RESET_MAX_WAIT) {
1174                 return -EBUSY;
1175         }
1176         return 0;
1177 }
1178
1179 static u32
1180 bnx2_phy_get_pause_adv(struct bnx2 *bp)
1181 {
1182         u32 adv = 0;
1183
1184         if ((bp->req_flow_ctrl & (FLOW_CTRL_RX | FLOW_CTRL_TX)) ==
1185                 (FLOW_CTRL_RX | FLOW_CTRL_TX)) {
1186
1187                 if (bp->phy_flags & PHY_SERDES_FLAG) {
1188                         adv = ADVERTISE_1000XPAUSE;
1189                 }
1190                 else {
1191                         adv = ADVERTISE_PAUSE_CAP;
1192                 }
1193         }
1194         else if (bp->req_flow_ctrl & FLOW_CTRL_TX) {
1195                 if (bp->phy_flags & PHY_SERDES_FLAG) {
1196                         adv = ADVERTISE_1000XPSE_ASYM;
1197                 }
1198                 else {
1199                         adv = ADVERTISE_PAUSE_ASYM;
1200                 }
1201         }
1202         else if (bp->req_flow_ctrl & FLOW_CTRL_RX) {
1203                 if (bp->phy_flags & PHY_SERDES_FLAG) {
1204                         adv = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
1205                 }
1206                 else {
1207                         adv = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
1208                 }
1209         }
1210         return adv;
1211 }
1212
/* Configure the SerDes PHY per bp->autoneg/bp->req_*: either force a
 * speed/duplex or (re)start autonegotiation.  Called with bp->phy_lock
 * held; it drops and reacquires the lock around the msleep() below.
 * Always returns 0.
 */
static int
bnx2_setup_serdes_phy(struct bnx2 *bp)
{
	u32 adv, bmcr;
	u32 new_adv = 0;

	if (!(bp->autoneg & AUTONEG_SPEED)) {
		/* Forced-speed path. */
		u32 new_bmcr;
		int force_link_down = 0;

		/* Match the PHY's 2.5G capability to the forced speed;
		 * a change here requires bouncing the link.
		 */
		if (bp->req_line_speed == SPEED_2500) {
			if (!bnx2_test_and_enable_2g5(bp))
				force_link_down = 1;
		} else if (bp->req_line_speed == SPEED_1000) {
			if (bnx2_test_and_disable_2g5(bp))
				force_link_down = 1;
		}
		bnx2_read_phy(bp, bp->mii_adv, &adv);
		adv &= ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF);

		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
		new_bmcr = bmcr & ~BMCR_ANENABLE;
		new_bmcr |= BMCR_SPEED1000;

		if (CHIP_NUM(bp) == CHIP_NUM_5709) {
			if (bp->req_line_speed == SPEED_2500)
				bnx2_enable_forced_2g5(bp);
			else if (bp->req_line_speed == SPEED_1000) {
				bnx2_disable_forced_2g5(bp);
				/* NOTE(review): 0x2000 presumably clears a
				 * 5709-specific forced-speed BMCR bit —
				 * confirm against the 5709 PHY docs.
				 */
				new_bmcr &= ~0x2000;
			}

		} else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
			if (bp->req_line_speed == SPEED_2500)
				new_bmcr |= BCM5708S_BMCR_FORCE_2500;
			else
				new_bmcr = bmcr & ~BCM5708S_BMCR_FORCE_2500;
		}

		if (bp->req_duplex == DUPLEX_FULL) {
			adv |= ADVERTISE_1000XFULL;
			new_bmcr |= BMCR_FULLDPLX;
		}
		else {
			adv |= ADVERTISE_1000XHALF;
			new_bmcr &= ~BMCR_FULLDPLX;
		}
		if ((new_bmcr != bmcr) || (force_link_down)) {
			/* Force a link down visible on the other side */
			if (bp->link_up) {
				/* Blank the advertisement and restart
				 * autoneg briefly so the partner sees the
				 * link drop before the new forced mode.
				 */
				bnx2_write_phy(bp, bp->mii_adv, adv &
					       ~(ADVERTISE_1000XFULL |
						 ADVERTISE_1000XHALF));
				bnx2_write_phy(bp, bp->mii_bmcr, bmcr |
					BMCR_ANRESTART | BMCR_ANENABLE);

				bp->link_up = 0;
				netif_carrier_off(bp->dev);
				bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
				bnx2_report_link(bp);
			}
			bnx2_write_phy(bp, bp->mii_adv, adv);
			bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
		} else {
			/* Nothing changed: just re-resolve pause + MAC. */
			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);
		}
		return 0;
	}

	/* Autoneg path. */
	bnx2_test_and_enable_2g5(bp);

	if (bp->advertising & ADVERTISED_1000baseT_Full)
		new_adv |= ADVERTISE_1000XFULL;

	new_adv |= bnx2_phy_get_pause_adv(bp);

	bnx2_read_phy(bp, bp->mii_adv, &adv);
	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

	bp->serdes_an_pending = 0;
	if ((adv != new_adv) || ((bmcr & BMCR_ANENABLE) == 0)) {
		/* Force a link down visible on the other side */
		if (bp->link_up) {
			bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
			/* Must drop phy_lock (a BH spinlock) to sleep. */
			spin_unlock_bh(&bp->phy_lock);
			msleep(20);
			spin_lock_bh(&bp->phy_lock);
		}

		bnx2_write_phy(bp, bp->mii_adv, new_adv);
		bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART |
			BMCR_ANENABLE);
		/* Speed up link-up time when the link partner
		 * does not autonegotiate which is very common
		 * in blade servers. Some blade servers use
		 * IPMI for keyboard input and it's important
		 * to minimize link disruptions. Autoneg. involves
		 * exchanging base pages plus 3 next pages and
		 * normally completes in about 120 msec.
		 */
		bp->current_interval = SERDES_AN_TIMEOUT;
		bp->serdes_an_pending = 1;
		mod_timer(&bp->timer, jiffies + bp->current_interval);
	} else {
		bnx2_resolve_flow_ctrl(bp);
		bnx2_set_mac_link(bp);
	}

	return 0;
}
1324
/* Ethtool advertising masks (ADVERTISED_*) and MII advertisement
 * register masks (ADVERTISE_*) used by the PHY setup routines below.
 */
#define ETHTOOL_ALL_FIBRE_SPEED						\
	(ADVERTISED_1000baseT_Full)

#define ETHTOOL_ALL_COPPER_SPEED					\
	(ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |		\
	ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |		\
	ADVERTISED_1000baseT_Full)

#define PHY_ALL_10_100_SPEED (ADVERTISE_10HALF | ADVERTISE_10FULL | \
	ADVERTISE_100HALF | ADVERTISE_100FULL | ADVERTISE_CSMA)

#define PHY_ALL_1000_SPEED (ADVERTISE_1000HALF | ADVERTISE_1000FULL)
1337
/* Configure the copper PHY per bp->autoneg/bp->req_*: program the
 * advertisement registers and restart autoneg, or force speed/duplex.
 * Called with bp->phy_lock held; drops it around the msleep() used to
 * let a forced link-down propagate.  Always returns 0.
 */
static int
bnx2_setup_copper_phy(struct bnx2 *bp)
{
	u32 bmcr;
	u32 new_bmcr;

	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

	if (bp->autoneg & AUTONEG_SPEED) {
		u32 adv_reg, adv1000_reg;
		u32 new_adv_reg = 0;
		u32 new_adv1000_reg = 0;

		/* Mask the current registers down to the bits we manage
		 * so the changed/unchanged comparison below is valid.
		 */
		bnx2_read_phy(bp, bp->mii_adv, &adv_reg);
		adv_reg &= (PHY_ALL_10_100_SPEED | ADVERTISE_PAUSE_CAP |
			ADVERTISE_PAUSE_ASYM);

		bnx2_read_phy(bp, MII_CTRL1000, &adv1000_reg);
		adv1000_reg &= PHY_ALL_1000_SPEED;

		/* Translate ethtool advertising flags into MII bits. */
		if (bp->advertising & ADVERTISED_10baseT_Half)
			new_adv_reg |= ADVERTISE_10HALF;
		if (bp->advertising & ADVERTISED_10baseT_Full)
			new_adv_reg |= ADVERTISE_10FULL;
		if (bp->advertising & ADVERTISED_100baseT_Half)
			new_adv_reg |= ADVERTISE_100HALF;
		if (bp->advertising & ADVERTISED_100baseT_Full)
			new_adv_reg |= ADVERTISE_100FULL;
		if (bp->advertising & ADVERTISED_1000baseT_Full)
			new_adv1000_reg |= ADVERTISE_1000FULL;

		new_adv_reg |= ADVERTISE_CSMA;

		new_adv_reg |= bnx2_phy_get_pause_adv(bp);

		/* Rewrite and restart autoneg only when something
		 * actually changed or autoneg was previously off.
		 */
		if ((adv1000_reg != new_adv1000_reg) ||
			(adv_reg != new_adv_reg) ||
			((bmcr & BMCR_ANENABLE) == 0)) {

			bnx2_write_phy(bp, bp->mii_adv, new_adv_reg);
			bnx2_write_phy(bp, MII_CTRL1000, new_adv1000_reg);
			bnx2_write_phy(bp, bp->mii_bmcr, BMCR_ANRESTART |
				BMCR_ANENABLE);
		}
		else if (bp->link_up) {
			/* Flow ctrl may have changed from auto to forced */
			/* or vice-versa. */

			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);
		}
		return 0;
	}

	/* Forced speed/duplex path (10/100 only here). */
	new_bmcr = 0;
	if (bp->req_line_speed == SPEED_100) {
		new_bmcr |= BMCR_SPEED100;
	}
	if (bp->req_duplex == DUPLEX_FULL) {
		new_bmcr |= BMCR_FULLDPLX;
	}
	if (new_bmcr != bmcr) {
		u32 bmsr;

		/* Double read: BMSR link status is latched, the second
		 * read returns the current state.
		 */
		bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
		bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);

		if (bmsr & BMSR_LSTATUS) {
			/* Force link down */
			bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
			/* Must drop the BH spinlock to sleep. */
			spin_unlock_bh(&bp->phy_lock);
			msleep(50);
			spin_lock_bh(&bp->phy_lock);

			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
		}

		bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);

		/* Normally, the new speed is setup after the link has
		 * gone down and up again. In some cases, link will not go
		 * down so we need to set up the new speed here.
		 */
		if (bmsr & BMSR_LSTATUS) {
			bp->line_speed = bp->req_line_speed;
			bp->duplex = bp->req_duplex;
			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);
		}
	} else {
		bnx2_resolve_flow_ctrl(bp);
		bnx2_set_mac_link(bp);
	}
	return 0;
}
1434
1435 static int
1436 bnx2_setup_phy(struct bnx2 *bp)
1437 {
1438         if (bp->loopback == MAC_LOOPBACK)
1439                 return 0;
1440
1441         if (bp->phy_flags & PHY_SERDES_FLAG) {
1442                 return (bnx2_setup_serdes_phy(bp));
1443         }
1444         else {
1445                 return (bnx2_setup_copper_phy(bp));
1446         }
1447 }
1448
/* One-time init for the 5709 SerDes PHY.  This PHY exposes the IEEE
 * registers at an offset of 0x10 and uses paged "block addresses", so
 * the bp->mii_* shortcuts are redirected first, then the pages are
 * programmed in sequence.  Always returns 0.
 */
static int
bnx2_init_5709s_phy(struct bnx2 *bp)
{
	u32 val;

	/* Redirect the generic MII accessors to the 5709 layout. */
	bp->mii_bmcr = MII_BMCR + 0x10;
	bp->mii_bmsr = MII_BMSR + 0x10;
	bp->mii_bmsr1 = MII_BNX2_GP_TOP_AN_STATUS1;
	bp->mii_adv = MII_ADVERTISE + 0x10;
	bp->mii_lpa = MII_LPA + 0x10;
	bp->mii_up1 = MII_BNX2_OVER1G_UP1;

	/* Select the autoneg MMD through the address-expansion page. */
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_AER);
	bnx2_write_phy(bp, MII_BNX2_AER_AER, MII_BNX2_AER_AER_AN_MMD);

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
	bnx2_reset_phy(bp);

	/* Force fiber mode and turn off media auto-detection. */
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_SERDES_DIG);

	bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, &val);
	val &= ~MII_BNX2_SD_1000XCTL1_AUTODET;
	val |= MII_BNX2_SD_1000XCTL1_FIBER;
	bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, val);

	/* Advertise 2.5G only when the board is 2.5G capable. */
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
	bnx2_read_phy(bp, MII_BNX2_OVER1G_UP1, &val);
	if (bp->phy_flags & PHY_2_5G_CAPABLE_FLAG)
		val |= BCM5708S_UP1_2G5;
	else
		val &= ~BCM5708S_UP1_2G5;
	bnx2_write_phy(bp, MII_BNX2_OVER1G_UP1, val);

	/* Enable Broadcom autoneg (BAM) and teton2 next-page exchange. */
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_BAM_NXTPG);
	bnx2_read_phy(bp, MII_BNX2_BAM_NXTPG_CTL, &val);
	val |= MII_BNX2_NXTPG_CTL_T2 | MII_BNX2_NXTPG_CTL_BAM;
	bnx2_write_phy(bp, MII_BNX2_BAM_NXTPG_CTL, val);

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_CL73_USERB0);

	/* Enable BAM over clause 73 autoneg as well. */
	val = MII_BNX2_CL73_BAM_EN | MII_BNX2_CL73_BAM_STA_MGR_EN |
	      MII_BNX2_CL73_BAM_NP_AFT_BP_EN;
	bnx2_write_phy(bp, MII_BNX2_CL73_BAM_CTL1, val);

	/* Leave the block address pointing at the IEEE registers. */
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);

	return 0;
}
1497
/* One-time init for the 5708 SerDes PHY: select the IEEE register
 * layout, enable fiber auto-detect and parallel detect, optionally
 * enable 2.5G, and apply board-specific TX amplitude fixups taken
 * from shared firmware configuration.  Always returns 0.
 */
static int
bnx2_init_5708s_phy(struct bnx2 *bp)
{
	u32 val;

	bnx2_reset_phy(bp);

	bp->mii_up1 = BCM5708S_UP1;

	/* Use the standard IEEE register semantics. */
	bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG3);
	bnx2_write_phy(bp, BCM5708S_DIG_3_0, BCM5708S_DIG_3_0_USE_IEEE);
	bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);

	/* Fiber mode with signal auto-detection. */
	bnx2_read_phy(bp, BCM5708S_1000X_CTL1, &val);
	val |= BCM5708S_1000X_CTL1_FIBER_MODE | BCM5708S_1000X_CTL1_AUTODET_EN;
	bnx2_write_phy(bp, BCM5708S_1000X_CTL1, val);

	/* Enable parallel detection. */
	bnx2_read_phy(bp, BCM5708S_1000X_CTL2, &val);
	val |= BCM5708S_1000X_CTL2_PLLEL_DET_EN;
	bnx2_write_phy(bp, BCM5708S_1000X_CTL2, val);

	if (bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) {
		bnx2_read_phy(bp, BCM5708S_UP1, &val);
		val |= BCM5708S_UP1_2G5;
		bnx2_write_phy(bp, BCM5708S_UP1, val);
	}

	if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_B1)) {
		/* increase tx signal amplitude */
		bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
			       BCM5708S_BLK_ADDR_TX_MISC);
		bnx2_read_phy(bp, BCM5708S_TX_ACTL1, &val);
		val &= ~BCM5708S_TX_ACTL1_DRIVER_VCM;
		bnx2_write_phy(bp, BCM5708S_TX_ACTL1, val);
		bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);
	}

	/* If firmware shared memory provides a TXCTL3 value, apply it
	 * on backplane boards.
	 */
	val = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_CONFIG) &
	      BNX2_PORT_HW_CFG_CFG_TXCTL3_MASK;

	if (val) {
		u32 is_backplane;

		is_backplane = REG_RD_IND(bp, bp->shmem_base +
					  BNX2_SHARED_HW_CFG_CONFIG);
		if (is_backplane & BNX2_SHARED_HW_CFG_PHY_BACKPLANE) {
			bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
				       BCM5708S_BLK_ADDR_TX_MISC);
			bnx2_write_phy(bp, BCM5708S_TX_ACTL3, val);
			bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
				       BCM5708S_BLK_ADDR_DIG);
		}
	}
	return 0;
}
1555
/* One-time init for the 5706 SerDes PHY.  Registers 0x18/0x1c appear
 * to be Broadcom shadow/aux control registers programmed differently
 * for jumbo vs. standard MTU — NOTE(review): confirm the magic values
 * against the PHY datasheet before changing them.  Always returns 0.
 */
static int
bnx2_init_5706s_phy(struct bnx2 *bp)
{
	bnx2_reset_phy(bp);

	bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG;

	if (CHIP_NUM(bp) == CHIP_NUM_5706)
		REG_WR(bp, BNX2_MISC_GP_HW_CTL0, 0x300);

	if (bp->dev->mtu > 1500) {
		u32 val;

		/* Set extended packet length bit */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, (val & 0xfff8) | 0x4000);

		bnx2_write_phy(bp, 0x1c, 0x6c00);
		bnx2_read_phy(bp, 0x1c, &val);
		bnx2_write_phy(bp, 0x1c, (val & 0x3ff) | 0xec02);
	}
	else {
		u32 val;

		/* Standard MTU: clear the extended packet length bit. */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, val & ~0x4007);

		bnx2_write_phy(bp, 0x1c, 0x6c00);
		bnx2_read_phy(bp, 0x1c, &val);
		bnx2_write_phy(bp, 0x1c, (val & 0x3fd) | 0xec00);
	}

	return 0;
}
1592
/* One-time init for a copper PHY: apply board-specific workarounds,
 * set packet-length handling per MTU, and enable ethernet@wirespeed.
 * Always returns 0.
 */
static int
bnx2_init_copper_phy(struct bnx2 *bp)
{
	u32 val;

	bnx2_reset_phy(bp);

	if (bp->phy_flags & PHY_CRC_FIX_FLAG) {
		/* CRC workaround: writes go through the 0x17/0x15 DSP
		 * address/data register pair.  NOTE(review): values are
		 * vendor-provided magic — confirm against Broadcom
		 * errata before editing.
		 */
		bnx2_write_phy(bp, 0x18, 0x0c00);
		bnx2_write_phy(bp, 0x17, 0x000a);
		bnx2_write_phy(bp, 0x15, 0x310b);
		bnx2_write_phy(bp, 0x17, 0x201f);
		bnx2_write_phy(bp, 0x15, 0x9506);
		bnx2_write_phy(bp, 0x17, 0x401f);
		bnx2_write_phy(bp, 0x15, 0x14e2);
		bnx2_write_phy(bp, 0x18, 0x0400);
	}

	if (bp->phy_flags & PHY_DIS_EARLY_DAC_FLAG) {
		/* Clear bit 8 of DSP expansion register 8 to disable
		 * the early DAC on affected boards.
		 */
		bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS,
			       MII_BNX2_DSP_EXPAND_REG | 0x8);
		bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &val);
		val &= ~(1 << 8);
		bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val);
	}

	if (bp->dev->mtu > 1500) {
		/* Set extended packet length bit */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, val | 0x4000);

		bnx2_read_phy(bp, 0x10, &val);
		bnx2_write_phy(bp, 0x10, val | 0x1);
	}
	else {
		/* Standard MTU: clear the extended length bits. */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, val & ~0x4007);

		bnx2_read_phy(bp, 0x10, &val);
		bnx2_write_phy(bp, 0x10, val & ~0x1);
	}

	/* ethernet@wirespeed */
	bnx2_write_phy(bp, 0x18, 0x7007);
	bnx2_read_phy(bp, 0x18, &val);
	bnx2_write_phy(bp, 0x18, val | (1 << 15) | (1 << 4));
	return 0;
}
1643
1644
1645 static int
1646 bnx2_init_phy(struct bnx2 *bp)
1647 {
1648         u32 val;
1649         int rc = 0;
1650
1651         bp->phy_flags &= ~PHY_INT_MODE_MASK_FLAG;
1652         bp->phy_flags |= PHY_INT_MODE_LINK_READY_FLAG;
1653
1654         bp->mii_bmcr = MII_BMCR;
1655         bp->mii_bmsr = MII_BMSR;
1656         bp->mii_bmsr1 = MII_BMSR;
1657         bp->mii_adv = MII_ADVERTISE;
1658         bp->mii_lpa = MII_LPA;
1659
1660         REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);
1661
1662         bnx2_read_phy(bp, MII_PHYSID1, &val);
1663         bp->phy_id = val << 16;
1664         bnx2_read_phy(bp, MII_PHYSID2, &val);
1665         bp->phy_id |= val & 0xffff;
1666
1667         if (bp->phy_flags & PHY_SERDES_FLAG) {
1668                 if (CHIP_NUM(bp) == CHIP_NUM_5706)
1669                         rc = bnx2_init_5706s_phy(bp);
1670                 else if (CHIP_NUM(bp) == CHIP_NUM_5708)
1671                         rc = bnx2_init_5708s_phy(bp);
1672                 else if (CHIP_NUM(bp) == CHIP_NUM_5709)
1673                         rc = bnx2_init_5709s_phy(bp);
1674         }
1675         else {
1676                 rc = bnx2_init_copper_phy(bp);
1677         }
1678
1679         bnx2_setup_phy(bp);
1680
1681         return rc;
1682 }
1683
1684 static int
1685 bnx2_set_mac_loopback(struct bnx2 *bp)
1686 {
1687         u32 mac_mode;
1688
1689         mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
1690         mac_mode &= ~BNX2_EMAC_MODE_PORT;
1691         mac_mode |= BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK;
1692         REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
1693         bp->link_up = 1;
1694         return 0;
1695 }
1696
1697 static int bnx2_test_link(struct bnx2 *);
1698
1699 static int
1700 bnx2_set_phy_loopback(struct bnx2 *bp)
1701 {
1702         u32 mac_mode;
1703         int rc, i;
1704
1705         spin_lock_bh(&bp->phy_lock);
1706         rc = bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK | BMCR_FULLDPLX |
1707                             BMCR_SPEED1000);
1708         spin_unlock_bh(&bp->phy_lock);
1709         if (rc)
1710                 return rc;
1711
1712         for (i = 0; i < 10; i++) {
1713                 if (bnx2_test_link(bp) == 0)
1714                         break;
1715                 msleep(100);
1716         }
1717
1718         mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
1719         mac_mode &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
1720                       BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
1721                       BNX2_EMAC_MODE_25G_MODE);
1722
1723         mac_mode |= BNX2_EMAC_MODE_PORT_GMII;
1724         REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
1725         bp->link_up = 1;
1726         return 0;
1727 }
1728
/* Post a message to the bootcode mailbox and wait for an ACK.
 * @msg_data: BNX2_DRV_MSG_CODE_* command plus BNX2_DRV_MSG_DATA_* flags;
 *            the driver sequence number is OR'ed in below.
 * @silent:   non-zero suppresses the timeout printk.
 * Returns 0 on success, -EBUSY on ACK timeout, -EIO if the firmware
 * reported a non-OK status.
 */
static int
bnx2_fw_sync(struct bnx2 *bp, u32 msg_data, int silent)
{
	int i;
	u32 val;

	/* Each request carries a fresh sequence number so the ACK can
	 * be matched to this particular message. */
	bp->fw_wr_seq++;
	msg_data |= bp->fw_wr_seq;

	REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_MB, msg_data);

	/* wait for an acknowledgement. */
	for (i = 0; i < (FW_ACK_TIME_OUT_MS / 10); i++) {
		msleep(10);

		val = REG_RD_IND(bp, bp->shmem_base + BNX2_FW_MB);

		/* Firmware echoes the sequence number in the ACK field. */
		if ((val & BNX2_FW_MSG_ACK) == (msg_data & BNX2_DRV_MSG_SEQ))
			break;
	}
	/* WAIT0 messages are fire-and-forget: no status check needed. */
	if ((msg_data & BNX2_DRV_MSG_DATA) == BNX2_DRV_MSG_DATA_WAIT0)
		return 0;

	/* If we timed out, inform the firmware that this is the case. */
	if ((val & BNX2_FW_MSG_ACK) != (msg_data & BNX2_DRV_MSG_SEQ)) {
		if (!silent)
			printk(KERN_ERR PFX "fw sync timeout, reset code = "
					    "%x\n", msg_data);

		msg_data &= ~BNX2_DRV_MSG_CODE;
		msg_data |= BNX2_DRV_MSG_CODE_FW_TIMEOUT;

		REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_MB, msg_data);

		return -EBUSY;
	}

	if ((val & BNX2_FW_MSG_STATUS_MASK) != BNX2_FW_MSG_STATUS_OK)
		return -EIO;

	return 0;
}
1771
1772 static int
1773 bnx2_init_5709_context(struct bnx2 *bp)
1774 {
1775         int i, ret = 0;
1776         u32 val;
1777
1778         val = BNX2_CTX_COMMAND_ENABLED | BNX2_CTX_COMMAND_MEM_INIT | (1 << 12);
1779         val |= (BCM_PAGE_BITS - 8) << 16;
1780         REG_WR(bp, BNX2_CTX_COMMAND, val);
1781         for (i = 0; i < bp->ctx_pages; i++) {
1782                 int j;
1783
1784                 REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA0,
1785                        (bp->ctx_blk_mapping[i] & 0xffffffff) |
1786                        BNX2_CTX_HOST_PAGE_TBL_DATA0_VALID);
1787                 REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA1,
1788                        (u64) bp->ctx_blk_mapping[i] >> 32);
1789                 REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL, i |
1790                        BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ);
1791                 for (j = 0; j < 10; j++) {
1792
1793                         val = REG_RD(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL);
1794                         if (!(val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ))
1795                                 break;
1796                         udelay(5);
1797                 }
1798                 if (val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ) {
1799                         ret = -EBUSY;
1800                         break;
1801                 }
1802         }
1803         return ret;
1804 }
1805
1806 static void
1807 bnx2_init_context(struct bnx2 *bp)
1808 {
1809         u32 vcid;
1810
1811         vcid = 96;
1812         while (vcid) {
1813                 u32 vcid_addr, pcid_addr, offset;
1814
1815                 vcid--;
1816
1817                 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
1818                         u32 new_vcid;
1819
1820                         vcid_addr = GET_PCID_ADDR(vcid);
1821                         if (vcid & 0x8) {
1822                                 new_vcid = 0x60 + (vcid & 0xf0) + (vcid & 0x7);
1823                         }
1824                         else {
1825                                 new_vcid = vcid;
1826                         }
1827                         pcid_addr = GET_PCID_ADDR(new_vcid);
1828                 }
1829                 else {
1830                         vcid_addr = GET_CID_ADDR(vcid);
1831                         pcid_addr = vcid_addr;
1832                 }
1833
1834                 REG_WR(bp, BNX2_CTX_VIRT_ADDR, 0x00);
1835                 REG_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr);
1836
1837                 /* Zero out the context. */
1838                 for (offset = 0; offset < PHY_CTX_SIZE; offset += 4) {
1839                         CTX_WR(bp, 0x00, offset, 0);
1840                 }
1841
1842                 REG_WR(bp, BNX2_CTX_VIRT_ADDR, vcid_addr);
1843                 REG_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr);
1844         }
1845 }
1846
1847 static int
1848 bnx2_alloc_bad_rbuf(struct bnx2 *bp)
1849 {
1850         u16 *good_mbuf;
1851         u32 good_mbuf_cnt;
1852         u32 val;
1853
1854         good_mbuf = kmalloc(512 * sizeof(u16), GFP_KERNEL);
1855         if (good_mbuf == NULL) {
1856                 printk(KERN_ERR PFX "Failed to allocate memory in "
1857                                     "bnx2_alloc_bad_rbuf\n");
1858                 return -ENOMEM;
1859         }
1860
1861         REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
1862                 BNX2_MISC_ENABLE_SET_BITS_RX_MBUF_ENABLE);
1863
1864         good_mbuf_cnt = 0;
1865
1866         /* Allocate a bunch of mbufs and save the good ones in an array. */
1867         val = REG_RD_IND(bp, BNX2_RBUF_STATUS1);
1868         while (val & BNX2_RBUF_STATUS1_FREE_COUNT) {
1869                 REG_WR_IND(bp, BNX2_RBUF_COMMAND, BNX2_RBUF_COMMAND_ALLOC_REQ);
1870
1871                 val = REG_RD_IND(bp, BNX2_RBUF_FW_BUF_ALLOC);
1872
1873                 val &= BNX2_RBUF_FW_BUF_ALLOC_VALUE;
1874
1875                 /* The addresses with Bit 9 set are bad memory blocks. */
1876                 if (!(val & (1 << 9))) {
1877                         good_mbuf[good_mbuf_cnt] = (u16) val;
1878                         good_mbuf_cnt++;
1879                 }
1880
1881                 val = REG_RD_IND(bp, BNX2_RBUF_STATUS1);
1882         }
1883
1884         /* Free the good ones back to the mbuf pool thus discarding
1885          * all the bad ones. */
1886         while (good_mbuf_cnt) {
1887                 good_mbuf_cnt--;
1888
1889                 val = good_mbuf[good_mbuf_cnt];
1890                 val = (val << 9) | val | 1;
1891
1892                 REG_WR_IND(bp, BNX2_RBUF_FW_BUF_FREE, val);
1893         }
1894         kfree(good_mbuf);
1895         return 0;
1896 }
1897
1898 static void
1899 bnx2_set_mac_addr(struct bnx2 *bp)
1900 {
1901         u32 val;
1902         u8 *mac_addr = bp->dev->dev_addr;
1903
1904         val = (mac_addr[0] << 8) | mac_addr[1];
1905
1906         REG_WR(bp, BNX2_EMAC_MAC_MATCH0, val);
1907
1908         val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
1909                 (mac_addr[4] << 8) | mac_addr[5];
1910
1911         REG_WR(bp, BNX2_EMAC_MAC_MATCH1, val);
1912 }
1913
1914 static inline int
1915 bnx2_alloc_rx_skb(struct bnx2 *bp, u16 index)
1916 {
1917         struct sk_buff *skb;
1918         struct sw_bd *rx_buf = &bp->rx_buf_ring[index];
1919         dma_addr_t mapping;
1920         struct rx_bd *rxbd = &bp->rx_desc_ring[RX_RING(index)][RX_IDX(index)];
1921         unsigned long align;
1922
1923         skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
1924         if (skb == NULL) {
1925                 return -ENOMEM;
1926         }
1927
1928         if (unlikely((align = (unsigned long) skb->data & (BNX2_RX_ALIGN - 1))))
1929                 skb_reserve(skb, BNX2_RX_ALIGN - align);
1930
1931         mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_use_size,
1932                 PCI_DMA_FROMDEVICE);
1933
1934         rx_buf->skb = skb;
1935         pci_unmap_addr_set(rx_buf, mapping, mapping);
1936
1937         rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
1938         rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;
1939
1940         bp->rx_prod_bseq += bp->rx_buf_use_size;
1941
1942         return 0;
1943 }
1944
1945 static int
1946 bnx2_phy_event_is_set(struct bnx2 *bp, u32 event)
1947 {
1948         struct status_block *sblk = bp->status_blk;
1949         u32 new_link_state, old_link_state;
1950         int is_set = 1;
1951
1952         new_link_state = sblk->status_attn_bits & event;
1953         old_link_state = sblk->status_attn_bits_ack & event;
1954         if (new_link_state != old_link_state) {
1955                 if (new_link_state)
1956                         REG_WR(bp, BNX2_PCICFG_STATUS_BIT_SET_CMD, event);
1957                 else
1958                         REG_WR(bp, BNX2_PCICFG_STATUS_BIT_CLEAR_CMD, event);
1959         } else
1960                 is_set = 0;
1961
1962         return is_set;
1963 }
1964
1965 static void
1966 bnx2_phy_int(struct bnx2 *bp)
1967 {
1968         if (bnx2_phy_event_is_set(bp, STATUS_ATTN_BITS_LINK_STATE)) {
1969                 spin_lock(&bp->phy_lock);
1970                 bnx2_set_link(bp);
1971                 spin_unlock(&bp->phy_lock);
1972         }
1973 }
1974
/* Service tx completions: unmap and free every skb the hardware has
 * finished transmitting, then wake the tx queue if it was stopped and
 * enough descriptors are now free.
 */
static void
bnx2_tx_int(struct bnx2 *bp)
{
	struct status_block *sblk = bp->status_blk;
	u16 hw_cons, sw_cons, sw_ring_cons;
	int tx_free_bd = 0;

	hw_cons = bp->hw_tx_cons = sblk->status_tx_quick_consumer_index0;
	/* Step the index over the last entry of a ring page (that slot
	 * is not a normal tx BD). */
	if ((hw_cons & MAX_TX_DESC_CNT) == MAX_TX_DESC_CNT) {
		hw_cons++;
	}
	sw_cons = bp->tx_cons;

	while (sw_cons != hw_cons) {
		struct sw_bd *tx_buf;
		struct sk_buff *skb;
		int i, last;

		sw_ring_cons = TX_RING_IDX(sw_cons);

		tx_buf = &bp->tx_buf_ring[sw_ring_cons];
		skb = tx_buf->skb;

		/* partial BD completions possible with TSO packets */
		if (skb_is_gso(skb)) {
			u16 last_idx, last_ring_idx;

			/* Only reclaim this packet once ALL of its BDs
			 * (head + nr_frags) have completed. */
			last_idx = sw_cons +
				skb_shinfo(skb)->nr_frags + 1;
			last_ring_idx = sw_ring_cons +
				skb_shinfo(skb)->nr_frags + 1;
			if (unlikely(last_ring_idx >= MAX_TX_DESC_CNT)) {
				/* Packet wraps past a page boundary; account
				 * for the skipped last-page entry. */
				last_idx++;
			}
			/* Signed 16-bit difference handles index wrap. */
			if (((s16) ((s16) last_idx - (s16) hw_cons)) > 0) {
				break;
			}
		}

		pci_unmap_single(bp->pdev, pci_unmap_addr(tx_buf, mapping),
			skb_headlen(skb), PCI_DMA_TODEVICE);

		tx_buf->skb = NULL;
		last = skb_shinfo(skb)->nr_frags;

		/* Unmap each fragment BD that followed the head BD. */
		for (i = 0; i < last; i++) {
			sw_cons = NEXT_TX_BD(sw_cons);

			pci_unmap_page(bp->pdev,
				pci_unmap_addr(
					&bp->tx_buf_ring[TX_RING_IDX(sw_cons)],
					mapping),
				skb_shinfo(skb)->frags[i].size,
				PCI_DMA_TODEVICE);
		}

		sw_cons = NEXT_TX_BD(sw_cons);

		tx_free_bd += last + 1;

		dev_kfree_skb(skb);

		/* Re-read the hardware index: more completions may have
		 * arrived while we were freeing. */
		hw_cons = bp->hw_tx_cons =
			sblk->status_tx_quick_consumer_index0;

		if ((hw_cons & MAX_TX_DESC_CNT) == MAX_TX_DESC_CNT) {
			hw_cons++;
		}
	}

	bp->tx_cons = sw_cons;
	/* Need to make the tx_cons update visible to bnx2_start_xmit()
	 * before checking for netif_queue_stopped().  Without the
	 * memory barrier, there is a small possibility that bnx2_start_xmit()
	 * will miss it and cause the queue to be stopped forever.
	 */
	smp_mb();

	/* Re-check the stop condition under netif_tx_lock to close the
	 * race with bnx2_start_xmit() stopping the queue. */
	if (unlikely(netif_queue_stopped(bp->dev)) &&
		     (bnx2_tx_avail(bp) > bp->tx_wake_thresh)) {
		netif_tx_lock(bp->dev);
		if ((netif_queue_stopped(bp->dev)) &&
		    (bnx2_tx_avail(bp) > bp->tx_wake_thresh))
			netif_wake_queue(bp->dev);
		netif_tx_unlock(bp->dev);
	}
}
2062
2063 static inline void
2064 bnx2_reuse_rx_skb(struct bnx2 *bp, struct sk_buff *skb,
2065         u16 cons, u16 prod)
2066 {
2067         struct sw_bd *cons_rx_buf, *prod_rx_buf;
2068         struct rx_bd *cons_bd, *prod_bd;
2069
2070         cons_rx_buf = &bp->rx_buf_ring[cons];
2071         prod_rx_buf = &bp->rx_buf_ring[prod];
2072
2073         pci_dma_sync_single_for_device(bp->pdev,
2074                 pci_unmap_addr(cons_rx_buf, mapping),
2075                 bp->rx_offset + RX_COPY_THRESH, PCI_DMA_FROMDEVICE);
2076
2077         bp->rx_prod_bseq += bp->rx_buf_use_size;
2078
2079         prod_rx_buf->skb = skb;
2080
2081         if (cons == prod)
2082                 return;
2083
2084         pci_unmap_addr_set(prod_rx_buf, mapping,
2085                         pci_unmap_addr(cons_rx_buf, mapping));
2086
2087         cons_bd = &bp->rx_desc_ring[RX_RING(cons)][RX_IDX(cons)];
2088         prod_bd = &bp->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
2089         prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
2090         prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;
2091 }
2092
/* Service up to @budget received packets: pass good frames up the
 * stack, recycle buffers for errored frames, refill the ring, and
 * tell the chip the new producer index.  Returns packets processed.
 */
static int
bnx2_rx_int(struct bnx2 *bp, int budget)
{
	struct status_block *sblk = bp->status_blk;
	u16 hw_cons, sw_cons, sw_ring_cons, sw_prod, sw_ring_prod;
	struct l2_fhdr *rx_hdr;
	int rx_pkt = 0;

	hw_cons = bp->hw_rx_cons = sblk->status_rx_quick_consumer_index0;
	/* Step the index over the last entry of a ring page. */
	if ((hw_cons & MAX_RX_DESC_CNT) == MAX_RX_DESC_CNT) {
		hw_cons++;
	}
	sw_cons = bp->rx_cons;
	sw_prod = bp->rx_prod;

	/* Memory barrier necessary as speculative reads of the rx
	 * buffer can be ahead of the index in the status block
	 */
	rmb();
	while (sw_cons != hw_cons) {
		unsigned int len;
		u32 status;
		struct sw_bd *rx_buf;
		struct sk_buff *skb;
		dma_addr_t dma_addr;

		sw_ring_cons = RX_RING_IDX(sw_cons);
		sw_ring_prod = RX_RING_IDX(sw_prod);

		rx_buf = &bp->rx_buf_ring[sw_ring_cons];
		skb = rx_buf->skb;

		rx_buf->skb = NULL;

		dma_addr = pci_unmap_addr(rx_buf, mapping);

		/* Sync only the header area for the CPU; the full buffer
		 * is synced by the unmap on the keep-skb path below. */
		pci_dma_sync_single_for_cpu(bp->pdev, dma_addr,
			bp->rx_offset + RX_COPY_THRESH, PCI_DMA_FROMDEVICE);

		/* The chip prepends an l2_fhdr with status and length. */
		rx_hdr = (struct l2_fhdr *) skb->data;
		len = rx_hdr->l2_fhdr_pkt_len - 4;

		if ((status = rx_hdr->l2_fhdr_status) &
			(L2_FHDR_ERRORS_BAD_CRC |
			L2_FHDR_ERRORS_PHY_DECODE |
			L2_FHDR_ERRORS_ALIGNMENT |
			L2_FHDR_ERRORS_TOO_SHORT |
			L2_FHDR_ERRORS_GIANT_FRAME)) {

			goto reuse_rx;
		}

		/* Since we don't have a jumbo ring, copy small packets
		 * if mtu > 1500
		 */
		if ((bp->dev->mtu > 1500) && (len <= RX_COPY_THRESH)) {
			struct sk_buff *new_skb;

			new_skb = netdev_alloc_skb(bp->dev, len + 2);
			if (new_skb == NULL)
				goto reuse_rx;

			/* aligned copy */
			skb_copy_from_linear_data_offset(skb, bp->rx_offset - 2,
				      new_skb->data, len + 2);
			skb_reserve(new_skb, 2);
			skb_put(new_skb, len);

			/* The original buffer goes straight back to the
			 * ring; only the copy goes up the stack. */
			bnx2_reuse_rx_skb(bp, skb,
				sw_ring_cons, sw_ring_prod);

			skb = new_skb;
		}
		else if (bnx2_alloc_rx_skb(bp, sw_ring_prod) == 0) {
			/* Replacement buffer allocated: this skb can be
			 * handed up the stack as-is. */
			pci_unmap_single(bp->pdev, dma_addr,
				bp->rx_buf_use_size, PCI_DMA_FROMDEVICE);

			skb_reserve(skb, bp->rx_offset);
			skb_put(skb, len);
		}
		else {
reuse_rx:
			/* Errored frame or out of memory: recycle the
			 * buffer and drop the packet. */
			bnx2_reuse_rx_skb(bp, skb,
				sw_ring_cons, sw_ring_prod);
			goto next_rx;
		}

		skb->protocol = eth_type_trans(skb, bp->dev);

		/* Drop oversized frames unless VLAN-tagged (0x8100). */
		if ((len > (bp->dev->mtu + ETH_HLEN)) &&
			(ntohs(skb->protocol) != 0x8100)) {

			dev_kfree_skb(skb);
			goto next_rx;

		}

		/* Report hardware checksum results when rx_csum is on. */
		skb->ip_summed = CHECKSUM_NONE;
		if (bp->rx_csum &&
			(status & (L2_FHDR_STATUS_TCP_SEGMENT |
			L2_FHDR_STATUS_UDP_DATAGRAM))) {

			if (likely((status & (L2_FHDR_ERRORS_TCP_XSUM |
					      L2_FHDR_ERRORS_UDP_XSUM)) == 0))
				skb->ip_summed = CHECKSUM_UNNECESSARY;
		}

#ifdef BCM_VLAN
		if ((status & L2_FHDR_STATUS_L2_VLAN_TAG) && (bp->vlgrp != 0)) {
			vlan_hwaccel_receive_skb(skb, bp->vlgrp,
				rx_hdr->l2_fhdr_vlan_tag);
		}
		else
#endif
			netif_receive_skb(skb);

		bp->dev->last_rx = jiffies;
		rx_pkt++;

next_rx:
		sw_cons = NEXT_RX_BD(sw_cons);
		sw_prod = NEXT_RX_BD(sw_prod);

		if ((rx_pkt == budget))
			break;

		/* Refresh hw_cons to see if there is new work */
		if (sw_cons == hw_cons) {
			hw_cons = bp->hw_rx_cons =
				sblk->status_rx_quick_consumer_index0;
			if ((hw_cons & MAX_RX_DESC_CNT) == MAX_RX_DESC_CNT)
				hw_cons++;
			rmb();
		}
	}
	bp->rx_cons = sw_cons;
	bp->rx_prod = sw_prod;

	/* Tell the chip the new producer index and byte sequence. */
	REG_WR16(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BDIDX, sw_prod);

	REG_WR(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BSEQ, bp->rx_prod_bseq);

	mmiowb();

	return rx_pkt;

}
2240
/* MSI ISR - The only difference between this and the INTx ISR
 * is that the MSI interrupt is always serviced.
 */
static irqreturn_t
bnx2_msi(int irq, void *dev_instance)
{
	struct net_device *dev = dev_instance;
	struct bnx2 *bp = netdev_priv(dev);

	prefetch(bp->status_blk);
	/* Mask further chip interrupts until NAPI polling completes. */
	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
		BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
		BNX2_PCICFG_INT_ACK_CMD_MASK_INT);

	/* Return here if interrupt is disabled. */
	if (unlikely(atomic_read(&bp->intr_sem) != 0))
		return IRQ_HANDLED;

	netif_rx_schedule(dev);

	return IRQ_HANDLED;
}
2263
/* One-shot MSI ISR: unlike bnx2_msi(), no interrupt-ack mask write is
 * issued here (one-shot mode is assumed to self-mask — note the
 * missing BNX2_PCICFG_INT_ACK_CMD write).
 */
static irqreturn_t
bnx2_msi_1shot(int irq, void *dev_instance)
{
	struct net_device *dev = dev_instance;
	struct bnx2 *bp = netdev_priv(dev);

	prefetch(bp->status_blk);

	/* Return here if interrupt is disabled. */
	if (unlikely(atomic_read(&bp->intr_sem) != 0))
		return IRQ_HANDLED;

	netif_rx_schedule(dev);

	return IRQ_HANDLED;
}
2280
2281 static irqreturn_t
2282 bnx2_interrupt(int irq, void *dev_instance)
2283 {
2284         struct net_device *dev = dev_instance;
2285         struct bnx2 *bp = netdev_priv(dev);
2286
2287         /* When using INTx, it is possible for the interrupt to arrive
2288          * at the CPU before the status block posted prior to the
2289          * interrupt. Reading a register will flush the status block.
2290          * When using MSI, the MSI message will always complete after
2291          * the status block write.
2292          */
2293         if ((bp->status_blk->status_idx == bp->last_status_idx) &&
2294             (REG_RD(bp, BNX2_PCICFG_MISC_STATUS) &
2295              BNX2_PCICFG_MISC_STATUS_INTA_VALUE))
2296                 return IRQ_NONE;
2297
2298         REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
2299                 BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
2300                 BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
2301
2302         /* Return here if interrupt is shared and is disabled. */
2303         if (unlikely(atomic_read(&bp->intr_sem) != 0))
2304                 return IRQ_HANDLED;
2305
2306         netif_rx_schedule(dev);
2307
2308         return IRQ_HANDLED;
2309 }
2310
2311 #define STATUS_ATTN_EVENTS      STATUS_ATTN_BITS_LINK_STATE
2312
2313 static inline int
2314 bnx2_has_work(struct bnx2 *bp)
2315 {
2316         struct status_block *sblk = bp->status_blk;
2317
2318         if ((sblk->status_rx_quick_consumer_index0 != bp->hw_rx_cons) ||
2319             (sblk->status_tx_quick_consumer_index0 != bp->hw_tx_cons))
2320                 return 1;
2321
2322         if ((sblk->status_attn_bits & STATUS_ATTN_EVENTS) !=
2323             (sblk->status_attn_bits_ack & STATUS_ATTN_EVENTS))
2324                 return 1;
2325
2326         return 0;
2327 }
2328
/* NAPI poll: handle link attentions and tx/rx completions within
 * *budget, and re-enable chip interrupts when all work is done.
 * Returns 0 (done) or 1 (more work pending, stay on the poll list).
 */
static int
bnx2_poll(struct net_device *dev, int *budget)
{
	struct bnx2 *bp = netdev_priv(dev);
	struct status_block *sblk = bp->status_blk;
	u32 status_attn_bits = sblk->status_attn_bits;
	u32 status_attn_bits_ack = sblk->status_attn_bits_ack;

	/* An attention bit differing from its acknowledged copy means
	 * a pending (link) event. */
	if ((status_attn_bits & STATUS_ATTN_EVENTS) !=
	    (status_attn_bits_ack & STATUS_ATTN_EVENTS)) {

		bnx2_phy_int(bp);

		/* This is needed to take care of transient status
		 * during link changes.
		 */
		REG_WR(bp, BNX2_HC_COMMAND,
		       bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
		REG_RD(bp, BNX2_HC_COMMAND);
	}

	if (bp->status_blk->status_tx_quick_consumer_index0 != bp->hw_tx_cons)
		bnx2_tx_int(bp);

	if (bp->status_blk->status_rx_quick_consumer_index0 != bp->hw_rx_cons) {
		int orig_budget = *budget;
		int work_done;

		/* Never process more than the device quota. */
		if (orig_budget > dev->quota)
			orig_budget = dev->quota;

		work_done = bnx2_rx_int(bp, orig_budget);
		*budget -= work_done;
		dev->quota -= work_done;
	}

	/* Record the status index we've processed before the final
	 * has-work check; rmb() orders the status block reads. */
	bp->last_status_idx = bp->status_blk->status_idx;
	rmb();

	if (!bnx2_has_work(bp)) {
		netif_rx_complete(dev);
		if (likely(bp->flags & USING_MSI_FLAG)) {
			/* MSI: a single ack write re-enables interrupts. */
			REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
			       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
			       bp->last_status_idx);
			return 0;
		}
		/* INTx: write the new index with interrupts still
		 * masked, then write again without the mask bit to
		 * re-enable. */
		REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
		       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
		       BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
		       bp->last_status_idx);

		REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
		       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
		       bp->last_status_idx);
		return 0;
	}

	return 1;
}
2389
2390 /* Called with rtnl_lock from vlan functions and also netif_tx_lock
2391  * from set_multicast.
2392  */
2393 static void
2394 bnx2_set_rx_mode(struct net_device *dev)
2395 {
2396         struct bnx2 *bp = netdev_priv(dev);
2397         u32 rx_mode, sort_mode;
2398         int i;
2399
2400         spin_lock_bh(&bp->phy_lock);
2401
2402         rx_mode = bp->rx_mode & ~(BNX2_EMAC_RX_MODE_PROMISCUOUS |
2403                                   BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG);
2404         sort_mode = 1 | BNX2_RPM_SORT_USER0_BC_EN;
2405 #ifdef BCM_VLAN
2406         if (!bp->vlgrp && !(bp->flags & ASF_ENABLE_FLAG))
2407                 rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
2408 #else
2409         if (!(bp->flags & ASF_ENABLE_FLAG))
2410                 rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
2411 #endif
2412         if (dev->flags & IFF_PROMISC) {
2413                 /* Promiscuous mode. */
2414                 rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
2415                 sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN |
2416                              BNX2_RPM_SORT_USER0_PROM_VLAN;
2417         }
2418         else if (dev->flags & IFF_ALLMULTI) {
2419                 for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
2420                         REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
2421                                0xffffffff);
2422                 }
2423                 sort_mode |= BNX2_RPM_SORT_USER0_MC_EN;
2424         }
2425         else {
2426                 /* Accept one or more multicast(s). */
2427                 struct dev_mc_list *mclist;
2428                 u32 mc_filter[NUM_MC_HASH_REGISTERS];
2429                 u32 regidx;
2430                 u32 bit;
2431                 u32 crc;
2432
2433                 memset(mc_filter, 0, 4 * NUM_MC_HASH_REGISTERS);
2434
2435                 for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
2436                      i++, mclist = mclist->next) {
2437
2438                         crc = ether_crc_le(ETH_ALEN, mclist->dmi_addr);
2439                         bit = crc & 0xff;
2440                         regidx = (bit & 0xe0) >> 5;
2441                         bit &= 0x1f;
2442                         mc_filter[regidx] |= (1 << bit);
2443                 }
2444
2445                 for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
2446                         REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
2447                                mc_filter[i]);
2448                 }
2449
2450                 sort_mode |= BNX2_RPM_SORT_USER0_MC_HSH_EN;
2451         }
2452
2453         if (rx_mode != bp->rx_mode) {
2454                 bp->rx_mode = rx_mode;
2455                 REG_WR(bp, BNX2_EMAC_RX_MODE, rx_mode);
2456         }
2457
2458         REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
2459         REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode);
2460         REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode | BNX2_RPM_SORT_USER0_ENA);
2461
2462         spin_unlock_bh(&bp->phy_lock);
2463 }
2464
2465 #define FW_BUF_SIZE     0x8000
2466
2467 static int
2468 bnx2_gunzip_init(struct bnx2 *bp)
2469 {
2470         if ((bp->gunzip_buf = vmalloc(FW_BUF_SIZE)) == NULL)
2471                 goto gunzip_nomem1;
2472
2473         if ((bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL)) == NULL)
2474                 goto gunzip_nomem2;
2475
2476         bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(), GFP_KERNEL);
2477         if (bp->strm->workspace == NULL)
2478                 goto gunzip_nomem3;
2479
2480         return 0;
2481
2482 gunzip_nomem3:
2483         kfree(bp->strm);
2484         bp->strm = NULL;
2485
2486 gunzip_nomem2:
2487         vfree(bp->gunzip_buf);
2488         bp->gunzip_buf = NULL;
2489
2490 gunzip_nomem1:
2491         printk(KERN_ERR PFX "%s: Cannot allocate firmware buffer for "
2492                             "uncompression.\n", bp->dev->name);
2493         return -ENOMEM;
2494 }
2495
2496 static void
2497 bnx2_gunzip_end(struct bnx2 *bp)
2498 {
2499         kfree(bp->strm->workspace);
2500
2501         kfree(bp->strm);
2502         bp->strm = NULL;
2503
2504         if (bp->gunzip_buf) {
2505                 vfree(bp->gunzip_buf);
2506                 bp->gunzip_buf = NULL;
2507         }
2508 }
2509
2510 static int
2511 bnx2_gunzip(struct bnx2 *bp, u8 *zbuf, int len, void **outbuf, int *outlen)
2512 {
2513         int n, rc;
2514
2515         /* check gzip header */
2516         if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED))
2517                 return -EINVAL;
2518
2519         n = 10;
2520
2521 #define FNAME   0x8
2522         if (zbuf[3] & FNAME)
2523                 while ((zbuf[n++] != 0) && (n < len));
2524
2525         bp->strm->next_in = zbuf + n;
2526         bp->strm->avail_in = len - n;
2527         bp->strm->next_out = bp->gunzip_buf;
2528         bp->strm->avail_out = FW_BUF_SIZE;
2529
2530         rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
2531         if (rc != Z_OK)
2532                 return rc;
2533
2534         rc = zlib_inflate(bp->strm, Z_FINISH);
2535
2536         *outlen = FW_BUF_SIZE - bp->strm->avail_out;
2537         *outbuf = bp->gunzip_buf;
2538
2539         if ((rc != Z_OK) && (rc != Z_STREAM_END))
2540                 printk(KERN_ERR PFX "%s: Firmware decompression error: %s\n",
2541                        bp->dev->name, bp->strm->msg);
2542
2543         zlib_inflateEnd(bp->strm);
2544
2545         if (rc == Z_STREAM_END)
2546                 return 0;
2547
2548         return rc;
2549 }
2550
2551 static void
2552 load_rv2p_fw(struct bnx2 *bp, u32 *rv2p_code, u32 rv2p_code_len,
2553         u32 rv2p_proc)
2554 {
2555         int i;
2556         u32 val;
2557
2558
2559         for (i = 0; i < rv2p_code_len; i += 8) {
2560                 REG_WR(bp, BNX2_RV2P_INSTR_HIGH, cpu_to_le32(*rv2p_code));
2561                 rv2p_code++;
2562                 REG_WR(bp, BNX2_RV2P_INSTR_LOW, cpu_to_le32(*rv2p_code));
2563                 rv2p_code++;
2564
2565                 if (rv2p_proc == RV2P_PROC1) {
2566                         val = (i / 8) | BNX2_RV2P_PROC1_ADDR_CMD_RDWR;
2567                         REG_WR(bp, BNX2_RV2P_PROC1_ADDR_CMD, val);
2568                 }
2569                 else {
2570                         val = (i / 8) | BNX2_RV2P_PROC2_ADDR_CMD_RDWR;
2571                         REG_WR(bp, BNX2_RV2P_PROC2_ADDR_CMD, val);
2572                 }
2573         }
2574
2575         /* Reset the processor, un-stall is done later. */
2576         if (rv2p_proc == RV2P_PROC1) {
2577                 REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC1_RESET);
2578         }
2579         else {
2580                 REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC2_RESET);
2581         }
2582 }
2583
2584 static int
2585 load_cpu_fw(struct bnx2 *bp, struct cpu_reg *cpu_reg, struct fw_info *fw)
2586 {
2587         u32 offset;
2588         u32 val;
2589         int rc;
2590
2591         /* Halt the CPU. */
2592         val = REG_RD_IND(bp, cpu_reg->mode);
2593         val |= cpu_reg->mode_value_halt;
2594         REG_WR_IND(bp, cpu_reg->mode, val);
2595         REG_WR_IND(bp, cpu_reg->state, cpu_reg->state_value_clear);
2596
2597         /* Load the Text area. */
2598         offset = cpu_reg->spad_base + (fw->text_addr - cpu_reg->mips_view_base);
2599         if (fw->gz_text) {
2600                 u32 text_len;
2601                 void *text;
2602
2603                 rc = bnx2_gunzip(bp, fw->gz_text, fw->gz_text_len, &text,
2604                                  &text_len);
2605                 if (rc)
2606                         return rc;
2607
2608                 fw->text = text;
2609         }
2610         if (fw->gz_text) {
2611                 int j;
2612
2613                 for (j = 0; j < (fw->text_len / 4); j++, offset += 4) {
2614                         REG_WR_IND(bp, offset, cpu_to_le32(fw->text[j]));
2615                 }
2616         }
2617
2618         /* Load the Data area. */
2619         offset = cpu_reg->spad_base + (fw->data_addr - cpu_reg->mips_view_base);
2620         if (fw->data) {
2621                 int j;
2622
2623                 for (j = 0; j < (fw->data_len / 4); j++, offset += 4) {
2624                         REG_WR_IND(bp, offset, fw->data[j]);
2625                 }
2626         }
2627
2628         /* Load the SBSS area. */
2629         offset = cpu_reg->spad_base + (fw->sbss_addr - cpu_reg->mips_view_base);
2630         if (fw->sbss) {
2631                 int j;
2632
2633                 for (j = 0; j < (fw->sbss_len / 4); j++, offset += 4) {
2634                         REG_WR_IND(bp, offset, fw->sbss[j]);
2635                 }
2636         }
2637
2638         /* Load the BSS area. */
2639         offset = cpu_reg->spad_base + (fw->bss_addr - cpu_reg->mips_view_base);
2640         if (fw->bss) {
2641                 int j;
2642
2643                 for (j = 0; j < (fw->bss_len/4); j++, offset += 4) {
2644                         REG_WR_IND(bp, offset, fw->bss[j]);
2645                 }
2646         }
2647
2648         /* Load the Read-Only area. */
2649         offset = cpu_reg->spad_base +
2650                 (fw->rodata_addr - cpu_reg->mips_view_base);
2651         if (fw->rodata) {
2652                 int j;
2653
2654                 for (j = 0; j < (fw->rodata_len / 4); j++, offset += 4) {
2655                         REG_WR_IND(bp, offset, fw->rodata[j]);
2656                 }
2657         }
2658
2659         /* Clear the pre-fetch instruction. */
2660         REG_WR_IND(bp, cpu_reg->inst, 0);
2661         REG_WR_IND(bp, cpu_reg->pc, fw->start_addr);
2662
2663         /* Start the CPU. */
2664         val = REG_RD_IND(bp, cpu_reg->mode);
2665         val &= ~cpu_reg->mode_value_halt;
2666         REG_WR_IND(bp, cpu_reg->state, cpu_reg->state_value_clear);
2667         REG_WR_IND(bp, cpu_reg->mode, val);
2668
2669         return 0;
2670 }
2671
/* Load and start all of the chip's internal processors: both RV2P
 * engines plus the RX, TX, TX patch-up, completion and (5709 only)
 * command processors.  Firmware images are stored gzip-compressed and
 * inflated through the scratch state set up by bnx2_gunzip_init().
 * Returns 0 on success or a gunzip/load error code.
 */
static int
bnx2_init_cpus(struct bnx2 *bp)
{
	struct cpu_reg cpu_reg;	/* register map, re-filled for each CPU */
	struct fw_info *fw;
	int rc = 0;
	void *text;
	u32 text_len;

	if ((rc = bnx2_gunzip_init(bp)) != 0)
		return rc;

	/* Initialize the RV2P processor. */
	rc = bnx2_gunzip(bp, bnx2_rv2p_proc1, sizeof(bnx2_rv2p_proc1), &text,
			 &text_len);
	if (rc)
		goto init_cpu_err;

	load_rv2p_fw(bp, text, text_len, RV2P_PROC1);

	rc = bnx2_gunzip(bp, bnx2_rv2p_proc2, sizeof(bnx2_rv2p_proc2), &text,
			 &text_len);
	if (rc)
		goto init_cpu_err;

	load_rv2p_fw(bp, text, text_len, RV2P_PROC2);

	/* Initialize the RX Processor. */
	cpu_reg.mode = BNX2_RXP_CPU_MODE;
	cpu_reg.mode_value_halt = BNX2_RXP_CPU_MODE_SOFT_HALT;
	cpu_reg.mode_value_sstep = BNX2_RXP_CPU_MODE_STEP_ENA;
	cpu_reg.state = BNX2_RXP_CPU_STATE;
	cpu_reg.state_value_clear = 0xffffff;
	cpu_reg.gpr0 = BNX2_RXP_CPU_REG_FILE;
	cpu_reg.evmask = BNX2_RXP_CPU_EVENT_MASK;
	cpu_reg.pc = BNX2_RXP_CPU_PROGRAM_COUNTER;
	cpu_reg.inst = BNX2_RXP_CPU_INSTRUCTION;
	cpu_reg.bp = BNX2_RXP_CPU_HW_BREAKPOINT;
	cpu_reg.spad_base = BNX2_RXP_SCRATCH;
	cpu_reg.mips_view_base = 0x8000000;

	/* Each processor has chip-revision-specific firmware. */
	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		fw = &bnx2_rxp_fw_09;
	else
		fw = &bnx2_rxp_fw_06;

	rc = load_cpu_fw(bp, &cpu_reg, fw);
	if (rc)
		goto init_cpu_err;

	/* Initialize the TX Processor. */
	cpu_reg.mode = BNX2_TXP_CPU_MODE;
	cpu_reg.mode_value_halt = BNX2_TXP_CPU_MODE_SOFT_HALT;
	cpu_reg.mode_value_sstep = BNX2_TXP_CPU_MODE_STEP_ENA;
	cpu_reg.state = BNX2_TXP_CPU_STATE;
	cpu_reg.state_value_clear = 0xffffff;
	cpu_reg.gpr0 = BNX2_TXP_CPU_REG_FILE;
	cpu_reg.evmask = BNX2_TXP_CPU_EVENT_MASK;
	cpu_reg.pc = BNX2_TXP_CPU_PROGRAM_COUNTER;
	cpu_reg.inst = BNX2_TXP_CPU_INSTRUCTION;
	cpu_reg.bp = BNX2_TXP_CPU_HW_BREAKPOINT;
	cpu_reg.spad_base = BNX2_TXP_SCRATCH;
	cpu_reg.mips_view_base = 0x8000000;

	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		fw = &bnx2_txp_fw_09;
	else
		fw = &bnx2_txp_fw_06;

	rc = load_cpu_fw(bp, &cpu_reg, fw);
	if (rc)
		goto init_cpu_err;

	/* Initialize the TX Patch-up Processor. */
	cpu_reg.mode = BNX2_TPAT_CPU_MODE;
	cpu_reg.mode_value_halt = BNX2_TPAT_CPU_MODE_SOFT_HALT;
	cpu_reg.mode_value_sstep = BNX2_TPAT_CPU_MODE_STEP_ENA;
	cpu_reg.state = BNX2_TPAT_CPU_STATE;
	cpu_reg.state_value_clear = 0xffffff;
	cpu_reg.gpr0 = BNX2_TPAT_CPU_REG_FILE;
	cpu_reg.evmask = BNX2_TPAT_CPU_EVENT_MASK;
	cpu_reg.pc = BNX2_TPAT_CPU_PROGRAM_COUNTER;
	cpu_reg.inst = BNX2_TPAT_CPU_INSTRUCTION;
	cpu_reg.bp = BNX2_TPAT_CPU_HW_BREAKPOINT;
	cpu_reg.spad_base = BNX2_TPAT_SCRATCH;
	cpu_reg.mips_view_base = 0x8000000;

	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		fw = &bnx2_tpat_fw_09;
	else
		fw = &bnx2_tpat_fw_06;

	rc = load_cpu_fw(bp, &cpu_reg, fw);
	if (rc)
		goto init_cpu_err;

	/* Initialize the Completion Processor. */
	cpu_reg.mode = BNX2_COM_CPU_MODE;
	cpu_reg.mode_value_halt = BNX2_COM_CPU_MODE_SOFT_HALT;
	cpu_reg.mode_value_sstep = BNX2_COM_CPU_MODE_STEP_ENA;
	cpu_reg.state = BNX2_COM_CPU_STATE;
	cpu_reg.state_value_clear = 0xffffff;
	cpu_reg.gpr0 = BNX2_COM_CPU_REG_FILE;
	cpu_reg.evmask = BNX2_COM_CPU_EVENT_MASK;
	cpu_reg.pc = BNX2_COM_CPU_PROGRAM_COUNTER;
	cpu_reg.inst = BNX2_COM_CPU_INSTRUCTION;
	cpu_reg.bp = BNX2_COM_CPU_HW_BREAKPOINT;
	cpu_reg.spad_base = BNX2_COM_SCRATCH;
	cpu_reg.mips_view_base = 0x8000000;

	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		fw = &bnx2_com_fw_09;
	else
		fw = &bnx2_com_fw_06;

	rc = load_cpu_fw(bp, &cpu_reg, fw);
	if (rc)
		goto init_cpu_err;

	/* Initialize the Command Processor.  Only the 5709 has CP
	 * firmware to load; earlier chips skip this step entirely. */
	cpu_reg.mode = BNX2_CP_CPU_MODE;
	cpu_reg.mode_value_halt = BNX2_CP_CPU_MODE_SOFT_HALT;
	cpu_reg.mode_value_sstep = BNX2_CP_CPU_MODE_STEP_ENA;
	cpu_reg.state = BNX2_CP_CPU_STATE;
	cpu_reg.state_value_clear = 0xffffff;
	cpu_reg.gpr0 = BNX2_CP_CPU_REG_FILE;
	cpu_reg.evmask = BNX2_CP_CPU_EVENT_MASK;
	cpu_reg.pc = BNX2_CP_CPU_PROGRAM_COUNTER;
	cpu_reg.inst = BNX2_CP_CPU_INSTRUCTION;
	cpu_reg.bp = BNX2_CP_CPU_HW_BREAKPOINT;
	cpu_reg.spad_base = BNX2_CP_SCRATCH;
	cpu_reg.mips_view_base = 0x8000000;

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		fw = &bnx2_cp_fw_09;

		rc = load_cpu_fw(bp, &cpu_reg, fw);
		if (rc)
			goto init_cpu_err;
	}
init_cpu_err:
	/* Free the decompression state whether or not loading succeeded. */
	bnx2_gunzip_end(bp);
	return rc;
}
2816
/* Move the device between PCI power states.  Only D0 (full power) and
 * D3hot (suspend, optionally armed for Wake-on-LAN) are supported; any
 * other state returns -EINVAL.  For D3hot with WoL enabled, the link is
 * renegotiated down to 10/100 and the MAC is configured to recognize
 * magic/ACPI wake packets before power is cut.
 */
static int
bnx2_set_power_state(struct bnx2 *bp, pci_power_t state)
{
	u16 pmcsr;

	pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);

	switch (state) {
	case PCI_D0: {
		u32 val;

		/* Set the state field to D0 and clear any latched PME
		 * status (the status bit is write-one-to-clear). */
		pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
			(pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
			PCI_PM_CTRL_PME_STATUS);

		if (pmcsr & PCI_PM_CTRL_STATE_MASK)
			/* delay required during transition out of D3hot */
			msleep(20);

		/* Ack any received magic/ACPI packet indications and
		 * stop looking for magic packets while running. */
		val = REG_RD(bp, BNX2_EMAC_MODE);
		val |= BNX2_EMAC_MODE_MPKT_RCVD | BNX2_EMAC_MODE_ACPI_RCVD;
		val &= ~BNX2_EMAC_MODE_MPKT;
		REG_WR(bp, BNX2_EMAC_MODE, val);

		val = REG_RD(bp, BNX2_RPM_CONFIG);
		val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
		REG_WR(bp, BNX2_RPM_CONFIG, val);
		break;
	}
	case PCI_D3hot: {
		int i;
		u32 val, wol_msg;

		if (bp->wol) {
			u32 advertising;
			u8 autoneg;

			/* Temporarily force 10/100 autonegotiation for
			 * the low-power link, then restore the user's
			 * settings afterwards. */
			autoneg = bp->autoneg;
			advertising = bp->advertising;

			bp->autoneg = AUTONEG_SPEED;
			bp->advertising = ADVERTISED_10baseT_Half |
				ADVERTISED_10baseT_Full |
				ADVERTISED_100baseT_Half |
				ADVERTISED_100baseT_Full |
				ADVERTISED_Autoneg;

			bnx2_setup_copper_phy(bp);

			bp->autoneg = autoneg;
			bp->advertising = advertising;

			bnx2_set_mac_addr(bp);

			val = REG_RD(bp, BNX2_EMAC_MODE);

			/* Enable port mode. */
			val &= ~BNX2_EMAC_MODE_PORT;
			val |= BNX2_EMAC_MODE_PORT_MII |
			       BNX2_EMAC_MODE_MPKT_RCVD |
			       BNX2_EMAC_MODE_ACPI_RCVD |
			       BNX2_EMAC_MODE_MPKT;

			REG_WR(bp, BNX2_EMAC_MODE, val);

			/* receive all multicast */
			for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
				REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
				       0xffffffff);
			}
			REG_WR(bp, BNX2_EMAC_RX_MODE,
			       BNX2_EMAC_RX_MODE_SORT_MODE);

			/* Program sort-user rule 0 (clear, set, then
			 * enable) to accept broadcast and multicast. */
			val = 1 | BNX2_RPM_SORT_USER0_BC_EN |
			      BNX2_RPM_SORT_USER0_MC_EN;
			REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
			REG_WR(bp, BNX2_RPM_SORT_USER0, val);
			REG_WR(bp, BNX2_RPM_SORT_USER0, val |
			       BNX2_RPM_SORT_USER0_ENA);

			/* Need to enable EMAC and RPM for WOL. */
			REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
			       BNX2_MISC_ENABLE_SET_BITS_RX_PARSER_MAC_ENABLE |
			       BNX2_MISC_ENABLE_SET_BITS_TX_HEADER_Q_ENABLE |
			       BNX2_MISC_ENABLE_SET_BITS_EMAC_ENABLE);

			val = REG_RD(bp, BNX2_RPM_CONFIG);
			val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
			REG_WR(bp, BNX2_RPM_CONFIG, val);

			wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
		}
		else {
			wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
		}

		/* Tell the bootcode we are suspending (with or without
		 * WoL) unless WoL is unsupported on this board. */
		if (!(bp->flags & NO_WOL_FLAG))
			bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT3 | wol_msg, 0);

		/* PCI-PM state field value 3 (11b) selects D3hot. */
		pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
		if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
		    (CHIP_ID(bp) == CHIP_ID_5706_A1)) {

			/* NOTE(review): 5706 A0/A1 only enter D3hot when
			 * WoL is armed — presumably an erratum
			 * workaround; confirm against chip errata. */
			if (bp->wol)
				pmcsr |= 3;
		}
		else {
			pmcsr |= 3;
		}
		if (bp->wol) {
			pmcsr |= PCI_PM_CTRL_PME_ENABLE;
		}
		pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
				      pmcsr);

		/* No more memory access after this point until
		 * device is brought back to D0.
		 */
		udelay(50);
		break;
	}
	default:
		return -EINVAL;
	}
	return 0;
}
2943
2944 static int
2945 bnx2_acquire_nvram_lock(struct bnx2 *bp)
2946 {
2947         u32 val;
2948         int j;
2949
2950         /* Request access to the flash interface. */
2951         REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_SET2);
2952         for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
2953                 val = REG_RD(bp, BNX2_NVM_SW_ARB);
2954                 if (val & BNX2_NVM_SW_ARB_ARB_ARB2)
2955                         break;
2956
2957                 udelay(5);
2958         }
2959
2960         if (j >= NVRAM_TIMEOUT_COUNT)
2961                 return -EBUSY;
2962
2963         return 0;
2964 }
2965
2966 static int
2967 bnx2_release_nvram_lock(struct bnx2 *bp)
2968 {
2969         int j;
2970         u32 val;
2971
2972         /* Relinquish nvram interface. */
2973         REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_CLR2);
2974
2975         for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
2976                 val = REG_RD(bp, BNX2_NVM_SW_ARB);
2977                 if (!(val & BNX2_NVM_SW_ARB_ARB_ARB2))
2978                         break;
2979
2980                 udelay(5);
2981         }
2982
2983         if (j >= NVRAM_TIMEOUT_COUNT)
2984                 return -EBUSY;
2985
2986         return 0;
2987 }
2988
2989
2990 static int
2991 bnx2_enable_nvram_write(struct bnx2 *bp)
2992 {
2993         u32 val;
2994
2995         val = REG_RD(bp, BNX2_MISC_CFG);
2996         REG_WR(bp, BNX2_MISC_CFG, val | BNX2_MISC_CFG_NVM_WR_EN_PCI);
2997
2998         if (!bp->flash_info->buffered) {
2999                 int j;
3000
3001                 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
3002                 REG_WR(bp, BNX2_NVM_COMMAND,
3003                        BNX2_NVM_COMMAND_WREN | BNX2_NVM_COMMAND_DOIT);
3004
3005                 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3006                         udelay(5);
3007
3008                         val = REG_RD(bp, BNX2_NVM_COMMAND);
3009                         if (val & BNX2_NVM_COMMAND_DONE)
3010                                 break;
3011                 }
3012
3013                 if (j >= NVRAM_TIMEOUT_COUNT)
3014                         return -EBUSY;
3015         }
3016         return 0;
3017 }
3018
3019 static void
3020 bnx2_disable_nvram_write(struct bnx2 *bp)
3021 {
3022         u32 val;
3023
3024         val = REG_RD(bp, BNX2_MISC_CFG);
3025         REG_WR(bp, BNX2_MISC_CFG, val & ~BNX2_MISC_CFG_NVM_WR_EN);
3026 }
3027
3028
3029 static void
3030 bnx2_enable_nvram_access(struct bnx2 *bp)
3031 {
3032         u32 val;
3033
3034         val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
3035         /* Enable both bits, even on read. */
3036         REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
3037                val | BNX2_NVM_ACCESS_ENABLE_EN | BNX2_NVM_ACCESS_ENABLE_WR_EN);
3038 }
3039
3040 static void
3041 bnx2_disable_nvram_access(struct bnx2 *bp)
3042 {
3043         u32 val;
3044
3045         val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
3046         /* Disable both bits, even after read. */
3047         REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
3048                 val & ~(BNX2_NVM_ACCESS_ENABLE_EN |
3049                         BNX2_NVM_ACCESS_ENABLE_WR_EN));
3050 }
3051
3052 static int
3053 bnx2_nvram_erase_page(struct bnx2 *bp, u32 offset)
3054 {
3055         u32 cmd;
3056         int j;
3057
3058         if (bp->flash_info->buffered)
3059                 /* Buffered flash, no erase needed */
3060                 return 0;
3061
3062         /* Build an erase command */
3063         cmd = BNX2_NVM_COMMAND_ERASE | BNX2_NVM_COMMAND_WR |
3064               BNX2_NVM_COMMAND_DOIT;
3065
3066         /* Need to clear DONE bit separately. */
3067         REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
3068
3069         /* Address of the NVRAM to read from. */
3070         REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
3071
3072         /* Issue an erase command. */
3073         REG_WR(bp, BNX2_NVM_COMMAND, cmd);
3074
3075         /* Wait for completion. */
3076         for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3077                 u32 val;
3078
3079                 udelay(5);
3080
3081                 val = REG_RD(bp, BNX2_NVM_COMMAND);
3082                 if (val & BNX2_NVM_COMMAND_DONE)
3083                         break;
3084         }
3085
3086         if (j >= NVRAM_TIMEOUT_COUNT)
3087                 return -EBUSY;
3088
3089         return 0;
3090 }
3091
3092 static int
3093 bnx2_nvram_read_dword(struct bnx2 *bp, u32 offset, u8 *ret_val, u32 cmd_flags)
3094 {
3095         u32 cmd;
3096         int j;
3097
3098         /* Build the command word. */
3099         cmd = BNX2_NVM_COMMAND_DOIT | cmd_flags;
3100
3101         /* Calculate an offset of a buffered flash. */
3102         if (bp->flash_info->buffered) {
3103                 offset = ((offset / bp->flash_info->page_size) <<
3104                            bp->flash_info->page_bits) +
3105                           (offset % bp->flash_info->page_size);
3106         }
3107
3108         /* Need to clear DONE bit separately. */
3109         REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
3110
3111         /* Address of the NVRAM to read from. */
3112         REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
3113
3114         /* Issue a read command. */
3115         REG_WR(bp, BNX2_NVM_COMMAND, cmd);
3116
3117         /* Wait for completion. */
3118         for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3119                 u32 val;
3120
3121                 udelay(5);
3122
3123                 val = REG_RD(bp, BNX2_NVM_COMMAND);
3124                 if (val & BNX2_NVM_COMMAND_DONE) {
3125                         val = REG_RD(bp, BNX2_NVM_READ);
3126
3127                         val = be32_to_cpu(val);
3128                         memcpy(ret_val, &val, 4);
3129                         break;
3130                 }
3131         }
3132         if (j >= NVRAM_TIMEOUT_COUNT)
3133                 return -EBUSY;
3134
3135         return 0;
3136 }
3137
3138
3139 static int
3140 bnx2_nvram_write_dword(struct bnx2 *bp, u32 offset, u8 *val, u32 cmd_flags)
3141 {
3142         u32 cmd, val32;
3143         int j;
3144
3145         /* Build the command word. */
3146         cmd = BNX2_NVM_COMMAND_DOIT | BNX2_NVM_COMMAND_WR | cmd_flags;
3147
3148         /* Calculate an offset of a buffered flash. */
3149         if (bp->flash_info->buffered) {
3150                 offset = ((offset / bp->flash_info->page_size) <<
3151                           bp->flash_info->page_bits) +
3152                          (offset % bp->flash_info->page_size);
3153         }
3154
3155         /* Need to clear DONE bit separately. */
3156         REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
3157
3158         memcpy(&val32, val, 4);
3159         val32 = cpu_to_be32(val32);
3160
3161         /* Write the data. */
3162         REG_WR(bp, BNX2_NVM_WRITE, val32);
3163
3164         /* Address of the NVRAM to write to. */
3165         REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
3166
3167         /* Issue the write command. */
3168         REG_WR(bp, BNX2_NVM_COMMAND, cmd);
3169
3170         /* Wait for completion. */
3171         for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3172                 udelay(5);
3173
3174                 if (REG_RD(bp, BNX2_NVM_COMMAND) & BNX2_NVM_COMMAND_DONE)
3175                         break;
3176         }
3177         if (j >= NVRAM_TIMEOUT_COUNT)
3178                 return -EBUSY;
3179
3180         return 0;
3181 }
3182
/* Identify the attached flash/EEPROM part from the NVM strapping pins
 * and point bp->flash_info at its flash_spec entry, reconfiguring the
 * flash interface if the bootcode has not already done so.  Also
 * determines bp->flash_size.  Returns 0 on success, -ENODEV for an
 * unknown part, or a lock-acquisition error.
 */
static int
bnx2_init_nvram(struct bnx2 *bp)
{
	u32 val;
	int j, entry_count, rc;
	struct flash_spec *flash;

	/* Determine the selected interface. */
	val = REG_RD(bp, BNX2_NVM_CFG1);

	entry_count = sizeof(flash_table) / sizeof(struct flash_spec);

	rc = 0;
	/* NOTE(review): bit 30 of NVM_CFG1 appears to flag "interface
	 * already reconfigured", and bit 23 selects the backup strap
	 * set below — confirm against the NVM_CFG1 register layout. */
	if (val & 0x40000000) {

		/* Flash interface has been reconfigured */
		for (j = 0, flash = &flash_table[0]; j < entry_count;
		     j++, flash++) {
			if ((val & FLASH_BACKUP_STRAP_MASK) ==
			    (flash->config1 & FLASH_BACKUP_STRAP_MASK)) {
				bp->flash_info = flash;
				break;
			}
		}
	}
	else {
		u32 mask;
		/* Not yet been reconfigured */

		if (val & (1 << 23))
			mask = FLASH_BACKUP_STRAP_MASK;
		else
			mask = FLASH_STRAP_MASK;

		for (j = 0, flash = &flash_table[0]; j < entry_count;
			j++, flash++) {

			if ((val & mask) == (flash->strapping & mask)) {
				bp->flash_info = flash;

				/* Request access to the flash interface. */
				if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
					return rc;

				/* Enable access to flash interface */
				bnx2_enable_nvram_access(bp);

				/* Reconfigure the flash interface */
				REG_WR(bp, BNX2_NVM_CFG1, flash->config1);
				REG_WR(bp, BNX2_NVM_CFG2, flash->config2);
				REG_WR(bp, BNX2_NVM_CFG3, flash->config3);
				REG_WR(bp, BNX2_NVM_WRITE1, flash->write1);

				/* Disable access to flash interface */
				bnx2_disable_nvram_access(bp);
				bnx2_release_nvram_lock(bp);

				break;
			}
		}
	} /* if (val & 0x40000000) */

	/* Neither loop matched: no known flash part. */
	if (j == entry_count) {
		bp->flash_info = NULL;
		printk(KERN_ALERT PFX "Unknown flash/EEPROM type.\n");
		return -ENODEV;
	}

	/* Prefer the size reported by shared hardware config; fall back
	 * to the flash_spec's total size when it is zero. */
	val = REG_RD_IND(bp, bp->shmem_base + BNX2_SHARED_HW_CFG_CONFIG2);
	val &= BNX2_SHARED_HW_CFG2_NVM_SIZE_MASK;
	if (val)
		bp->flash_size = val;
	else
		bp->flash_size = bp->flash_info->total_size;

	return rc;
}
3260
3261 static int
3262 bnx2_nvram_read(struct bnx2 *bp, u32 offset, u8 *ret_buf,
3263                 int buf_size)
3264 {
3265         int rc = 0;
3266         u32 cmd_flags, offset32, len32, extra;
3267
3268         if (buf_size == 0)
3269                 return 0;
3270
3271         /* Request access to the flash interface. */
3272         if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
3273                 return rc;
3274
3275         /* Enable access to flash interface */
3276         bnx2_enable_nvram_access(bp);
3277
3278         len32 = buf_size;
3279         offset32 = offset;
3280         extra = 0;
3281
3282         cmd_flags = 0;
3283
3284         if (offset32 & 3) {
3285                 u8 buf[4];
3286                 u32 pre_len;
3287
3288                 offset32 &= ~3;
3289                 pre_len = 4 - (offset & 3);
3290
3291                 if (pre_len >= len32) {
3292                         pre_len = len32;
3293                         cmd_flags = BNX2_NVM_COMMAND_FIRST |
3294                                     BNX2_NVM_COMMAND_LAST;
3295                 }
3296                 else {
3297                         cmd_flags = BNX2_NVM_COMMAND_FIRST;
3298                 }
3299
3300                 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
3301
3302                 if (rc)
3303                         return rc;
3304
3305                 memcpy(ret_buf, buf + (offset & 3), pre_len);
3306
3307                 offset32 += 4;
3308                 ret_buf += pre_len;
3309                 len32 -= pre_len;
3310         }
3311         if (len32 & 3) {
3312                 extra = 4 - (len32 & 3);
3313                 len32 = (len32 + 4) & ~3;
3314         }
3315
3316         if (len32 == 4) {
3317                 u8 buf[4];
3318
3319                 if (cmd_flags)
3320                         cmd_flags = BNX2_NVM_COMMAND_LAST;
3321                 else
3322                         cmd_flags = BNX2_NVM_COMMAND_FIRST |
3323                                     BNX2_NVM_COMMAND_LAST;
3324
3325                 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
3326
3327                 memcpy(ret_buf, buf, 4 - extra);
3328         }
3329         else if (len32 > 0) {
3330                 u8 buf[4];
3331
3332                 /* Read the first word. */
3333                 if (cmd_flags)
3334                         cmd_flags = 0;
3335                 else
3336                         cmd_flags = BNX2_NVM_COMMAND_FIRST;
3337
3338                 rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, cmd_flags);
3339
3340                 /* Advance to the next dword. */
3341                 offset32 += 4;
3342                 ret_buf += 4;
3343                 len32 -= 4;
3344
3345                 while (len32 > 4 && rc == 0) {
3346                         rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, 0);
3347
3348                         /* Advance to the next dword. */
3349                         offset32 += 4;
3350                         ret_buf += 4;
3351                         len32 -= 4;
3352                 }
3353
3354                 if (rc)
3355                         return rc;
3356
3357                 cmd_flags = BNX2_NVM_COMMAND_LAST;
3358                 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
3359
3360                 memcpy(ret_buf, buf, 4 - extra);
3361         }
3362
3363         /* Disable access to flash interface */
3364         bnx2_disable_nvram_access(bp);
3365
3366         bnx2_release_nvram_lock(bp);
3367
3368         return rc;
3369 }
3370
static int
bnx2_nvram_write(struct bnx2 *bp, u32 offset, u8 *data_buf,
		int buf_size)
{
	/* Write buf_size bytes from data_buf to NVRAM at the given offset.
	 * Handles arbitrary (unaligned) offsets and lengths by widening the
	 * request to dword boundaries and preserving the surrounding bytes.
	 * For non-buffered flash parts, each affected page is read in full,
	 * erased, and rewritten with the merged data.
	 * Returns 0 on success or a negative errno. */
	u32 written, offset32, len32;
	u8 *buf, start[4], end[4], *align_buf = NULL, *flash_buffer = NULL;
	int rc = 0;
	int align_start, align_end;

	buf = data_buf;
	offset32 = offset;
	len32 = buf_size;
	align_start = align_end = 0;

	/* Unaligned start: round the offset down to a dword boundary and
	 * pre-read that dword so its leading bytes can be preserved. */
	if ((align_start = (offset32 & 3))) {
		offset32 &= ~3;
		len32 += align_start;
		if (len32 < 4)
			len32 = 4;
		if ((rc = bnx2_nvram_read(bp, offset32, start, 4)))
			return rc;
	}

	/* Unaligned end: round the length up to a dword multiple and
	 * pre-read the final dword so its trailing bytes can be preserved. */
	if (len32 & 3) {
		align_end = 4 - (len32 & 3);
		len32 += align_end;
		if ((rc = bnx2_nvram_read(bp, offset32 + len32 - 4, end, 4)))
			return rc;
	}

	/* Build an aligned staging copy: saved head dword + caller data +
	 * saved tail dword. */
	if (align_start || align_end) {
		align_buf = kmalloc(len32, GFP_KERNEL);
		if (align_buf == NULL)
			return -ENOMEM;
		if (align_start) {
			memcpy(align_buf, start, 4);
		}
		if (align_end) {
			memcpy(align_buf + len32 - 4, end, 4);
		}
		memcpy(align_buf + align_start, data_buf, buf_size);
		buf = align_buf;
	}

	/* Non-buffered flash needs a scratch buffer to hold one whole page
	 * during the read-erase-rewrite cycle.  264 bytes is assumed large
	 * enough for any supported page_size — presumably per the driver's
	 * flash tables; TODO confirm against bnx2.h. */
	if (bp->flash_info->buffered == 0) {
		flash_buffer = kmalloc(264, GFP_KERNEL);
		if (flash_buffer == NULL) {
			rc = -ENOMEM;
			goto nvram_write_end;
		}
	}

	/* Write page by page until all len32 bytes are committed. */
	written = 0;
	while ((written < len32) && (rc == 0)) {
		u32 page_start, page_end, data_start, data_end;
		u32 addr, cmd_flags;
		int i;

		/* Find the page_start addr */
		page_start = offset32 + written;
		page_start -= (page_start % bp->flash_info->page_size);
		/* Find the page_end addr */
		page_end = page_start + bp->flash_info->page_size;
		/* Find the data_start addr */
		data_start = (written == 0) ? offset32 : page_start;
		/* Find the data_end addr */
		data_end = (page_end > offset32 + len32) ?
			(offset32 + len32) : page_end;

		/* Request access to the flash interface. */
		if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
			goto nvram_write_end;

		/* Enable access to flash interface */
		bnx2_enable_nvram_access(bp);

		cmd_flags = BNX2_NVM_COMMAND_FIRST;
		if (bp->flash_info->buffered == 0) {
			int j;

			/* Read the whole page into the buffer
			 * (non-buffer flash only) */
			for (j = 0; j < bp->flash_info->page_size; j += 4) {
				if (j == (bp->flash_info->page_size - 4)) {
					cmd_flags |= BNX2_NVM_COMMAND_LAST;
				}
				rc = bnx2_nvram_read_dword(bp,
					page_start + j,
					&flash_buffer[j],
					cmd_flags);

				if (rc)
					goto nvram_write_end;

				cmd_flags = 0;
			}
		}

		/* Enable writes to flash interface (unlock write-protect) */
		if ((rc = bnx2_enable_nvram_write(bp)) != 0)
			goto nvram_write_end;

		/* Loop to write back the buffer data from page_start to
		 * data_start */
		i = 0;
		if (bp->flash_info->buffered == 0) {
			/* Erase the page */
			if ((rc = bnx2_nvram_erase_page(bp, page_start)) != 0)
				goto nvram_write_end;

			/* Re-enable the write again for the actual write */
			bnx2_enable_nvram_write(bp);

			for (addr = page_start; addr < data_start;
				addr += 4, i += 4) {

				rc = bnx2_nvram_write_dword(bp, addr,
					&flash_buffer[i], cmd_flags);

				if (rc != 0)
					goto nvram_write_end;

				cmd_flags = 0;
			}
		}

		/* Loop to write the new data from data_start to data_end */
		for (addr = data_start; addr < data_end; addr += 4, i += 4) {
			if ((addr == page_end - 4) ||
				((bp->flash_info->buffered) &&
				 (addr == data_end - 4))) {

				cmd_flags |= BNX2_NVM_COMMAND_LAST;
			}
			rc = bnx2_nvram_write_dword(bp, addr, buf,
				cmd_flags);

			if (rc != 0)
				goto nvram_write_end;

			cmd_flags = 0;
			buf += 4;
		}

		/* Loop to write back the buffer data from data_end
		 * to page_end */
		if (bp->flash_info->buffered == 0) {
			for (addr = data_end; addr < page_end;
				addr += 4, i += 4) {

				if (addr == page_end-4) {
					cmd_flags = BNX2_NVM_COMMAND_LAST;
				}
				rc = bnx2_nvram_write_dword(bp, addr,
					&flash_buffer[i], cmd_flags);

				if (rc != 0)
					goto nvram_write_end;

				cmd_flags = 0;
			}
		}

		/* Disable writes to flash interface (lock write-protect) */
		bnx2_disable_nvram_write(bp);

		/* Disable access to flash interface */
		bnx2_disable_nvram_access(bp);
		bnx2_release_nvram_lock(bp);

		/* Increment written */
		written += data_end - data_start;
	}

nvram_write_end:
	/* kfree(NULL) is a no-op, so both paths are safe here. */
	kfree(flash_buffer);
	kfree(align_buf);
	return rc;
}
3550
3551 static int
3552 bnx2_reset_chip(struct bnx2 *bp, u32 reset_code)
3553 {
3554         u32 val;
3555         int i, rc = 0;
3556
3557         /* Wait for the current PCI transaction to complete before
3558          * issuing a reset. */
3559         REG_WR(bp, BNX2_MISC_ENABLE_CLR_BITS,
3560                BNX2_MISC_ENABLE_CLR_BITS_TX_DMA_ENABLE |
3561                BNX2_MISC_ENABLE_CLR_BITS_DMA_ENGINE_ENABLE |
3562                BNX2_MISC_ENABLE_CLR_BITS_RX_DMA_ENABLE |
3563                BNX2_MISC_ENABLE_CLR_BITS_HOST_COALESCE_ENABLE);
3564         val = REG_RD(bp, BNX2_MISC_ENABLE_CLR_BITS);
3565         udelay(5);
3566
3567         /* Wait for the firmware to tell us it is ok to issue a reset. */
3568         bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT0 | reset_code, 1);
3569
3570         /* Deposit a driver reset signature so the firmware knows that
3571          * this is a soft reset. */
3572         REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_RESET_SIGNATURE,
3573                    BNX2_DRV_RESET_SIGNATURE_MAGIC);
3574
3575         /* Do a dummy read to force the chip to complete all current transaction
3576          * before we issue a reset. */
3577         val = REG_RD(bp, BNX2_MISC_ID);
3578
3579         if (CHIP_NUM(bp) == CHIP_NUM_5709) {
3580                 REG_WR(bp, BNX2_MISC_COMMAND, BNX2_MISC_COMMAND_SW_RESET);
3581                 REG_RD(bp, BNX2_MISC_COMMAND);
3582                 udelay(5);
3583
3584                 val = BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
3585                       BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;
3586
3587                 pci_write_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG, val);
3588
3589         } else {
3590                 val = BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
3591                       BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
3592                       BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;
3593
3594                 /* Chip reset. */
3595                 REG_WR(bp, BNX2_PCICFG_MISC_CONFIG, val);
3596
3597                 if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
3598                     (CHIP_ID(bp) == CHIP_ID_5706_A1)) {
3599                         current->state = TASK_UNINTERRUPTIBLE;
3600                         schedule_timeout(HZ / 50);
3601                 }
3602
3603                 /* Reset takes approximate 30 usec */
3604                 for (i = 0; i < 10; i++) {
3605                         val = REG_RD(bp, BNX2_PCICFG_MISC_CONFIG);
3606                         if ((val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
3607                                     BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) == 0)
3608                                 break;
3609                         udelay(10);
3610                 }
3611
3612                 if (val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
3613                            BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) {
3614                         printk(KERN_ERR PFX "Chip reset did not complete\n");
3615                         return -EBUSY;
3616                 }
3617         }
3618
3619         /* Make sure byte swapping is properly configured. */
3620         val = REG_RD(bp, BNX2_PCI_SWAP_DIAG0);
3621         if (val != 0x01020304) {
3622                 printk(KERN_ERR PFX "Chip not in correct endian mode\n");
3623                 return -ENODEV;
3624         }
3625
3626         /* Wait for the firmware to finish its initialization. */
3627         rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT1 | reset_code, 0);
3628         if (rc)
3629                 return rc;
3630
3631         if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
3632                 /* Adjust the voltage regular to two steps lower.  The default
3633                  * of this register is 0x0000000e. */
3634                 REG_WR(bp, BNX2_MISC_VREG_CONTROL, 0x000000fa);
3635
3636                 /* Remove bad rbuf memory from the free pool. */
3637                 rc = bnx2_alloc_bad_rbuf(bp);
3638         }
3639
3640         return rc;
3641 }
3642
static int
bnx2_init_chip(struct bnx2 *bp)
{
	/* Program the chip after a reset: DMA configuration, context memory,
	 * internal CPUs and firmware, MAC address, queue/page registers,
	 * host-coalescing parameters, and the RX filter.  Finally tells the
	 * firmware initialization is done and enables all blocks.
	 * Returns 0 on success or the error from the firmware sync. */
	u32 val;
	int rc;

	/* Make sure the interrupt is not active. */
	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, BNX2_PCICFG_INT_ACK_CMD_MASK_INT);

	/* DMA byte/word swap setup; CNTL_BYTE_SWAP is only needed on
	 * big-endian hosts. */
	val = BNX2_DMA_CONFIG_DATA_BYTE_SWAP |
	      BNX2_DMA_CONFIG_DATA_WORD_SWAP |
#ifdef __BIG_ENDIAN
	      BNX2_DMA_CONFIG_CNTL_BYTE_SWAP |
#endif
	      BNX2_DMA_CONFIG_CNTL_WORD_SWAP |
	      DMA_READ_CHANS << 12 |
	      DMA_WRITE_CHANS << 16;

	val |= (0x2 << 20) | (1 << 11);

	/* Extra DMA config bit for 133 MHz PCI-X buses. */
	if ((bp->flags & PCIX_FLAG) && (bp->bus_speed_mhz == 133))
		val |= (1 << 23);

	if ((CHIP_NUM(bp) == CHIP_NUM_5706) &&
	    (CHIP_ID(bp) != CHIP_ID_5706_A0) && !(bp->flags & PCIX_FLAG))
		val |= BNX2_DMA_CONFIG_CNTL_PING_PONG_DMA;

	REG_WR(bp, BNX2_DMA_CONFIG, val);

	/* 5706 A0 workaround: restrict TDMA to a single DMA. */
	if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
		val = REG_RD(bp, BNX2_TDMA_CONFIG);
		val |= BNX2_TDMA_CONFIG_ONE_DMA;
		REG_WR(bp, BNX2_TDMA_CONFIG, val);
	}

	/* On PCI-X, clear the Enable-Relaxed-Ordering bit in the command
	 * register. */
	if (bp->flags & PCIX_FLAG) {
		u16 val16;

		pci_read_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
				     &val16);
		pci_write_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
				      val16 & ~PCI_X_CMD_ERO);
	}

	REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
	       BNX2_MISC_ENABLE_SET_BITS_HOST_COALESCE_ENABLE |
	       BNX2_MISC_ENABLE_STATUS_BITS_RX_V2P_ENABLE |
	       BNX2_MISC_ENABLE_STATUS_BITS_CONTEXT_ENABLE);

	/* Initialize context mapping and zero out the quick contexts.  The
	 * context block must have already been enabled. */
	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		bnx2_init_5709_context(bp);
	else
		bnx2_init_context(bp);

	/* Load firmware into the on-chip processors. */
	if ((rc = bnx2_init_cpus(bp)) != 0)
		return rc;

	bnx2_init_nvram(bp);

	bnx2_set_mac_addr(bp);

	/* Mailbox queue: 256-byte kernel bypass blocks; 5709 A0/A1 need
	 * the halt-disable workaround bit. */
	val = REG_RD(bp, BNX2_MQ_CONFIG);
	val &= ~BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE;
	val |= BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE_256;
	if (CHIP_ID(bp) == CHIP_ID_5709_A0 || CHIP_ID(bp) == CHIP_ID_5709_A1)
		val |= BNX2_MQ_CONFIG_HALT_DIS;

	REG_WR(bp, BNX2_MQ_CONFIG, val);

	val = 0x10000 + (MAX_CID_CNT * MB_KERNEL_CTX_SIZE);
	REG_WR(bp, BNX2_MQ_KNL_BYP_WIND_START, val);
	REG_WR(bp, BNX2_MQ_KNL_WIND_END, val);

	/* Tell RV2P the host page size. */
	val = (BCM_PAGE_BITS - 8) << 24;
	REG_WR(bp, BNX2_RV2P_CONFIG, val);

	/* Configure page size. */
	val = REG_RD(bp, BNX2_TBDR_CONFIG);
	val &= ~BNX2_TBDR_CONFIG_PAGE_SIZE;
	val |= (BCM_PAGE_BITS - 8) << 24 | 0x40;
	REG_WR(bp, BNX2_TBDR_CONFIG, val);

	/* Seed the EMAC backoff generator from the MAC address. */
	val = bp->mac_addr[0] +
	      (bp->mac_addr[1] << 8) +
	      (bp->mac_addr[2] << 16) +
	      bp->mac_addr[3] +
	      (bp->mac_addr[4] << 8) +
	      (bp->mac_addr[5] << 16);
	REG_WR(bp, BNX2_EMAC_BACKOFF_SEED, val);

	/* Program the MTU.  Also include 4 bytes for CRC32. */
	val = bp->dev->mtu + ETH_HLEN + 4;
	if (val > (MAX_ETHERNET_PACKET_SIZE + 4))
		val |= BNX2_EMAC_RX_MTU_SIZE_JUMBO_ENA;
	REG_WR(bp, BNX2_EMAC_RX_MTU_SIZE, val);

	bp->last_status_idx = 0;
	bp->rx_mode = BNX2_EMAC_RX_MODE_SORT_MODE;

	/* Set up how to generate a link change interrupt. */
	REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);

	/* DMA addresses of the status and statistics blocks (64-bit,
	 * split into low/high halves). */
	REG_WR(bp, BNX2_HC_STATUS_ADDR_L,
	       (u64) bp->status_blk_mapping & 0xffffffff);
	REG_WR(bp, BNX2_HC_STATUS_ADDR_H, (u64) bp->status_blk_mapping >> 32);

	REG_WR(bp, BNX2_HC_STATISTICS_ADDR_L,
	       (u64) bp->stats_blk_mapping & 0xffffffff);
	REG_WR(bp, BNX2_HC_STATISTICS_ADDR_H,
	       (u64) bp->stats_blk_mapping >> 32);

	/* Host coalescing: each register packs the interrupt-mode value in
	 * the high 16 bits and the polling-mode value in the low 16. */
	REG_WR(bp, BNX2_HC_TX_QUICK_CONS_TRIP,
	       (bp->tx_quick_cons_trip_int << 16) | bp->tx_quick_cons_trip);

	REG_WR(bp, BNX2_HC_RX_QUICK_CONS_TRIP,
	       (bp->rx_quick_cons_trip_int << 16) | bp->rx_quick_cons_trip);

	REG_WR(bp, BNX2_HC_COMP_PROD_TRIP,
	       (bp->comp_prod_trip_int << 16) | bp->comp_prod_trip);

	REG_WR(bp, BNX2_HC_TX_TICKS, (bp->tx_ticks_int << 16) | bp->tx_ticks);

	REG_WR(bp, BNX2_HC_RX_TICKS, (bp->rx_ticks_int << 16) | bp->rx_ticks);

	REG_WR(bp, BNX2_HC_COM_TICKS,
	       (bp->com_ticks_int << 16) | bp->com_ticks);

	REG_WR(bp, BNX2_HC_CMD_TICKS,
	       (bp->cmd_ticks_int << 16) | bp->cmd_ticks);

	REG_WR(bp, BNX2_HC_STATS_TICKS, bp->stats_ticks & 0xffff00);
	REG_WR(bp, BNX2_HC_STAT_COLLECT_TICKS, 0xbb8);  /* 3ms */

	/* 5706 A1 cannot use the timer modes; all other chips can. */
	if (CHIP_ID(bp) == CHIP_ID_5706_A1)
		val = BNX2_HC_CONFIG_COLLECT_STATS;
	else {
		val = BNX2_HC_CONFIG_RX_TMR_MODE | BNX2_HC_CONFIG_TX_TMR_MODE |
		      BNX2_HC_CONFIG_COLLECT_STATS;
	}

	if (bp->flags & ONE_SHOT_MSI_FLAG)
		val |= BNX2_HC_CONFIG_ONE_SHOT;

	REG_WR(bp, BNX2_HC_CONFIG, val);

	/* Clear internal stats counters. */
	REG_WR(bp, BNX2_HC_COMMAND, BNX2_HC_COMMAND_CLR_STAT_NOW);

	REG_WR(bp, BNX2_HC_ATTN_BITS_ENABLE, STATUS_ATTN_EVENTS);

	/* Remember if the firmware runs ASF management. */
	if (REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_FEATURE) &
	    BNX2_PORT_FEATURE_ASF_ENABLED)
		bp->flags |= ASF_ENABLE_FLAG;

	/* Initialize the receive filter. */
	bnx2_set_rx_mode(bp->dev);

	/* Tell the firmware initialization is complete. */
	rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT2 | BNX2_DRV_MSG_CODE_RESET,
			  0);

	/* Enable the remaining blocks; the read flushes the write. */
	REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS, 0x5ffffff);
	REG_RD(bp, BNX2_MISC_ENABLE_SET_BITS);

	udelay(20);

	bp->hc_cmd = REG_RD(bp, BNX2_HC_COMMAND);

	return rc;
}
3814
3815 static void
3816 bnx2_init_tx_context(struct bnx2 *bp, u32 cid)
3817 {
3818         u32 val, offset0, offset1, offset2, offset3;
3819
3820         if (CHIP_NUM(bp) == CHIP_NUM_5709) {
3821                 offset0 = BNX2_L2CTX_TYPE_XI;
3822                 offset1 = BNX2_L2CTX_CMD_TYPE_XI;
3823                 offset2 = BNX2_L2CTX_TBDR_BHADDR_HI_XI;
3824                 offset3 = BNX2_L2CTX_TBDR_BHADDR_LO_XI;
3825         } else {
3826                 offset0 = BNX2_L2CTX_TYPE;
3827                 offset1 = BNX2_L2CTX_CMD_TYPE;
3828                 offset2 = BNX2_L2CTX_TBDR_BHADDR_HI;
3829                 offset3 = BNX2_L2CTX_TBDR_BHADDR_LO;
3830         }
3831         val = BNX2_L2CTX_TYPE_TYPE_L2 | BNX2_L2CTX_TYPE_SIZE_L2;
3832         CTX_WR(bp, GET_CID_ADDR(cid), offset0, val);
3833
3834         val = BNX2_L2CTX_CMD_TYPE_TYPE_L2 | (8 << 16);
3835         CTX_WR(bp, GET_CID_ADDR(cid), offset1, val);
3836
3837         val = (u64) bp->tx_desc_mapping >> 32;
3838         CTX_WR(bp, GET_CID_ADDR(cid), offset2, val);
3839
3840         val = (u64) bp->tx_desc_mapping & 0xffffffff;
3841         CTX_WR(bp, GET_CID_ADDR(cid), offset3, val);
3842 }
3843
3844 static void
3845 bnx2_init_tx_ring(struct bnx2 *bp)
3846 {
3847         struct tx_bd *txbd;
3848         u32 cid;
3849
3850         bp->tx_wake_thresh = bp->tx_ring_size / 2;
3851
3852         txbd = &bp->tx_desc_ring[MAX_TX_DESC_CNT];
3853
3854         txbd->tx_bd_haddr_hi = (u64) bp->tx_desc_mapping >> 32;
3855         txbd->tx_bd_haddr_lo = (u64) bp->tx_desc_mapping & 0xffffffff;
3856
3857         bp->tx_prod = 0;
3858         bp->tx_cons = 0;
3859         bp->hw_tx_cons = 0;
3860         bp->tx_prod_bseq = 0;
3861
3862         cid = TX_CID;
3863         bp->tx_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BIDX;
3864         bp->tx_bseq_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BSEQ;
3865
3866         bnx2_init_tx_context(bp, cid);
3867 }
3868
static void
bnx2_init_rx_ring(struct bnx2 *bp)
{
	/* Set up the RX descriptor rings: size every BD, chain the ring
	 * pages into a loop via each page's last descriptor, program the
	 * RX context, and pre-fill the ring with receive SKBs. */
	struct rx_bd *rxbd;
	int i;
	u16 prod, ring_prod;
	u32 val;

	/* 8 for CRC and VLAN */
	bp->rx_buf_use_size = bp->dev->mtu + ETH_HLEN + bp->rx_offset + 8;
	/* hw alignment */
	bp->rx_buf_size = bp->rx_buf_use_size + BNX2_RX_ALIGN;

	ring_prod = prod = bp->rx_prod = 0;
	bp->rx_cons = 0;
	bp->hw_rx_cons = 0;
	bp->rx_prod_bseq = 0;

	for (i = 0; i < bp->rx_max_ring; i++) {
		int j;

		/* Initialize all usable BDs in this ring page. */
		rxbd = &bp->rx_desc_ring[i][0];
		for (j = 0; j < MAX_RX_DESC_CNT; j++, rxbd++) {
			rxbd->rx_bd_len = bp->rx_buf_use_size;
			rxbd->rx_bd_flags = RX_BD_FLAGS_START | RX_BD_FLAGS_END;
		}
		/* The final descriptor of each page chains to the next
		 * page; the last page chains back to page 0. */
		if (i == (bp->rx_max_ring - 1))
			j = 0;
		else
			j = i + 1;
		rxbd->rx_bd_haddr_hi = (u64) bp->rx_desc_mapping[j] >> 32;
		rxbd->rx_bd_haddr_lo = (u64) bp->rx_desc_mapping[j] &
				       0xffffffff;
	}

	/* Program the RX context type and the head of the BD chain. */
	val = BNX2_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE;
	val |= BNX2_L2CTX_CTX_TYPE_SIZE_L2;
	val |= 0x02 << 8;
	CTX_WR(bp, GET_CID_ADDR(RX_CID), BNX2_L2CTX_CTX_TYPE, val);

	val = (u64) bp->rx_desc_mapping[0] >> 32;
	CTX_WR(bp, GET_CID_ADDR(RX_CID), BNX2_L2CTX_NX_BDHADDR_HI, val);

	val = (u64) bp->rx_desc_mapping[0] & 0xffffffff;
	CTX_WR(bp, GET_CID_ADDR(RX_CID), BNX2_L2CTX_NX_BDHADDR_LO, val);

	/* Attach an SKB to every descriptor; stop early on allocation
	 * failure (the ring then simply runs with fewer buffers). */
	for (i = 0; i < bp->rx_ring_size; i++) {
		if (bnx2_alloc_rx_skb(bp, ring_prod) < 0) {
			break;
		}
		prod = NEXT_RX_BD(prod);
		ring_prod = RX_RING_IDX(prod);
	}
	bp->rx_prod = prod;

	/* Publish the producer index and byte sequence to the hardware. */
	REG_WR16(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BDIDX, prod);

	REG_WR(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BSEQ, bp->rx_prod_bseq);
}
3928
3929 static void
3930 bnx2_set_rx_ring_size(struct bnx2 *bp, u32 size)
3931 {
3932         u32 num_rings, max;
3933
3934         bp->rx_ring_size = size;
3935         num_rings = 1;
3936         while (size > MAX_RX_DESC_CNT) {
3937                 size -= MAX_RX_DESC_CNT;
3938                 num_rings++;
3939         }
3940         /* round to next power of 2 */
3941         max = MAX_RX_RINGS;
3942         while ((max & num_rings) == 0)
3943                 max >>= 1;
3944
3945         if (num_rings != max)
3946                 max <<= 1;
3947
3948         bp->rx_max_ring = max;
3949         bp->rx_max_ring_idx = (bp->rx_max_ring * RX_DESC_CNT) - 1;
3950 }
3951
static void
bnx2_free_tx_skbs(struct bnx2 *bp)
{
	/* Unmap and free every SKB still queued on the TX ring.  Each
	 * packet uses one BD for the linear header plus one BD per page
	 * fragment, so the scan advances by nr_frags + 1 per packet.
	 * Safe to call when the ring was never allocated. */
	int i;

	if (bp->tx_buf_ring == NULL)
		return;

	for (i = 0; i < TX_DESC_CNT; ) {
		struct sw_bd *tx_buf = &bp->tx_buf_ring[i];
		struct sk_buff *skb = tx_buf->skb;
		int j, last;

		/* Empty slot: not the start of a packet. */
		if (skb == NULL) {
			i++;
			continue;
		}

		/* First BD maps the linear part of the SKB. */
		pci_unmap_single(bp->pdev, pci_unmap_addr(tx_buf, mapping),
			skb_headlen(skb), PCI_DMA_TODEVICE);

		tx_buf->skb = NULL;

		/* Following BDs map the page fragments. */
		last = skb_shinfo(skb)->nr_frags;
		for (j = 0; j < last; j++) {
			tx_buf = &bp->tx_buf_ring[i + j + 1];
			pci_unmap_page(bp->pdev,
				pci_unmap_addr(tx_buf, mapping),
				skb_shinfo(skb)->frags[j].size,
				PCI_DMA_TODEVICE);
		}
		dev_kfree_skb(skb);
		i += j + 1;
	}

}
3988
3989 static void
3990 bnx2_free_rx_skbs(struct bnx2 *bp)
3991 {
3992         int i;
3993
3994         if (bp->rx_buf_ring == NULL)
3995                 return;
3996
3997         for (i = 0; i < bp->rx_max_ring_idx; i++) {
3998                 struct sw_bd *rx_buf = &bp->rx_buf_ring[i];
3999                 struct sk_buff *skb = rx_buf->skb;
4000
4001                 if (skb == NULL)
4002                         continue;
4003
4004                 pci_unmap_single(bp->pdev, pci_unmap_addr(rx_buf, mapping),
4005                         bp->rx_buf_use_size, PCI_DMA_FROMDEVICE);
4006
4007                 rx_buf->skb = NULL;
4008
4009                 dev_kfree_skb(skb);
4010         }
4011 }
4012
static void
bnx2_free_skbs(struct bnx2 *bp)
{
	/* Release every SKB owned by the driver: TX ring first, then RX. */
	bnx2_free_tx_skbs(bp);
	bnx2_free_rx_skbs(bp);
}
4019
4020 static int
4021 bnx2_reset_nic(struct bnx2 *bp, u32 reset_code)
4022 {
4023         int rc;
4024
4025         rc = bnx2_reset_chip(bp, reset_code);
4026         bnx2_free_skbs(bp);
4027         if (rc)
4028                 return rc;
4029
4030         if ((rc = bnx2_init_chip(bp)) != 0)
4031                 return rc;
4032
4033         bnx2_init_tx_ring(bp);
4034         bnx2_init_rx_ring(bp);
4035         return 0;
4036 }
4037
4038 static int
4039 bnx2_init_nic(struct bnx2 *bp)
4040 {
4041         int rc;
4042
4043         if ((rc = bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET)) != 0)
4044                 return rc;
4045
4046         spin_lock_bh(&bp->phy_lock);
4047         bnx2_init_phy(bp);
4048         spin_unlock_bh(&bp->phy_lock);
4049         bnx2_set_link(bp);
4050         return 0;
4051 }
4052
static int
bnx2_test_registers(struct bnx2 *bp)
{
	/* Self-test of chip registers.  For each table entry, verify that
	 * the read/write bits (rw_mask) can be cleared and set, and that
	 * the read-only bits (ro_mask) are unaffected by writes.  The
	 * original register value is restored in all cases.  Entries
	 * flagged BNX2_FL_NOT_5709 are skipped on 5709 chips.
	 * Returns 0 on success, -ENODEV on the first mismatch. */
	int ret;
	int i, is_5709;
	static const struct {
		u16   offset;
		u16   flags;
#define BNX2_FL_NOT_5709        1
		u32   rw_mask;
		u32   ro_mask;
	} reg_tbl[] = {
		{ 0x006c, 0, 0x00000000, 0x0000003f },
		{ 0x0090, 0, 0xffffffff, 0x00000000 },
		{ 0x0094, 0, 0x00000000, 0x00000000 },

		{ 0x0404, BNX2_FL_NOT_5709, 0x00003f00, 0x00000000 },
		{ 0x0418, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
		{ 0x041c, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
		{ 0x0420, BNX2_FL_NOT_5709, 0x00000000, 0x80ffffff },
		{ 0x0424, BNX2_FL_NOT_5709, 0x00000000, 0x00000000 },
		{ 0x0428, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
		{ 0x0450, BNX2_FL_NOT_5709, 0x00000000, 0x0000ffff },
		{ 0x0454, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
		{ 0x0458, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },

		{ 0x0808, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
		{ 0x0854, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
		{ 0x0868, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
		{ 0x086c, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
		{ 0x0870, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
		{ 0x0874, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },

		{ 0x0c00, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
		{ 0x0c04, BNX2_FL_NOT_5709, 0x00000000, 0x03ff0001 },
		{ 0x0c08, BNX2_FL_NOT_5709,  0x0f0ff073, 0x00000000 },

		{ 0x1000, 0, 0x00000000, 0x00000001 },
		{ 0x1004, 0, 0x00000000, 0x000f0001 },

		{ 0x1408, 0, 0x01c00800, 0x00000000 },
		{ 0x149c, 0, 0x8000ffff, 0x00000000 },
		{ 0x14a8, 0, 0x00000000, 0x000001ff },
		{ 0x14ac, 0, 0x0fffffff, 0x10000000 },
		{ 0x14b0, 0, 0x00000002, 0x00000001 },
		{ 0x14b8, 0, 0x00000000, 0x00000000 },
		{ 0x14c0, 0, 0x00000000, 0x00000009 },
		{ 0x14c4, 0, 0x00003fff, 0x00000000 },
		{ 0x14cc, 0, 0x00000000, 0x00000001 },
		{ 0x14d0, 0, 0xffffffff, 0x00000000 },

		{ 0x1800, 0, 0x00000000, 0x00000001 },
		{ 0x1804, 0, 0x00000000, 0x00000003 },

		{ 0x2800, 0, 0x00000000, 0x00000001 },
		{ 0x2804, 0, 0x00000000, 0x00003f01 },
		{ 0x2808, 0, 0x0f3f3f03, 0x00000000 },
		{ 0x2810, 0, 0xffff0000, 0x00000000 },
		{ 0x2814, 0, 0xffff0000, 0x00000000 },
		{ 0x2818, 0, 0xffff0000, 0x00000000 },
		{ 0x281c, 0, 0xffff0000, 0x00000000 },
		{ 0x2834, 0, 0xffffffff, 0x00000000 },
		{ 0x2840, 0, 0x00000000, 0xffffffff },
		{ 0x2844, 0, 0x00000000, 0xffffffff },
		{ 0x2848, 0, 0xffffffff, 0x00000000 },
		{ 0x284c, 0, 0xf800f800, 0x07ff07ff },

		{ 0x2c00, 0, 0x00000000, 0x00000011 },
		{ 0x2c04, 0, 0x00000000, 0x00030007 },

		{ 0x3c00, 0, 0x00000000, 0x00000001 },
		{ 0x3c04, 0, 0x00000000, 0x00070000 },
		{ 0x3c08, 0, 0x00007f71, 0x07f00000 },
		{ 0x3c0c, 0, 0x1f3ffffc, 0x00000000 },
		{ 0x3c10, 0, 0xffffffff, 0x00000000 },
		{ 0x3c14, 0, 0x00000000, 0xffffffff },
		{ 0x3c18, 0, 0x00000000, 0xffffffff },
		{ 0x3c1c, 0, 0xfffff000, 0x00000000 },
		{ 0x3c20, 0, 0xffffff00, 0x00000000 },

		{ 0x5004, 0, 0x00000000, 0x0000007f },
		{ 0x5008, 0, 0x0f0007ff, 0x00000000 },

		{ 0x5c00, 0, 0x00000000, 0x00000001 },
		{ 0x5c04, 0, 0x00000000, 0x0003000f },
		{ 0x5c08, 0, 0x00000003, 0x00000000 },
		{ 0x5c0c, 0, 0x0000fff8, 0x00000000 },
		{ 0x5c10, 0, 0x00000000, 0xffffffff },
		{ 0x5c80, 0, 0x00000000, 0x0f7113f1 },
		{ 0x5c84, 0, 0x00000000, 0x0000f333 },
		{ 0x5c88, 0, 0x00000000, 0x00077373 },
		{ 0x5c8c, 0, 0x00000000, 0x0007f737 },

		{ 0x6808, 0, 0x0000ff7f, 0x00000000 },
		{ 0x680c, 0, 0xffffffff, 0x00000000 },
		{ 0x6810, 0, 0xffffffff, 0x00000000 },
		{ 0x6814, 0, 0xffffffff, 0x00000000 },
		{ 0x6818, 0, 0xffffffff, 0x00000000 },
		{ 0x681c, 0, 0xffffffff, 0x00000000 },
		{ 0x6820, 0, 0x00ff00ff, 0x00000000 },
		{ 0x6824, 0, 0x00ff00ff, 0x00000000 },
		{ 0x6828, 0, 0x00ff00ff, 0x00000000 },
		{ 0x682c, 0, 0x03ff03ff, 0x00000000 },
		{ 0x6830, 0, 0x03ff03ff, 0x00000000 },
		{ 0x6834, 0, 0x03ff03ff, 0x00000000 },
		{ 0x6838, 0, 0x03ff03ff, 0x00000000 },
		{ 0x683c, 0, 0x0000ffff, 0x00000000 },
		{ 0x6840, 0, 0x00000ff0, 0x00000000 },
		{ 0x6844, 0, 0x00ffff00, 0x00000000 },
		{ 0x684c, 0, 0xffffffff, 0x00000000 },
		{ 0x6850, 0, 0x7f7f7f7f, 0x00000000 },
		{ 0x6854, 0, 0x7f7f7f7f, 0x00000000 },
		{ 0x6858, 0, 0x7f7f7f7f, 0x00000000 },
		{ 0x685c, 0, 0x7f7f7f7f, 0x00000000 },
		{ 0x6908, 0, 0x00000000, 0x0001ff0f },
		{ 0x690c, 0, 0x00000000, 0x0ffe00f0 },

		/* Sentinel: 0xffff offset terminates the table. */
		{ 0xffff, 0, 0x00000000, 0x00000000 },
	};

	ret = 0;
	is_5709 = 0;
	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		is_5709 = 1;

	for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
		u32 offset, rw_mask, ro_mask, save_val, val;
		u16 flags = reg_tbl[i].flags;

		if (is_5709 && (flags & BNX2_FL_NOT_5709))
			continue;

		offset = (u32) reg_tbl[i].offset;
		rw_mask = reg_tbl[i].rw_mask;
		ro_mask = reg_tbl[i].ro_mask;

		/* Save the original value so it can be restored. */
		save_val = readl(bp->regview + offset);

		/* Write all zeros: R/W bits must read back 0, R/O bits
		 * must be unchanged. */
		writel(0, bp->regview + offset);

		val = readl(bp->regview + offset);
		if ((val & rw_mask) != 0) {
			goto reg_test_err;
		}

		if ((val & ro_mask) != (save_val & ro_mask)) {
			goto reg_test_err;
		}

		/* Write all ones: R/W bits must read back 1, R/O bits
		 * must still be unchanged. */
		writel(0xffffffff, bp->regview + offset);

		val = readl(bp->regview + offset);
		if ((val & rw_mask) != rw_mask) {
			goto reg_test_err;
		}

		if ((val & ro_mask) != (save_val & ro_mask)) {
			goto reg_test_err;
		}

		writel(save_val, bp->regview + offset);
		continue;

reg_test_err:
		/* Restore the register before reporting failure. */
		writel(save_val, bp->regview + offset);
		ret = -ENODEV;
		break;
	}
	return ret;
}
4223
4224 static int
4225 bnx2_do_mem_test(struct bnx2 *bp, u32 start, u32 size)
4226 {
4227         static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0x55555555,
4228                 0xaaaaaaaa , 0xaa55aa55, 0x55aa55aa };
4229         int i;
4230
4231         for (i = 0; i < sizeof(test_pattern) / 4; i++) {
4232                 u32 offset;
4233
4234                 for (offset = 0; offset < size; offset += 4) {
4235
4236                         REG_WR_IND(bp, start + offset, test_pattern[i]);
4237
4238                         if (REG_RD_IND(bp, start + offset) !=
4239                                 test_pattern[i]) {
4240                                 return -ENODEV;
4241                         }
4242                 }
4243         }
4244         return 0;
4245 }
4246
4247 static int
4248 bnx2_test_memory(struct bnx2 *bp)
4249 {
4250         int ret = 0;
4251         int i;
4252         static struct mem_entry {
4253                 u32   offset;
4254                 u32   len;
4255         } mem_tbl_5706[] = {
4256                 { 0x60000,  0x4000 },
4257                 { 0xa0000,  0x3000 },
4258                 { 0xe0000,  0x4000 },
4259                 { 0x120000, 0x4000 },
4260                 { 0x1a0000, 0x4000 },
4261                 { 0x160000, 0x4000 },
4262                 { 0xffffffff, 0    },
4263         },
4264         mem_tbl_5709[] = {
4265                 { 0x60000,  0x4000 },
4266                 { 0xa0000,  0x3000 },
4267                 { 0xe0000,  0x4000 },
4268                 { 0x120000, 0x4000 },
4269                 { 0x1a0000, 0x4000 },
4270                 { 0xffffffff, 0    },
4271         };
4272         struct mem_entry *mem_tbl;
4273
4274         if (CHIP_NUM(bp) == CHIP_NUM_5709)
4275                 mem_tbl = mem_tbl_5709;
4276         else
4277                 mem_tbl = mem_tbl_5706;
4278
4279         for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
4280                 if ((ret = bnx2_do_mem_test(bp, mem_tbl[i].offset,
4281                         mem_tbl[i].len)) != 0) {
4282                         return ret;
4283                 }
4284         }
4285
4286         return ret;
4287 }
4288
4289 #define BNX2_MAC_LOOPBACK       0
4290 #define BNX2_PHY_LOOPBACK       1
4291
static int
bnx2_run_loopback(struct bnx2 *bp, int loopback_mode)
{
	/* Self-test helper: transmit one 1514-byte frame with the chip in
	 * MAC or PHY loopback mode and verify that it arrives back intact
	 * on the RX ring.  Returns 0 on success, -EINVAL for an unknown
	 * mode, -ENOMEM if the test skb cannot be allocated, and -ENODEV
	 * on any mismatch.  Caller is the ethtool self-test path with the
	 * NIC quiesced.
	 */
	unsigned int pkt_size, num_pkts, i;
	struct sk_buff *skb, *rx_skb;
	unsigned char *packet;
	u16 rx_start_idx, rx_idx;
	dma_addr_t map;
	struct tx_bd *txbd;
	struct sw_bd *rx_buf;
	struct l2_fhdr *rx_hdr;
	int ret = -ENODEV;

	if (loopback_mode == BNX2_MAC_LOOPBACK) {
		bp->loopback = MAC_LOOPBACK;
		bnx2_set_mac_loopback(bp);
	}
	else if (loopback_mode == BNX2_PHY_LOOPBACK) {
		bp->loopback = PHY_LOOPBACK;
		bnx2_set_phy_loopback(bp);
	}
	else
		return -EINVAL;

	/* Build the test frame: our own MAC as destination, 8 zero bytes,
	 * then an incrementing byte pattern that is verified on receive.
	 */
	pkt_size = 1514;
	skb = netdev_alloc_skb(bp->dev, pkt_size);
	if (!skb)
		return -ENOMEM;
	packet = skb_put(skb, pkt_size);
	memcpy(packet, bp->dev->dev_addr, 6);
	memset(packet + 6, 0x0, 8);
	for (i = 14; i < pkt_size; i++)
		packet[i] = (unsigned char) (i & 0xff);

	map = pci_map_single(bp->pdev, skb->data, pkt_size,
		PCI_DMA_TODEVICE);

	/* Force a status block update (without raising an interrupt) so
	 * we can snapshot the RX consumer index before transmitting.
	 */
	REG_WR(bp, BNX2_HC_COMMAND,
	       bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);

	REG_RD(bp, BNX2_HC_COMMAND);

	udelay(5);
	rx_start_idx = bp->status_blk->status_rx_quick_consumer_index0;

	num_pkts = 0;

	/* Hand the frame to the chip as a single-BD packet. */
	txbd = &bp->tx_desc_ring[TX_RING_IDX(bp->tx_prod)];

	txbd->tx_bd_haddr_hi = (u64) map >> 32;
	txbd->tx_bd_haddr_lo = (u64) map & 0xffffffff;
	txbd->tx_bd_mss_nbytes = pkt_size;
	txbd->tx_bd_vlan_tag_flags = TX_BD_FLAGS_START | TX_BD_FLAGS_END;

	num_pkts++;
	bp->tx_prod = NEXT_TX_BD(bp->tx_prod);
	bp->tx_prod_bseq += pkt_size;

	/* Ring the TX doorbell. */
	REG_WR16(bp, bp->tx_bidx_addr, bp->tx_prod);
	REG_WR(bp, bp->tx_bseq_addr, bp->tx_prod_bseq);

	/* Give the chip time to loop the frame back, then force another
	 * status block update to pick up the new consumer indices.
	 */
	udelay(100);

	REG_WR(bp, BNX2_HC_COMMAND,
	       bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);

	REG_RD(bp, BNX2_HC_COMMAND);

	udelay(5);

	pci_unmap_single(bp->pdev, map, pkt_size, PCI_DMA_TODEVICE);
	dev_kfree_skb(skb);

	/* TX consumer must have caught up with the producer... */
	if (bp->status_blk->status_tx_quick_consumer_index0 != bp->tx_prod) {
		goto loopback_test_done;
	}

	/* ...and exactly num_pkts frames must have arrived on RX. */
	rx_idx = bp->status_blk->status_rx_quick_consumer_index0;
	if (rx_idx != rx_start_idx + num_pkts) {
		goto loopback_test_done;
	}

	rx_buf = &bp->rx_buf_ring[rx_start_idx];
	rx_skb = rx_buf->skb;

	/* The chip-written l2_fhdr sits at the start of the buffer,
	 * ahead of the frame data at bp->rx_offset.
	 */
	rx_hdr = (struct l2_fhdr *) rx_skb->data;
	skb_reserve(rx_skb, bp->rx_offset);

	pci_dma_sync_single_for_cpu(bp->pdev,
		pci_unmap_addr(rx_buf, mapping),
		bp->rx_buf_size, PCI_DMA_FROMDEVICE);

	/* Any receive error flagged by the chip fails the test. */
	if (rx_hdr->l2_fhdr_status &
		(L2_FHDR_ERRORS_BAD_CRC |
		L2_FHDR_ERRORS_PHY_DECODE |
		L2_FHDR_ERRORS_ALIGNMENT |
		L2_FHDR_ERRORS_TOO_SHORT |
		L2_FHDR_ERRORS_GIANT_FRAME)) {

		goto loopback_test_done;
	}

	/* Length must match; l2_fhdr_pkt_len includes the 4-byte CRC. */
	if ((rx_hdr->l2_fhdr_pkt_len - 4) != pkt_size) {
		goto loopback_test_done;
	}

	/* Verify the payload pattern survived the round trip. */
	for (i = 14; i < pkt_size; i++) {
		if (*(rx_skb->data + i) != (unsigned char) (i & 0xff)) {
			goto loopback_test_done;
		}
	}

	ret = 0;

loopback_test_done:
	bp->loopback = 0;
	return ret;
}
4410
4411 #define BNX2_MAC_LOOPBACK_FAILED        1
4412 #define BNX2_PHY_LOOPBACK_FAILED        2
4413 #define BNX2_LOOPBACK_FAILED            (BNX2_MAC_LOOPBACK_FAILED |     \
4414                                          BNX2_PHY_LOOPBACK_FAILED)
4415
4416 static int
4417 bnx2_test_loopback(struct bnx2 *bp)
4418 {
4419         int rc = 0;
4420
4421         if (!netif_running(bp->dev))
4422                 return BNX2_LOOPBACK_FAILED;
4423
4424         bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET);
4425         spin_lock_bh(&bp->phy_lock);
4426         bnx2_init_phy(bp);
4427         spin_unlock_bh(&bp->phy_lock);
4428         if (bnx2_run_loopback(bp, BNX2_MAC_LOOPBACK))
4429                 rc |= BNX2_MAC_LOOPBACK_FAILED;
4430         if (bnx2_run_loopback(bp, BNX2_PHY_LOOPBACK))
4431                 rc |= BNX2_PHY_LOOPBACK_FAILED;
4432         return rc;
4433 }
4434
4435 #define NVRAM_SIZE 0x200
4436 #define CRC32_RESIDUAL 0xdebb20e3
4437
4438 static int
4439 bnx2_test_nvram(struct bnx2 *bp)
4440 {
4441         u32 buf[NVRAM_SIZE / 4];
4442         u8 *data = (u8 *) buf;
4443         int rc = 0;
4444         u32 magic, csum;
4445
4446         if ((rc = bnx2_nvram_read(bp, 0, data, 4)) != 0)
4447                 goto test_nvram_done;
4448
4449         magic = be32_to_cpu(buf[0]);
4450         if (magic != 0x669955aa) {
4451                 rc = -ENODEV;
4452                 goto test_nvram_done;
4453         }
4454
4455         if ((rc = bnx2_nvram_read(bp, 0x100, data, NVRAM_SIZE)) != 0)
4456                 goto test_nvram_done;
4457
4458         csum = ether_crc_le(0x100, data);
4459         if (csum != CRC32_RESIDUAL) {
4460                 rc = -ENODEV;
4461                 goto test_nvram_done;
4462         }
4463
4464         csum = ether_crc_le(0x100, data + 0x100);
4465         if (csum != CRC32_RESIDUAL) {
4466                 rc = -ENODEV;
4467         }
4468
4469 test_nvram_done:
4470         return rc;
4471 }
4472
4473 static int
4474 bnx2_test_link(struct bnx2 *bp)
4475 {
4476         u32 bmsr;
4477
4478         spin_lock_bh(&bp->phy_lock);
4479         bnx2_enable_bmsr1(bp);
4480         bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
4481         bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
4482         bnx2_disable_bmsr1(bp);
4483         spin_unlock_bh(&bp->phy_lock);
4484
4485         if (bmsr & BMSR_LSTATUS) {
4486                 return 0;
4487         }
4488         return -ENODEV;
4489 }
4490
4491 static int
4492 bnx2_test_intr(struct bnx2 *bp)
4493 {
4494         int i;
4495         u16 status_idx;
4496
4497         if (!netif_running(bp->dev))
4498                 return -ENODEV;
4499
4500         status_idx = REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff;
4501
4502         /* This register is not touched during run-time. */
4503         REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
4504         REG_RD(bp, BNX2_HC_COMMAND);
4505
4506         for (i = 0; i < 10; i++) {
4507                 if ((REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff) !=
4508                         status_idx) {
4509
4510                         break;
4511                 }
4512
4513                 msleep_interruptible(10);
4514         }
4515         if (i < 10)
4516                 return 0;
4517
4518         return -ENODEV;
4519 }
4520
static void
bnx2_5706_serdes_timer(struct bnx2 *bp)
{
	/* Periodic 5706 SerDes link management: software parallel
	 * detection.  If autoneg is enabled but the link has not come up
	 * and the partner is not sending autoneg config words, force
	 * 1000 Mbps full duplex; if the partner later starts sending
	 * config words, switch back to autoneg.
	 */
	spin_lock(&bp->phy_lock);
	if (bp->serdes_an_pending)
		/* A recent autoneg restart is still settling; count down. */
		bp->serdes_an_pending--;
	else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
		u32 bmcr;

		bp->current_interval = bp->timer_interval;

		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

		if (bmcr & BMCR_ANENABLE) {
			u32 phy1, phy2;

			/* Vendor-specific PHY register accesses below;
			 * 0x1c/0x17/0x15 select and read shadow registers
			 * (semantics per Broadcom SerDes docs).
			 */
			bnx2_write_phy(bp, 0x1c, 0x7c00);
			bnx2_read_phy(bp, 0x1c, &phy1);

			/* Read twice -- the register is latched. */
			bnx2_write_phy(bp, 0x17, 0x0f01);
			bnx2_read_phy(bp, 0x15, &phy2);
			bnx2_write_phy(bp, 0x17, 0x0f01);
			bnx2_read_phy(bp, 0x15, &phy2);

			if ((phy1 & 0x10) &&	/* SIGNAL DETECT */
				!(phy2 & 0x20)) {	/* no CONFIG */

				/* Partner is not autonegotiating: force
				 * 1G full duplex (parallel detect).
				 */
				bmcr &= ~BMCR_ANENABLE;
				bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
				bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
				bp->phy_flags |= PHY_PARALLEL_DETECT_FLAG;
			}
		}
	}
	else if ((bp->link_up) && (bp->autoneg & AUTONEG_SPEED) &&
		 (bp->phy_flags & PHY_PARALLEL_DETECT_FLAG)) {
		u32 phy2;

		/* Link came up via parallel detect; if the partner now
		 * sends config words, re-enable autoneg.
		 */
		bnx2_write_phy(bp, 0x17, 0x0f01);
		bnx2_read_phy(bp, 0x15, &phy2);
		if (phy2 & 0x20) {
			u32 bmcr;

			bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
			bmcr |= BMCR_ANENABLE;
			bnx2_write_phy(bp, bp->mii_bmcr, bmcr);

			bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG;
		}
	} else
		bp->current_interval = bp->timer_interval;

	spin_unlock(&bp->phy_lock);
}
4575
static void
bnx2_5708_serdes_timer(struct bnx2 *bp)
{
	/* Periodic 5708 SerDes link management, needed only on 2.5G
	 * capable PHYs: while the link is down with autoneg enabled,
	 * alternate between autoneg and forced 2.5G so we can link with
	 * partners that do not autonegotiate.
	 */
	if ((bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) == 0) {
		bp->serdes_an_pending = 0;
		return;
	}

	spin_lock(&bp->phy_lock);
	if (bp->serdes_an_pending)
		/* Skip this tick while a previous restart settles. */
		bp->serdes_an_pending--;
	else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
		u32 bmcr;

		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
		if (bmcr & BMCR_ANENABLE) {
			/* Autoneg has not linked; try forced 2.5G for one
			 * (shorter) forced-mode interval.
			 */
			bnx2_enable_forced_2g5(bp);
			bp->current_interval = SERDES_FORCED_TIMEOUT;
		} else {
			/* Forced mode did not link either; return to
			 * autoneg and give it two ticks to complete.
			 */
			bnx2_disable_forced_2g5(bp);
			bp->serdes_an_pending = 2;
			bp->current_interval = bp->timer_interval;
		}

	} else
		bp->current_interval = bp->timer_interval;

	spin_unlock(&bp->phy_lock);
}
4605
static void
bnx2_timer(unsigned long data)
{
	/* Periodic driver heartbeat, rearmed every bp->current_interval
	 * jiffies: sends the driver-alive pulse to the bootcode firmware,
	 * harvests the firmware RX drop counter, and runs the SerDes link
	 * state machines where applicable.
	 */
	struct bnx2 *bp = (struct bnx2 *) data;
	u32 msg;

	if (!netif_running(bp->dev))
		return;

	/* While interrupts are disabled (intr_sem raised), skip the work
	 * but keep the timer alive.
	 */
	if (atomic_read(&bp->intr_sem) != 0)
		goto bnx2_restart_timer;

	/* Driver pulse: tell the bootcode we are still alive. */
	msg = (u32) ++bp->fw_drv_pulse_wr_seq;
	REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_PULSE_MB, msg);

	/* Firmware-level RX drops are kept in the stats block so
	 * bnx2_get_stats() can fold them into rx_missed_errors.
	 */
	bp->stats_blk->stat_FwRxDrop = REG_RD_IND(bp, BNX2_FW_RX_DROP_COUNT);

	if (bp->phy_flags & PHY_SERDES_FLAG) {
		if (CHIP_NUM(bp) == CHIP_NUM_5706)
			bnx2_5706_serdes_timer(bp);
		else
			bnx2_5708_serdes_timer(bp);
	}

bnx2_restart_timer:
	mod_timer(&bp->timer, jiffies + bp->current_interval);
}
4633
4634 static int
4635 bnx2_request_irq(struct bnx2 *bp)
4636 {
4637         struct net_device *dev = bp->dev;
4638         int rc = 0;
4639
4640         if (bp->flags & USING_MSI_FLAG) {
4641                 irq_handler_t   fn = bnx2_msi;
4642
4643                 if (bp->flags & ONE_SHOT_MSI_FLAG)
4644                         fn = bnx2_msi_1shot;
4645
4646                 rc = request_irq(bp->pdev->irq, fn, 0, dev->name, dev);
4647         } else
4648                 rc = request_irq(bp->pdev->irq, bnx2_interrupt,
4649                                  IRQF_SHARED, dev->name, dev);
4650         return rc;
4651 }
4652
4653 static void
4654 bnx2_free_irq(struct bnx2 *bp)
4655 {
4656         struct net_device *dev = bp->dev;
4657
4658         if (bp->flags & USING_MSI_FLAG) {
4659                 free_irq(bp->pdev->irq, dev);
4660                 pci_disable_msi(bp->pdev);
4661                 bp->flags &= ~(USING_MSI_FLAG | ONE_SHOT_MSI_FLAG);
4662         } else
4663                 free_irq(bp->pdev->irq, dev);
4664 }
4665
/* Called with rtnl_lock */
static int
bnx2_open(struct net_device *dev)
{
	/* ndo_open: power up to D0, allocate rings, set up the IRQ
	 * (preferring MSI), initialize the chip, and start the TX queue.
	 * If MSI turns out not to deliver interrupts on this platform,
	 * fall back to legacy INTx.  Returns 0 or a negative errno with
	 * all partially-acquired resources released.
	 */
	struct bnx2 *bp = netdev_priv(dev);
	int rc;

	netif_carrier_off(dev);

	bnx2_set_power_state(bp, PCI_D0);
	bnx2_disable_int(bp);

	rc = bnx2_alloc_mem(bp);
	if (rc)
		return rc;

	/* Prefer MSI when the device supports it and the user has not
	 * disabled it; 5709 additionally uses one-shot MSI.
	 */
	if ((bp->flags & MSI_CAP_FLAG) && !disable_msi) {
		if (pci_enable_msi(bp->pdev) == 0) {
			bp->flags |= USING_MSI_FLAG;
			if (CHIP_NUM(bp) == CHIP_NUM_5709)
				bp->flags |= ONE_SHOT_MSI_FLAG;
		}
	}
	rc = bnx2_request_irq(bp);

	if (rc) {
		bnx2_free_mem(bp);
		return rc;
	}

	rc = bnx2_init_nic(bp);

	if (rc) {
		bnx2_free_irq(bp);
		bnx2_free_skbs(bp);
		bnx2_free_mem(bp);
		return rc;
	}

	mod_timer(&bp->timer, jiffies + bp->current_interval);

	atomic_set(&bp->intr_sem, 0);

	bnx2_enable_int(bp);

	if (bp->flags & USING_MSI_FLAG) {
		/* Test MSI to make sure it is working
		 * If MSI test fails, go back to INTx mode
		 */
		if (bnx2_test_intr(bp) != 0) {
			printk(KERN_WARNING PFX "%s: No interrupt was generated"
			       " using MSI, switching to INTx mode. Please"
			       " report this failure to the PCI maintainer"
			       " and include system chipset information.\n",
			       bp->dev->name);

			bnx2_disable_int(bp);
			/* bnx2_free_irq() also disables MSI and clears
			 * USING_MSI_FLAG, so the re-request below uses INTx.
			 */
			bnx2_free_irq(bp);

			/* Re-initialize the NIC before retrying with INTx. */
			rc = bnx2_init_nic(bp);

			if (!rc)
				rc = bnx2_request_irq(bp);

			if (rc) {
				bnx2_free_skbs(bp);
				bnx2_free_mem(bp);
				del_timer_sync(&bp->timer);
				return rc;
			}
			bnx2_enable_int(bp);
		}
	}
	/* Re-check the flag: it is cleared above if we fell back to INTx. */
	if (bp->flags & USING_MSI_FLAG) {
		printk(KERN_INFO PFX "%s: using MSI\n", dev->name);
	}

	netif_start_queue(dev);

	return 0;
}
4747
static void
bnx2_reset_task(struct work_struct *work)
{
	/* Deferred chip reset, scheduled by bnx2_tx_timeout().  Runs in
	 * process context.  bp->in_reset_task lets bnx2_close() busy-wait
	 * for completion instead of flushing the workqueue, which could
	 * deadlock on rtnl_lock (see the comment in bnx2_close()).
	 */
	struct bnx2 *bp = container_of(work, struct bnx2, reset_task);

	if (!netif_running(bp->dev))
		return;

	bp->in_reset_task = 1;
	bnx2_netif_stop(bp);

	bnx2_init_nic(bp);

	/* intr_sem is raised before restarting; NOTE(review): presumably
	 * bnx2_netif_start() accounts for it when re-enabling interrupts
	 * -- confirm against its definition.
	 */
	atomic_set(&bp->intr_sem, 1);
	bnx2_netif_start(bp);
	bp->in_reset_task = 0;
}
4765
static void
bnx2_tx_timeout(struct net_device *dev)
{
	/* ndo_tx_timeout: defer recovery to bnx2_reset_task() in process
	 * context rather than resetting from here.
	 */
	struct bnx2 *bp = netdev_priv(dev);

	/* This allows the netif to be shutdown gracefully before resetting */
	schedule_work(&bp->reset_task);
}
4774
4775 #ifdef BCM_VLAN
/* Called with rtnl_lock */
static void
bnx2_vlan_rx_register(struct net_device *dev, struct vlan_group *vlgrp)
{
	/* Attach the VLAN group (vlgrp may be NULL per the vlan_rx_register
	 * contract -- detaching) and reprogram the chip's RX mode with the
	 * interface quiesced.
	 */
	struct bnx2 *bp = netdev_priv(dev);

	bnx2_netif_stop(bp);

	bp->vlgrp = vlgrp;
	bnx2_set_rx_mode(dev);

	bnx2_netif_start(bp);
}
4789
/* Called with rtnl_lock */
static void
bnx2_vlan_rx_kill_vid(struct net_device *dev, uint16_t vid)
{
	/* Remove a VLAN id: clear its slot in the group and reprogram the
	 * RX mode with the interface quiesced.  NOTE(review): bp->vlgrp is
	 * assumed valid (or handled by vlan_group_set_device when NULL) --
	 * confirm against the helper's definition.
	 */
	struct bnx2 *bp = netdev_priv(dev);

	bnx2_netif_stop(bp);
	vlan_group_set_device(bp->vlgrp, vid, NULL);
	bnx2_set_rx_mode(dev);

	bnx2_netif_start(bp);
}
4802 #endif
4803
/* Called with netif_tx_lock.
 * bnx2_tx_int() runs without netif_tx_lock unless it needs to call
 * netif_wake_queue().
 */
static int
bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	/* hard_start_xmit: map the skb (linear part plus page fragments)
	 * onto a chain of TX buffer descriptors, encoding checksum
	 * offload, VLAN, and LSO/TSO hints into the BD flags, then ring
	 * the TX doorbell.  Returns NETDEV_TX_OK, or NETDEV_TX_BUSY if
	 * the ring is unexpectedly full.
	 */
	struct bnx2 *bp = netdev_priv(dev);
	dma_addr_t mapping;
	struct tx_bd *txbd;
	struct sw_bd *tx_buf;
	u32 len, vlan_tag_flags, last_frag, mss;
	u16 prod, ring_prod;
	int i;

	/* The queue should have been stopped before the ring got this
	 * full; reaching here means stop/wake accounting is broken.
	 */
	if (unlikely(bnx2_tx_avail(bp) < (skb_shinfo(skb)->nr_frags + 1))) {
		netif_stop_queue(dev);
		printk(KERN_ERR PFX "%s: BUG! Tx ring full when queue awake!\n",
			dev->name);

		return NETDEV_TX_BUSY;
	}
	len = skb_headlen(skb);
	prod = bp->tx_prod;
	ring_prod = TX_RING_IDX(prod);

	vlan_tag_flags = 0;
	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		vlan_tag_flags |= TX_BD_FLAGS_TCP_UDP_CKSUM;
	}

	if (bp->vlgrp != 0 && vlan_tx_tag_present(skb)) {
		vlan_tag_flags |=
			(TX_BD_FLAGS_VLAN_TAG | (vlan_tx_tag_get(skb) << 16));
	}
	/* Non-zero gso_size marks an LSO (TSO) frame. */
	if ((mss = skb_shinfo(skb)->gso_size)) {
		u32 tcp_opt_len, ip_tcp_len;
		struct iphdr *iph;

		vlan_tag_flags |= TX_BD_FLAGS_SW_LSO;

		tcp_opt_len = tcp_optlen(skb);

		if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) {
			/* TCPv6 LSO: encode the TCP header offset delta
			 * (relative to a standard IPv6 header, in 8-byte
			 * units) into the BD flag and mss fields.
			 */
			u32 tcp_off = skb_transport_offset(skb) -
				      sizeof(struct ipv6hdr) - ETH_HLEN;

			vlan_tag_flags |= ((tcp_opt_len >> 2) << 8) |
					  TX_BD_FLAGS_SW_FLAGS;
			if (likely(tcp_off == 0))
				vlan_tag_flags &= ~TX_BD_FLAGS_TCP6_OFF0_MSK;
			else {
				tcp_off >>= 3;
				vlan_tag_flags |= ((tcp_off & 0x3) <<
						   TX_BD_FLAGS_TCP6_OFF0_SHL) |
						  ((tcp_off & 0x10) <<
						   TX_BD_FLAGS_TCP6_OFF4_SHL);
				mss |= (tcp_off & 0xc) << TX_BD_TCP6_OFF2_SHL;
			}
		} else {
			/* TCPv4 LSO: the headers are modified below (IP
			 * tot_len preset to one full segment, TCP checksum
			 * seeded with the pseudo-header), so unclone the
			 * header area first.
			 */
			if (skb_header_cloned(skb) &&
			    pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
				dev_kfree_skb(skb);
				return NETDEV_TX_OK;
			}

			ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);

			iph = ip_hdr(skb);
			iph->check = 0;
			iph->tot_len = htons(mss + ip_tcp_len + tcp_opt_len);
			tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
								 iph->daddr, 0,
								 IPPROTO_TCP,
								 0);
			/* Option-length hint, in 32-bit words beyond the
			 * standard IP and TCP headers.
			 */
			if (tcp_opt_len || (iph->ihl > 5)) {
				vlan_tag_flags |= ((iph->ihl - 5) +
						   (tcp_opt_len >> 2)) << 8;
			}
		}
	} else
		mss = 0;

	/* First BD covers the linear (header) area. */
	mapping = pci_map_single(bp->pdev, skb->data, len, PCI_DMA_TODEVICE);

	tx_buf = &bp->tx_buf_ring[ring_prod];
	tx_buf->skb = skb;
	pci_unmap_addr_set(tx_buf, mapping, mapping);

	txbd = &bp->tx_desc_ring[ring_prod];

	txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
	txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
	txbd->tx_bd_mss_nbytes = len | (mss << 16);
	txbd->tx_bd_vlan_tag_flags = vlan_tag_flags | TX_BD_FLAGS_START;

	/* One additional BD per page fragment. */
	last_frag = skb_shinfo(skb)->nr_frags;

	for (i = 0; i < last_frag; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		prod = NEXT_TX_BD(prod);
		ring_prod = TX_RING_IDX(prod);
		txbd = &bp->tx_desc_ring[ring_prod];

		len = frag->size;
		mapping = pci_map_page(bp->pdev, frag->page, frag->page_offset,
			len, PCI_DMA_TODEVICE);
		pci_unmap_addr_set(&bp->tx_buf_ring[ring_prod],
				mapping, mapping);

		txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
		txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
		txbd->tx_bd_mss_nbytes = len | (mss << 16);
		txbd->tx_bd_vlan_tag_flags = vlan_tag_flags;

	}
	/* Mark the last BD in the chain. */
	txbd->tx_bd_vlan_tag_flags |= TX_BD_FLAGS_END;

	prod = NEXT_TX_BD(prod);
	bp->tx_prod_bseq += skb->len;

	/* Ring the doorbell: publish the new producer index and byte
	 * sequence to the chip.
	 */
	REG_WR16(bp, bp->tx_bidx_addr, prod);
	REG_WR(bp, bp->tx_bseq_addr, bp->tx_prod_bseq);

	mmiowb();

	bp->tx_prod = prod;
	dev->trans_start = jiffies;

	/* Stop the queue when the ring can no longer hold a maximally
	 * fragmented skb; wake immediately if bnx2_tx_int() freed enough
	 * BDs in the meantime (closes the stop/wake race).
	 */
	if (unlikely(bnx2_tx_avail(bp) <= MAX_SKB_FRAGS)) {
		netif_stop_queue(dev);
		if (bnx2_tx_avail(bp) > bp->tx_wake_thresh)
			netif_wake_queue(dev);
	}

	return NETDEV_TX_OK;
}
4942
/* Called with rtnl_lock */
static int
bnx2_close(struct net_device *dev)
{
	/* ndo_stop: quiesce the device, tell the bootcode why we are
	 * going down (which selects the WOL behavior), free all
	 * resources, and drop to D3hot.  Always returns 0.
	 */
	struct bnx2 *bp = netdev_priv(dev);
	u32 reset_code;

	/* Calling flush_scheduled_work() may deadlock because
	 * linkwatch_event() may be on the workqueue and it will try to get
	 * the rtnl_lock which we are holding.
	 */
	while (bp->in_reset_task)
		msleep(1);

	bnx2_netif_stop(bp);
	del_timer_sync(&bp->timer);
	/* Select the unload code reported to the bootcode: link-down when
	 * WOL is unsupported, otherwise suspend with or without WOL.
	 */
	if (bp->flags & NO_WOL_FLAG)
		reset_code = BNX2_DRV_MSG_CODE_UNLOAD_LNK_DN;
	else if (bp->wol)
		reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
	else
		reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
	bnx2_reset_chip(bp, reset_code);
	bnx2_free_irq(bp);
	bnx2_free_skbs(bp);
	bnx2_free_mem(bp);
	bp->link_up = 0;
	netif_carrier_off(bp->dev);
	bnx2_set_power_state(bp, PCI_D3hot);
	return 0;
}
4974
/* Combine a 64-bit hardware counter's _hi/_lo halves into an unsigned
 * long.  The entire expansion is parenthesized so the macro composes
 * safely inside larger expressions (e.g. multiplication would otherwise
 * bind only to the high half).
 */
#define GET_NET_STATS64(ctr)                                    \
	((unsigned long) ((unsigned long) (ctr##_hi) << 32) +   \
	 (unsigned long) (ctr##_lo))

/* On 32-bit hosts only the low 32 bits of the counter are reported. */
#define GET_NET_STATS32(ctr)            \
	(ctr##_lo)

#if (BITS_PER_LONG == 64)
#define GET_NET_STATS	GET_NET_STATS64
#else
#define GET_NET_STATS	GET_NET_STATS32
#endif
4987
static struct net_device_stats *
bnx2_get_stats(struct net_device *dev)
{
	/* ndo_get_stats: fold the chip's hardware statistics block into
	 * the generic net_device_stats counters.  If the stats block has
	 * not been allocated yet, the previous snapshot is returned
	 * unchanged.
	 */
	struct bnx2 *bp = netdev_priv(dev);
	struct statistics_block *stats_blk = bp->stats_blk;
	struct net_device_stats *net_stats = &bp->net_stats;

	if (bp->stats_blk == NULL) {
		return net_stats;
	}
	net_stats->rx_packets =
		GET_NET_STATS(stats_blk->stat_IfHCInUcastPkts) +
		GET_NET_STATS(stats_blk->stat_IfHCInMulticastPkts) +
		GET_NET_STATS(stats_blk->stat_IfHCInBroadcastPkts);

	net_stats->tx_packets =
		GET_NET_STATS(stats_blk->stat_IfHCOutUcastPkts) +
		GET_NET_STATS(stats_blk->stat_IfHCOutMulticastPkts) +
		GET_NET_STATS(stats_blk->stat_IfHCOutBroadcastPkts);

	net_stats->rx_bytes =
		GET_NET_STATS(stats_blk->stat_IfHCInOctets);

	net_stats->tx_bytes =
		GET_NET_STATS(stats_blk->stat_IfHCOutOctets);

	net_stats->multicast =
		GET_NET_STATS(stats_blk->stat_IfHCOutMulticastPkts);

	net_stats->collisions =
		(unsigned long) stats_blk->stat_EtherStatsCollisions;

	net_stats->rx_length_errors =
		(unsigned long) (stats_blk->stat_EtherStatsUndersizePkts +
		stats_blk->stat_EtherStatsOverrsizePkts);

	net_stats->rx_over_errors =
		(unsigned long) stats_blk->stat_IfInMBUFDiscards;

	net_stats->rx_frame_errors =
		(unsigned long) stats_blk->stat_Dot3StatsAlignmentErrors;

	net_stats->rx_crc_errors =
		(unsigned long) stats_blk->stat_Dot3StatsFCSErrors;

	/* rx_errors is the sum of the individual RX error classes above. */
	net_stats->rx_errors = net_stats->rx_length_errors +
		net_stats->rx_over_errors + net_stats->rx_frame_errors +
		net_stats->rx_crc_errors;

	net_stats->tx_aborted_errors =
		(unsigned long) (stats_blk->stat_Dot3StatsExcessiveCollisions +
		stats_blk->stat_Dot3StatsLateCollisions);

	/* Carrier-sense errors are reported as 0 on 5706 and 5708 A0;
	 * NOTE(review): presumably the counter is unreliable on those
	 * chips -- confirm against the chip errata.
	 */
	if ((CHIP_NUM(bp) == CHIP_NUM_5706) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_A0))
		net_stats->tx_carrier_errors = 0;
	else {
		net_stats->tx_carrier_errors =
			(unsigned long)
			stats_blk->stat_Dot3StatsCarrierSenseErrors;
	}

	net_stats->tx_errors =
		(unsigned long)
		stats_blk->stat_emac_tx_stat_dot3statsinternalmactransmiterrors
		+
		net_stats->tx_aborted_errors +
		net_stats->tx_carrier_errors;

	/* Include firmware-level drops harvested by bnx2_timer(). */
	net_stats->rx_missed_errors =
		(unsigned long) (stats_blk->stat_IfInMBUFDiscards +
		stats_blk->stat_FwRxDrop);

	return net_stats;
}
5063
5064 /* All ethtool functions called with rtnl_lock */
5065
5066 static int
5067 bnx2_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
5068 {
5069         struct bnx2 *bp = netdev_priv(dev);
5070
5071         cmd->supported = SUPPORTED_Autoneg;
5072         if (bp->phy_flags & PHY_SERDES_FLAG) {
5073                 cmd->supported |= SUPPORTED_1000baseT_Full |
5074                         SUPPORTED_FIBRE;
5075                 if (bp->phy_flags & PHY_2_5G_CAPABLE_FLAG)
5076                         cmd->supported |= SUPPORTED_2500baseX_Full;
5077
5078                 cmd->port = PORT_FIBRE;
5079         }
5080         else {
5081                 cmd->supported |= SUPPORTED_10baseT_Half |
5082                         SUPPORTED_10baseT_Full |
5083                         SUPPORTED_100baseT_Half |
5084                         SUPPORTED_100baseT_Full |
5085                         SUPPORTED_1000baseT_Full |
5086                         SUPPORTED_TP;
5087
5088                 cmd->port = PORT_TP;
5089         }
5090
5091         cmd->advertising = bp->advertising;
5092
5093         if (bp->autoneg & AUTONEG_SPEED) {
5094                 cmd->autoneg = AUTONEG_ENABLE;
5095         }
5096         else {
5097                 cmd->autoneg = AUTONEG_DISABLE;
5098         }
5099
5100         if (netif_carrier_ok(dev)) {
5101                 cmd->speed = bp->line_speed;
5102                 cmd->duplex = bp->duplex;
5103         }
5104         else {
5105                 cmd->speed = -1;
5106                 cmd->duplex = -1;
5107         }
5108
5109         cmd->transceiver = XCVR_INTERNAL;
5110         cmd->phy_address = bp->phy_addr;
5111
5112         return 0;
5113 }
5114
/* ethtool set_settings handler (called with rtnl_lock held).
 * Validates the requested autoneg/speed/duplex/advertising combination
 * against the PHY type (copper vs. SerDes), commits the new request into
 * bp, and reprograms the PHY under phy_lock.
 *
 * Returns 0 on success or -EINVAL for combinations the hardware cannot do.
 */
static int
bnx2_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct bnx2 *bp = netdev_priv(dev);
	/* Work on local copies so nothing in bp changes if validation fails. */
	u8 autoneg = bp->autoneg;
	u8 req_duplex = bp->req_duplex;
	u16 req_line_speed = bp->req_line_speed;
	u32 advertising = bp->advertising;

	if (cmd->autoneg == AUTONEG_ENABLE) {
		autoneg |= AUTONEG_SPEED;

		cmd->advertising &= ETHTOOL_ALL_COPPER_SPEED;

		/* allow advertising 1 speed */
		if ((cmd->advertising == ADVERTISED_10baseT_Half) ||
			(cmd->advertising == ADVERTISED_10baseT_Full) ||
			(cmd->advertising == ADVERTISED_100baseT_Half) ||
			(cmd->advertising == ADVERTISED_100baseT_Full)) {

			/* 10/100 modes are copper-only */
			if (bp->phy_flags & PHY_SERDES_FLAG)
				return -EINVAL;

			advertising = cmd->advertising;

		} else if (cmd->advertising == ADVERTISED_2500baseX_Full) {
			if (!(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
				return -EINVAL;
		} else if (cmd->advertising == ADVERTISED_1000baseT_Full) {
			advertising = cmd->advertising;
		}
		else if (cmd->advertising == ADVERTISED_1000baseT_Half) {
			/* 1000 half duplex is not supported by this hardware */
			return -EINVAL;
		}
		else {
			/* anything else: advertise everything the port supports */
			if (bp->phy_flags & PHY_SERDES_FLAG) {
				advertising = ETHTOOL_ALL_FIBRE_SPEED;
			}
			else {
				advertising = ETHTOOL_ALL_COPPER_SPEED;
			}
		}
		advertising |= ADVERTISED_Autoneg;
	}
	else {
		/* Forced speed/duplex. */
		if (bp->phy_flags & PHY_SERDES_FLAG) {
			/* SerDes can only be forced to 1000/2500 full duplex */
			if ((cmd->speed != SPEED_1000 &&
			     cmd->speed != SPEED_2500) ||
			    (cmd->duplex != DUPLEX_FULL))
				return -EINVAL;

			if (cmd->speed == SPEED_2500 &&
			    !(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
				return -EINVAL;
		}
		else if (cmd->speed == SPEED_1000) {
			/* copper cannot be forced to gigabit (autoneg required) */
			return -EINVAL;
		}
		autoneg &= ~AUTONEG_SPEED;
		req_line_speed = cmd->speed;
		req_duplex = cmd->duplex;
		advertising = 0;
	}

	/* Validation passed: commit the new configuration. */
	bp->autoneg = autoneg;
	bp->advertising = advertising;
	bp->req_line_speed = req_line_speed;
	bp->req_duplex = req_duplex;

	spin_lock_bh(&bp->phy_lock);

	bnx2_setup_phy(bp);

	spin_unlock_bh(&bp->phy_lock);

	return 0;
}
5192
5193 static void
5194 bnx2_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
5195 {
5196         struct bnx2 *bp = netdev_priv(dev);
5197
5198         strcpy(info->driver, DRV_MODULE_NAME);
5199         strcpy(info->version, DRV_MODULE_VERSION);
5200         strcpy(info->bus_info, pci_name(bp->pdev));
5201         info->fw_version[0] = ((bp->fw_ver & 0xff000000) >> 24) + '0';
5202         info->fw_version[2] = ((bp->fw_ver & 0xff0000) >> 16) + '0';
5203         info->fw_version[4] = ((bp->fw_ver & 0xff00) >> 8) + '0';
5204         info->fw_version[1] = info->fw_version[3] = '.';
5205         info->fw_version[5] = 0;
5206 }
5207
/* Size of the ethtool register dump window. */
#define BNX2_REGDUMP_LEN		(32 * 1024)

/* ethtool get_regs_len handler: the dump is always a fixed 32 KiB. */
static int
bnx2_get_regs_len(struct net_device *dev)
{
	return BNX2_REGDUMP_LEN;
}
5215
5216 static void
5217 bnx2_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *_p)
5218 {
5219         u32 *p = _p, i, offset;
5220         u8 *orig_p = _p;
5221         struct bnx2 *bp = netdev_priv(dev);
5222         u32 reg_boundaries[] = { 0x0000, 0x0098, 0x0400, 0x045c,
5223                                  0x0800, 0x0880, 0x0c00, 0x0c10,
5224                                  0x0c30, 0x0d08, 0x1000, 0x101c,
5225                                  0x1040, 0x1048, 0x1080, 0x10a4,
5226                                  0x1400, 0x1490, 0x1498, 0x14f0,
5227                                  0x1500, 0x155c, 0x1580, 0x15dc,
5228                                  0x1600, 0x1658, 0x1680, 0x16d8,
5229                                  0x1800, 0x1820, 0x1840, 0x1854,
5230                                  0x1880, 0x1894, 0x1900, 0x1984,
5231                                  0x1c00, 0x1c0c, 0x1c40, 0x1c54,
5232                                  0x1c80, 0x1c94, 0x1d00, 0x1d84,
5233                                  0x2000, 0x2030, 0x23c0, 0x2400,
5234                                  0x2800, 0x2820, 0x2830, 0x2850,
5235                                  0x2b40, 0x2c10, 0x2fc0, 0x3058,
5236                                  0x3c00, 0x3c94, 0x4000, 0x4010,
5237                                  0x4080, 0x4090, 0x43c0, 0x4458,
5238                                  0x4c00, 0x4c18, 0x4c40, 0x4c54,
5239                                  0x4fc0, 0x5010, 0x53c0, 0x5444,
5240                                  0x5c00, 0x5c18, 0x5c80, 0x5c90,
5241                                  0x5fc0, 0x6000, 0x6400, 0x6428,
5242                                  0x6800, 0x6848, 0x684c, 0x6860,
5243                                  0x6888, 0x6910, 0x8000 };
5244
5245         regs->version = 0;
5246
5247         memset(p, 0, BNX2_REGDUMP_LEN);
5248
5249         if (!netif_running(bp->dev))
5250                 return;
5251
5252         i = 0;
5253         offset = reg_boundaries[0];
5254         p += offset;
5255         while (offset < BNX2_REGDUMP_LEN) {
5256                 *p++ = REG_RD(bp, offset);
5257                 offset += 4;
5258                 if (offset == reg_boundaries[i + 1]) {
5259                         offset = reg_boundaries[i + 2];
5260                         p = (u32 *) (orig_p + offset);
5261                         i += 2;
5262                 }
5263         }
5264 }
5265
5266 static void
5267 bnx2_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
5268 {
5269         struct bnx2 *bp = netdev_priv(dev);
5270
5271         if (bp->flags & NO_WOL_FLAG) {
5272                 wol->supported = 0;
5273                 wol->wolopts = 0;
5274         }
5275         else {
5276                 wol->supported = WAKE_MAGIC;
5277                 if (bp->wol)
5278                         wol->wolopts = WAKE_MAGIC;
5279                 else
5280                         wol->wolopts = 0;
5281         }
5282         memset(&wol->sopass, 0, sizeof(wol->sopass));
5283 }
5284
5285 static int
5286 bnx2_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
5287 {
5288         struct bnx2 *bp = netdev_priv(dev);
5289
5290         if (wol->wolopts & ~WAKE_MAGIC)
5291                 return -EINVAL;
5292
5293         if (wol->wolopts & WAKE_MAGIC) {
5294                 if (bp->flags & NO_WOL_FLAG)
5295                         return -EINVAL;
5296
5297                 bp->wol = 1;
5298         }
5299         else {
5300                 bp->wol = 0;
5301         }
5302         return 0;
5303 }
5304
/* ethtool nway_reset handler: restart link autonegotiation.
 * Only valid when autoneg is enabled.  For SerDes links the PHY is first
 * put into loopback briefly so the peer sees the link drop, and the
 * driver's SerDes autoneg timeout state machine is re-armed.
 */
static int
bnx2_nway_reset(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	u32 bmcr;

	if (!(bp->autoneg & AUTONEG_SPEED)) {
		return -EINVAL;
	}

	spin_lock_bh(&bp->phy_lock);

	/* Force a link down visible on the other side */
	if (bp->phy_flags & PHY_SERDES_FLAG) {
		bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
		/* drop the lock while sleeping; msleep cannot be called
		 * with a BH-disabling spinlock held
		 */
		spin_unlock_bh(&bp->phy_lock);

		msleep(20);

		spin_lock_bh(&bp->phy_lock);

		/* re-arm the SerDes autoneg fallback timer */
		bp->current_interval = SERDES_AN_TIMEOUT;
		bp->serdes_an_pending = 1;
		mod_timer(&bp->timer, jiffies + bp->current_interval);
	}

	/* clear loopback and kick off a fresh autonegotiation */
	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
	bmcr &= ~BMCR_LOOPBACK;
	bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART | BMCR_ANENABLE);

	spin_unlock_bh(&bp->phy_lock);

	return 0;
}
5339
5340 static int
5341 bnx2_get_eeprom_len(struct net_device *dev)
5342 {
5343         struct bnx2 *bp = netdev_priv(dev);
5344
5345         if (bp->flash_info == NULL)
5346                 return 0;
5347
5348         return (int) bp->flash_size;
5349 }
5350
5351 static int
5352 bnx2_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
5353                 u8 *eebuf)
5354 {
5355         struct bnx2 *bp = netdev_priv(dev);
5356         int rc;
5357
5358         /* parameters already validated in ethtool_get_eeprom */
5359
5360         rc = bnx2_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
5361
5362         return rc;
5363 }
5364
5365 static int
5366 bnx2_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
5367                 u8 *eebuf)
5368 {
5369         struct bnx2 *bp = netdev_priv(dev);
5370         int rc;
5371
5372         /* parameters already validated in ethtool_set_eeprom */
5373
5374         rc = bnx2_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
5375
5376         return rc;
5377 }
5378
5379 static int
5380 bnx2_get_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
5381 {
5382         struct bnx2 *bp = netdev_priv(dev);
5383
5384         memset(coal, 0, sizeof(struct ethtool_coalesce));
5385
5386         coal->rx_coalesce_usecs = bp->rx_ticks;
5387         coal->rx_max_coalesced_frames = bp->rx_quick_cons_trip;
5388         coal->rx_coalesce_usecs_irq = bp->rx_ticks_int;
5389         coal->rx_max_coalesced_frames_irq = bp->rx_quick_cons_trip_int;
5390
5391         coal->tx_coalesce_usecs = bp->tx_ticks;
5392         coal->tx_max_coalesced_frames = bp->tx_quick_cons_trip;
5393         coal->tx_coalesce_usecs_irq = bp->tx_ticks_int;
5394         coal->tx_max_coalesced_frames_irq = bp->tx_quick_cons_trip_int;
5395
5396         coal->stats_block_coalesce_usecs = bp->stats_ticks;
5397
5398         return 0;
5399 }
5400
/* ethtool set_coalesce handler: accept new coalescing parameters,
 * clamping each to its hardware maximum (0x3ff ticks, 0xff frames),
 * then restart the NIC so the new values are programmed.
 * Note the order: each value is truncated to u16 first, then clamped —
 * so values that wrap in 16 bits are taken modulo 0x10000, not clamped.
 */
static int
bnx2_set_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
{
	struct bnx2 *bp = netdev_priv(dev);

	bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
	if (bp->rx_ticks > 0x3ff) bp->rx_ticks = 0x3ff;

	bp->rx_quick_cons_trip = (u16) coal->rx_max_coalesced_frames;
	if (bp->rx_quick_cons_trip > 0xff) bp->rx_quick_cons_trip = 0xff;

	bp->rx_ticks_int = (u16) coal->rx_coalesce_usecs_irq;
	if (bp->rx_ticks_int > 0x3ff) bp->rx_ticks_int = 0x3ff;

	bp->rx_quick_cons_trip_int = (u16) coal->rx_max_coalesced_frames_irq;
	if (bp->rx_quick_cons_trip_int > 0xff)
		bp->rx_quick_cons_trip_int = 0xff;

	bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
	if (bp->tx_ticks > 0x3ff) bp->tx_ticks = 0x3ff;

	bp->tx_quick_cons_trip = (u16) coal->tx_max_coalesced_frames;
	if (bp->tx_quick_cons_trip > 0xff) bp->tx_quick_cons_trip = 0xff;

	bp->tx_ticks_int = (u16) coal->tx_coalesce_usecs_irq;
	if (bp->tx_ticks_int > 0x3ff) bp->tx_ticks_int = 0x3ff;

	bp->tx_quick_cons_trip_int = (u16) coal->tx_max_coalesced_frames_irq;
	if (bp->tx_quick_cons_trip_int > 0xff) bp->tx_quick_cons_trip_int =
		0xff;

	/* stats interval: clamp to 0xffff00 and keep only the upper bits
	 * (the low 8 bits of the register are not part of the interval)
	 */
	bp->stats_ticks = coal->stats_block_coalesce_usecs;
	if (bp->stats_ticks > 0xffff00) bp->stats_ticks = 0xffff00;
	bp->stats_ticks &= 0xffff00;

	/* reinitialize the chip so the new values take effect */
	if (netif_running(bp->dev)) {
		bnx2_netif_stop(bp);
		bnx2_init_nic(bp);
		bnx2_netif_start(bp);
	}

	return 0;
}
5444
5445 static void
5446 bnx2_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
5447 {
5448         struct bnx2 *bp = netdev_priv(dev);
5449
5450         ering->rx_max_pending = MAX_TOTAL_RX_DESC_CNT;
5451         ering->rx_mini_max_pending = 0;
5452         ering->rx_jumbo_max_pending = 0;
5453
5454         ering->rx_pending = bp->rx_ring_size;
5455         ering->rx_mini_pending = 0;
5456         ering->rx_jumbo_pending = 0;
5457
5458         ering->tx_max_pending = MAX_TX_DESC_CNT;
5459         ering->tx_pending = bp->tx_ring_size;
5460 }
5461
/* ethtool set_ringparam handler: resize the rx/tx rings.
 * tx_pending must exceed MAX_SKB_FRAGS so a maximally-fragmented skb
 * always fits in the ring.  If the device is up it is torn down, the
 * ring memory reallocated, and the device restarted.
 * NOTE(review): if bnx2_alloc_mem() fails the device is left stopped
 * with its memory freed — the caller only sees the error code; confirm
 * this is the intended failure mode.
 */
static int
bnx2_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
{
	struct bnx2 *bp = netdev_priv(dev);

	if ((ering->rx_pending > MAX_TOTAL_RX_DESC_CNT) ||
		(ering->tx_pending > MAX_TX_DESC_CNT) ||
		(ering->tx_pending <= MAX_SKB_FRAGS)) {

		return -EINVAL;
	}
	if (netif_running(bp->dev)) {
		/* quiesce the hardware and release the old rings */
		bnx2_netif_stop(bp);
		bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
		bnx2_free_skbs(bp);
		bnx2_free_mem(bp);
	}

	bnx2_set_rx_ring_size(bp, ering->rx_pending);
	bp->tx_ring_size = ering->tx_pending;

	if (netif_running(bp->dev)) {
		int rc;

		rc = bnx2_alloc_mem(bp);
		if (rc)
			return rc;
		bnx2_init_nic(bp);
		bnx2_netif_start(bp);
	}

	return 0;
}
5495
5496 static void
5497 bnx2_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
5498 {
5499         struct bnx2 *bp = netdev_priv(dev);
5500
5501         epause->autoneg = ((bp->autoneg & AUTONEG_FLOW_CTRL) != 0);
5502         epause->rx_pause = ((bp->flow_ctrl & FLOW_CTRL_RX) != 0);
5503         epause->tx_pause = ((bp->flow_ctrl & FLOW_CTRL_TX) != 0);
5504 }
5505
5506 static int
5507 bnx2_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
5508 {
5509         struct bnx2 *bp = netdev_priv(dev);
5510
5511         bp->req_flow_ctrl = 0;
5512         if (epause->rx_pause)
5513                 bp->req_flow_ctrl |= FLOW_CTRL_RX;
5514         if (epause->tx_pause)
5515                 bp->req_flow_ctrl |= FLOW_CTRL_TX;
5516
5517         if (epause->autoneg) {
5518                 bp->autoneg |= AUTONEG_FLOW_CTRL;
5519         }
5520         else {
5521                 bp->autoneg &= ~AUTONEG_FLOW_CTRL;
5522         }
5523
5524         spin_lock_bh(&bp->phy_lock);
5525
5526         bnx2_setup_phy(bp);
5527
5528         spin_unlock_bh(&bp->phy_lock);
5529
5530         return 0;
5531 }
5532
5533 static u32
5534 bnx2_get_rx_csum(struct net_device *dev)
5535 {
5536         struct bnx2 *bp = netdev_priv(dev);
5537
5538         return bp->rx_csum;
5539 }
5540
5541 static int
5542 bnx2_set_rx_csum(struct net_device *dev, u32 data)
5543 {
5544         struct bnx2 *bp = netdev_priv(dev);
5545
5546         bp->rx_csum = data;
5547         return 0;
5548 }
5549
5550 static int
5551 bnx2_set_tso(struct net_device *dev, u32 data)
5552 {
5553         struct bnx2 *bp = netdev_priv(dev);
5554
5555         if (data) {
5556                 dev->features |= NETIF_F_TSO | NETIF_F_TSO_ECN;
5557                 if (CHIP_NUM(bp) == CHIP_NUM_5709)
5558                         dev->features |= NETIF_F_TSO6;
5559         } else
5560                 dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6 |
5561                                    NETIF_F_TSO_ECN);
5562         return 0;
5563 }
5564
#define BNX2_NUM_STATS 46

/* ethtool statistics names.  The order here must match
 * bnx2_stats_offset_arr and the two stats_len arrays below, entry for
 * entry.
 */
static struct {
	char string[ETH_GSTRING_LEN];
} bnx2_stats_str_arr[BNX2_NUM_STATS] = {
	{ "rx_bytes" },
	{ "rx_error_bytes" },
	{ "tx_bytes" },
	{ "tx_error_bytes" },
	{ "rx_ucast_packets" },
	{ "rx_mcast_packets" },
	{ "rx_bcast_packets" },
	{ "tx_ucast_packets" },
	{ "tx_mcast_packets" },
	{ "tx_bcast_packets" },
	{ "tx_mac_errors" },
	{ "tx_carrier_errors" },
	{ "rx_crc_errors" },
	{ "rx_align_errors" },
	{ "tx_single_collisions" },
	{ "tx_multi_collisions" },
	{ "tx_deferred" },
	{ "tx_excess_collisions" },
	{ "tx_late_collisions" },
	{ "tx_total_collisions" },
	{ "rx_fragments" },
	{ "rx_jabbers" },
	{ "rx_undersize_packets" },
	{ "rx_oversize_packets" },
	{ "rx_64_byte_packets" },
	{ "rx_65_to_127_byte_packets" },
	{ "rx_128_to_255_byte_packets" },
	{ "rx_256_to_511_byte_packets" },
	{ "rx_512_to_1023_byte_packets" },
	{ "rx_1024_to_1522_byte_packets" },
	{ "rx_1523_to_9022_byte_packets" },
	{ "tx_64_byte_packets" },
	{ "tx_65_to_127_byte_packets" },
	{ "tx_128_to_255_byte_packets" },
	{ "tx_256_to_511_byte_packets" },
	{ "tx_512_to_1023_byte_packets" },
	{ "tx_1024_to_1522_byte_packets" },
	{ "tx_1523_to_9022_byte_packets" },
	{ "rx_xon_frames" },
	{ "rx_xoff_frames" },
	{ "tx_xon_frames" },
	{ "tx_xoff_frames" },
	{ "rx_mac_ctrl_frames" },
	{ "rx_filtered_packets" },
	{ "rx_discards" },
	{ "rx_fw_discards" },
};

/* Byte offset of a field in the DMA'd statistics block, expressed as a
 * u32 word index.
 */
#define STATS_OFFSET32(offset_name) (offsetof(struct statistics_block, offset_name) / 4)

/* Word offset of each counter in the statistics block, in the same
 * order as bnx2_stats_str_arr.  64-bit counters point at their _hi
 * word; the _lo word follows immediately.
 */
static const unsigned long bnx2_stats_offset_arr[BNX2_NUM_STATS] = {
    STATS_OFFSET32(stat_IfHCInOctets_hi),
    STATS_OFFSET32(stat_IfHCInBadOctets_hi),
    STATS_OFFSET32(stat_IfHCOutOctets_hi),
    STATS_OFFSET32(stat_IfHCOutBadOctets_hi),
    STATS_OFFSET32(stat_IfHCInUcastPkts_hi),
    STATS_OFFSET32(stat_IfHCInMulticastPkts_hi),
    STATS_OFFSET32(stat_IfHCInBroadcastPkts_hi),
    STATS_OFFSET32(stat_IfHCOutUcastPkts_hi),
    STATS_OFFSET32(stat_IfHCOutMulticastPkts_hi),
    STATS_OFFSET32(stat_IfHCOutBroadcastPkts_hi),
    STATS_OFFSET32(stat_emac_tx_stat_dot3statsinternalmactransmiterrors),
    STATS_OFFSET32(stat_Dot3StatsCarrierSenseErrors),
    STATS_OFFSET32(stat_Dot3StatsFCSErrors),
    STATS_OFFSET32(stat_Dot3StatsAlignmentErrors),
    STATS_OFFSET32(stat_Dot3StatsSingleCollisionFrames),
    STATS_OFFSET32(stat_Dot3StatsMultipleCollisionFrames),
    STATS_OFFSET32(stat_Dot3StatsDeferredTransmissions),
    STATS_OFFSET32(stat_Dot3StatsExcessiveCollisions),
    STATS_OFFSET32(stat_Dot3StatsLateCollisions),
    STATS_OFFSET32(stat_EtherStatsCollisions),
    STATS_OFFSET32(stat_EtherStatsFragments),
    STATS_OFFSET32(stat_EtherStatsJabbers),
    STATS_OFFSET32(stat_EtherStatsUndersizePkts),
    STATS_OFFSET32(stat_EtherStatsOverrsizePkts),
    STATS_OFFSET32(stat_EtherStatsPktsRx64Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx65Octetsto127Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx128Octetsto255Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx256Octetsto511Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx512Octetsto1023Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx1024Octetsto1522Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx1523Octetsto9022Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx64Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx65Octetsto127Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx128Octetsto255Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx256Octetsto511Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx512Octetsto1023Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx1024Octetsto1522Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx1523Octetsto9022Octets),
    STATS_OFFSET32(stat_XonPauseFramesReceived),
    STATS_OFFSET32(stat_XoffPauseFramesReceived),
    STATS_OFFSET32(stat_OutXonSent),
    STATS_OFFSET32(stat_OutXoffSent),
    STATS_OFFSET32(stat_MacControlFramesReceived),
    STATS_OFFSET32(stat_IfInFramesL2FilterDiscards),
    STATS_OFFSET32(stat_IfInMBUFDiscards),
    STATS_OFFSET32(stat_FwRxDrop),
};

/* stat_IfHCInBadOctets and stat_Dot3StatsCarrierSenseErrors are
 * skipped because of errata.
 */
/* Per-counter width in bytes (8 = 64-bit hi/lo pair, 4 = 32-bit,
 * 0 = counter skipped) for 5706 and early-5708-class chips.
 */
static u8 bnx2_5706_stats_len_arr[BNX2_NUM_STATS] = {
	8,0,8,8,8,8,8,8,8,8,
	4,0,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,
};

/* Same table for later chips, where only stat_IfHCInBadOctets is skipped. */
static u8 bnx2_5708_stats_len_arr[BNX2_NUM_STATS] = {
	8,0,8,8,8,8,8,8,8,8,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,
};

#define BNX2_NUM_TESTS 6

/* ethtool self-test names; order must match the buf[] indices used in
 * bnx2_self_test().
 */
static struct {
	char string[ETH_GSTRING_LEN];
} bnx2_tests_str_arr[BNX2_NUM_TESTS] = {
	{ "register_test (offline)" },
	{ "memory_test (offline)" },
	{ "loopback_test (offline)" },
	{ "nvram_test (online)" },
	{ "interrupt_test (online)" },
	{ "link_test (online)" },
};
5700
5701 static int
5702 bnx2_self_test_count(struct net_device *dev)
5703 {
5704         return BNX2_NUM_TESTS;
5705 }
5706
/* ethtool self_test handler.  buf[i] holds the result of test i in the
 * order of bnx2_tests_str_arr (nonzero = failed).  The register, memory
 * and loopback tests are destructive and only run when the caller
 * requested offline testing; the chip is reset into diagnostic mode for
 * them and reinitialized (or left reset if the interface is down)
 * afterwards.  NVRAM, interrupt and link tests always run.
 */
static void
bnx2_self_test(struct net_device *dev, struct ethtool_test *etest, u64 *buf)
{
	struct bnx2 *bp = netdev_priv(dev);

	memset(buf, 0, sizeof(u64) * BNX2_NUM_TESTS);
	if (etest->flags & ETH_TEST_FL_OFFLINE) {
		int i;

		/* take the device down and put the chip in diag mode */
		bnx2_netif_stop(bp);
		bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_DIAG);
		bnx2_free_skbs(bp);

		if (bnx2_test_registers(bp) != 0) {
			buf[0] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}
		if (bnx2_test_memory(bp) != 0) {
			buf[1] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}
		/* loopback test returns a bitmask of failed loopback modes */
		if ((buf[2] = bnx2_test_loopback(bp)) != 0)
			etest->flags |= ETH_TEST_FL_FAILED;

		if (!netif_running(bp->dev)) {
			bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
		}
		else {
			/* bring the device back up for the online tests */
			bnx2_init_nic(bp);
			bnx2_netif_start(bp);
		}

		/* wait for link up */
		for (i = 0; i < 7; i++) {
			if (bp->link_up)
				break;
			msleep_interruptible(1000);
		}
	}

	if (bnx2_test_nvram(bp) != 0) {
		buf[3] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}
	if (bnx2_test_intr(bp) != 0) {
		buf[4] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}

	if (bnx2_test_link(bp) != 0) {
		buf[5] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;

	}
}
5762
5763 static void
5764 bnx2_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
5765 {
5766         switch (stringset) {
5767         case ETH_SS_STATS:
5768                 memcpy(buf, bnx2_stats_str_arr,
5769                         sizeof(bnx2_stats_str_arr));
5770                 break;
5771         case ETH_SS_TEST:
5772                 memcpy(buf, bnx2_tests_str_arr,
5773                         sizeof(bnx2_tests_str_arr));
5774                 break;
5775         }
5776 }
5777
5778 static int
5779 bnx2_get_stats_count(struct net_device *dev)
5780 {
5781         return BNX2_NUM_STATS;
5782 }
5783
/* ethtool get_ethtool_stats handler: copy counters out of the DMA'd
 * statistics block into buf[], in bnx2_stats_str_arr order.  A per-chip
 * length table says whether each counter is a 64-bit hi/lo pair, a
 * 32-bit value, or skipped entirely (chip errata).
 */
static void
bnx2_get_ethtool_stats(struct net_device *dev,
		struct ethtool_stats *stats, u64 *buf)
{
	struct bnx2 *bp = netdev_priv(dev);
	int i;
	u32 *hw_stats = (u32 *) bp->stats_blk;
	u8 *stats_len_arr = NULL;

	/* stats block not allocated yet (device never brought up) */
	if (hw_stats == NULL) {
		memset(buf, 0, sizeof(u64) * BNX2_NUM_STATS);
		return;
	}

	/* early chip revisions skip extra counters due to errata */
	if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
	    (CHIP_ID(bp) == CHIP_ID_5706_A1) ||
	    (CHIP_ID(bp) == CHIP_ID_5706_A2) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_A0))
		stats_len_arr = bnx2_5706_stats_len_arr;
	else
		stats_len_arr = bnx2_5708_stats_len_arr;

	for (i = 0; i < BNX2_NUM_STATS; i++) {
		if (stats_len_arr[i] == 0) {
			/* skip this counter */
			buf[i] = 0;
			continue;
		}
		if (stats_len_arr[i] == 4) {
			/* 4-byte counter */
			buf[i] = (u64)
				*(hw_stats + bnx2_stats_offset_arr[i]);
			continue;
		}
		/* 8-byte counter */
		buf[i] = (((u64) *(hw_stats +
					bnx2_stats_offset_arr[i])) << 32) +
				*(hw_stats + bnx2_stats_offset_arr[i] + 1);
	}
}
5824
/* ethtool phys_id handler: blink the port LEDs for `data` seconds (2 by
 * default) so the operator can identify the physical port.  The LED
 * block is switched to MAC override mode, toggled once per second, then
 * restored.
 */
static int
bnx2_phys_id(struct net_device *dev, u32 data)
{
	struct bnx2 *bp = netdev_priv(dev);
	int i;
	u32 save;

	if (data == 0)
		data = 2;

	/* save LED config and take manual control of the LEDs */
	save = REG_RD(bp, BNX2_MISC_CFG);
	REG_WR(bp, BNX2_MISC_CFG, BNX2_MISC_CFG_LEDMODE_MAC);

	/* two half-second phases per blink: all-off, then all-on */
	for (i = 0; i < (data * 2); i++) {
		if ((i % 2) == 0) {
			REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE);
		}
		else {
			REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE |
				BNX2_EMAC_LED_1000MB_OVERRIDE |
				BNX2_EMAC_LED_100MB_OVERRIDE |
				BNX2_EMAC_LED_10MB_OVERRIDE |
				BNX2_EMAC_LED_TRAFFIC_OVERRIDE |
				BNX2_EMAC_LED_TRAFFIC);
		}
		msleep_interruptible(500);
		/* let the user interrupt a long blink early */
		if (signal_pending(current))
			break;
	}
	/* return LED control to the hardware and restore the saved mode */
	REG_WR(bp, BNX2_EMAC_LED, 0);
	REG_WR(bp, BNX2_MISC_CFG, save);
	return 0;
}
5858
5859 static int
5860 bnx2_set_tx_csum(struct net_device *dev, u32 data)
5861 {
5862         struct bnx2 *bp = netdev_priv(dev);
5863
5864         if (CHIP_NUM(bp) == CHIP_NUM_5709)
5865                 return (ethtool_op_set_tx_hw_csum(dev, data));
5866         else
5867                 return (ethtool_op_set_tx_csum(dev, data));
5868 }
5869
/* ethtool operations table; handlers above are wired in here.  Generic
 * ethtool_op_* helpers are used where no chip-specific behavior is
 * needed.
 */
static const struct ethtool_ops bnx2_ethtool_ops = {
	.get_settings		= bnx2_get_settings,
	.set_settings		= bnx2_set_settings,
	.get_drvinfo		= bnx2_get_drvinfo,
	.get_regs_len		= bnx2_get_regs_len,
	.get_regs		= bnx2_get_regs,
	.get_wol		= bnx2_get_wol,
	.set_wol		= bnx2_set_wol,
	.nway_reset		= bnx2_nway_reset,
	.get_link		= ethtool_op_get_link,
	.get_eeprom_len		= bnx2_get_eeprom_len,
	.get_eeprom		= bnx2_get_eeprom,
	.set_eeprom		= bnx2_set_eeprom,
	.get_coalesce		= bnx2_get_coalesce,
	.set_coalesce		= bnx2_set_coalesce,
	.get_ringparam		= bnx2_get_ringparam,
	.set_ringparam		= bnx2_set_ringparam,
	.get_pauseparam		= bnx2_get_pauseparam,
	.set_pauseparam		= bnx2_set_pauseparam,
	.get_rx_csum		= bnx2_get_rx_csum,
	.set_rx_csum		= bnx2_set_rx_csum,
	.get_tx_csum		= ethtool_op_get_tx_csum,
	.set_tx_csum		= bnx2_set_tx_csum,
	.get_sg			= ethtool_op_get_sg,
	.set_sg			= ethtool_op_set_sg,
	.get_tso		= ethtool_op_get_tso,
	.set_tso		= bnx2_set_tso,
	.self_test_count	= bnx2_self_test_count,
	.self_test		= bnx2_self_test,
	.get_strings		= bnx2_get_strings,
	.phys_id		= bnx2_phys_id,
	.get_stats_count	= bnx2_get_stats_count,
	.get_ethtool_stats	= bnx2_get_ethtool_stats,
	.get_perm_addr		= ethtool_op_get_perm_addr,
};
5905
5906 /* Called with rtnl_lock */
/* Net-device ioctl handler (called with rtnl_lock held): implements the
 * standard MII ioctls.  SIOCGMIIPHY fills in the PHY address and then
 * deliberately falls through to SIOCGMIIREG, per MII ioctl convention.
 * MII register access requires the device to be up (-EAGAIN otherwise),
 * and writes additionally require CAP_NET_ADMIN.
 */
static int
bnx2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct mii_ioctl_data *data = if_mii(ifr);
	struct bnx2 *bp = netdev_priv(dev);
	int err;

	switch(cmd) {
	case SIOCGMIIPHY:
		data->phy_id = bp->phy_addr;

		/* fallthru */
	case SIOCGMIIREG: {
		u32 mii_regval;

		if (!netif_running(dev))
			return -EAGAIN;

		/* serialize PHY access against the link state machine */
		spin_lock_bh(&bp->phy_lock);
		err = bnx2_read_phy(bp, data->reg_num & 0x1f, &mii_regval);
		spin_unlock_bh(&bp->phy_lock);

		data->val_out = mii_regval;

		return err;
	}

	case SIOCSMIIREG:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;

		if (!netif_running(dev))
			return -EAGAIN;

		spin_lock_bh(&bp->phy_lock);
		err = bnx2_write_phy(bp, data->reg_num & 0x1f, data->val_in);
		spin_unlock_bh(&bp->phy_lock);

		return err;

	default:
		/* do nothing */
		break;
	}
	return -EOPNOTSUPP;
}
5953
5954 /* Called with rtnl_lock */
5955 static int
5956 bnx2_change_mac_addr(struct net_device *dev, void *p)
5957 {
5958         struct sockaddr *addr = p;
5959         struct bnx2 *bp = netdev_priv(dev);
5960
5961         if (!is_valid_ether_addr(addr->sa_data))
5962                 return -EINVAL;
5963
5964         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
5965         if (netif_running(dev))
5966                 bnx2_set_mac_addr(bp);
5967
5968         return 0;
5969 }
5970
5971 /* Called with rtnl_lock */
5972 static int
5973 bnx2_change_mtu(struct net_device *dev, int new_mtu)
5974 {
5975         struct bnx2 *bp = netdev_priv(dev);
5976
5977         if (((new_mtu + ETH_HLEN) > MAX_ETHERNET_JUMBO_PACKET_SIZE) ||
5978                 ((new_mtu + ETH_HLEN) < MIN_ETHERNET_PACKET_SIZE))
5979                 return -EINVAL;
5980
5981         dev->mtu = new_mtu;
5982         if (netif_running(dev)) {
5983                 bnx2_netif_stop(bp);
5984
5985                 bnx2_init_nic(bp);
5986
5987                 bnx2_netif_start(bp);
5988         }
5989         return 0;
5990 }
5991
#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
/* netpoll hook: run the interrupt handler with the device IRQ disabled
 * so it can be called from contexts where interrupts must not fire.
 */
static void
poll_bnx2(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	unsigned int irq = bp->pdev->irq;

	disable_irq(irq);
	bnx2_interrupt(irq, dev);
	enable_irq(irq);
}
#endif
6003
6004 static void __devinit
6005 bnx2_get_5709_media(struct bnx2 *bp)
6006 {
6007         u32 val = REG_RD(bp, BNX2_MISC_DUAL_MEDIA_CTRL);
6008         u32 bond_id = val & BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID;
6009         u32 strap;
6010
6011         if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_C)
6012                 return;
6013         else if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_S) {
6014                 bp->phy_flags |= PHY_SERDES_FLAG;
6015                 return;
6016         }
6017
6018         if (val & BNX2_MISC_DUAL_MEDIA_CTRL_STRAP_OVERRIDE)
6019                 strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL) >> 21;
6020         else
6021                 strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL_STRAP) >> 8;
6022
6023         if (PCI_FUNC(bp->pdev->devfn) == 0) {
6024                 switch (strap) {
6025                 case 0x4:
6026                 case 0x5:
6027                 case 0x6:
6028                         bp->phy_flags |= PHY_SERDES_FLAG;
6029                         return;
6030                 }
6031         } else {
6032                 switch (strap) {
6033                 case 0x1:
6034                 case 0x2:
6035                 case 0x4:
6036                         bp->phy_flags |= PHY_SERDES_FLAG;
6037                         return;
6038                 }
6039         }
6040 }
6041
6042 static void __devinit
6043 bnx2_get_pci_speed(struct bnx2 *bp)
6044 {
6045         u32 reg;
6046
6047         reg = REG_RD(bp, BNX2_PCICFG_MISC_STATUS);
6048         if (reg & BNX2_PCICFG_MISC_STATUS_PCIX_DET) {
6049                 u32 clkreg;
6050
6051                 bp->flags |= PCIX_FLAG;
6052
6053                 clkreg = REG_RD(bp, BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS);
6054
6055                 clkreg &= BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET;
6056                 switch (clkreg) {
6057                 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_133MHZ:
6058                         bp->bus_speed_mhz = 133;
6059                         break;
6060
6061                 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_95MHZ:
6062                         bp->bus_speed_mhz = 100;
6063                         break;
6064
6065                 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_66MHZ:
6066                 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_80MHZ:
6067                         bp->bus_speed_mhz = 66;
6068                         break;
6069
6070                 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_48MHZ:
6071                 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_55MHZ:
6072                         bp->bus_speed_mhz = 50;
6073                         break;
6074
6075                 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_LOW:
6076                 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_32MHZ:
6077                 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_38MHZ:
6078                         bp->bus_speed_mhz = 33;
6079                         break;
6080                 }
6081         }
6082         else {
6083                 if (reg & BNX2_PCICFG_MISC_STATUS_M66EN)
6084                         bp->bus_speed_mhz = 66;
6085                 else
6086                         bp->bus_speed_mhz = 33;
6087         }
6088
6089         if (reg & BNX2_PCICFG_MISC_STATUS_32BIT_DET)
6090                 bp->flags |= PCI_32BIT_FLAG;
6091
6092 }
6093
6094 static int __devinit
6095 bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
6096 {
6097         struct bnx2 *bp;
6098         unsigned long mem_len;
6099         int rc;
6100         u32 reg;
6101         u64 dma_mask, persist_dma_mask;
6102
6103         SET_MODULE_OWNER(dev);
6104         SET_NETDEV_DEV(dev, &pdev->dev);
6105         bp = netdev_priv(dev);
6106
6107         bp->flags = 0;
6108         bp->phy_flags = 0;
6109
6110         /* enable device (incl. PCI PM wakeup), and bus-mastering */
6111         rc = pci_enable_device(pdev);
6112         if (rc) {
6113                 dev_err(&pdev->dev, "Cannot enable PCI device, aborting.");
6114                 goto err_out;
6115         }
6116
6117         if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
6118                 dev_err(&pdev->dev,
6119                         "Cannot find PCI device base address, aborting.\n");
6120                 rc = -ENODEV;
6121                 goto err_out_disable;
6122         }
6123
6124         rc = pci_request_regions(pdev, DRV_MODULE_NAME);
6125         if (rc) {
6126                 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting.\n");
6127                 goto err_out_disable;
6128         }
6129
6130         pci_set_master(pdev);
6131
6132         bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
6133         if (bp->pm_cap == 0) {
6134                 dev_err(&pdev->dev,
6135                         "Cannot find power management capability, aborting.\n");
6136                 rc = -EIO;
6137                 goto err_out_release;
6138         }
6139
6140         bp->dev = dev;
6141         bp->pdev = pdev;
6142
6143         spin_lock_init(&bp->phy_lock);
6144         spin_lock_init(&bp->indirect_lock);
6145         INIT_WORK(&bp->reset_task, bnx2_reset_task);
6146
6147         dev->base_addr = dev->mem_start = pci_resource_start(pdev, 0);
6148         mem_len = MB_GET_CID_ADDR(TX_TSS_CID + 1);
6149         dev->mem_end = dev->mem_start + mem_len;
6150         dev->irq = pdev->irq;
6151
6152         bp->regview = ioremap_nocache(dev->base_addr, mem_len);
6153
6154         if (!bp->regview) {
6155                 dev_err(&pdev->dev, "Cannot map register space, aborting.\n");
6156                 rc = -ENOMEM;
6157                 goto err_out_release;
6158         }
6159
6160         /* Configure byte swap and enable write to the reg_window registers.
6161          * Rely on CPU to do target byte swapping on big endian systems
6162          * The chip's target access swapping will not swap all accesses
6163          */
6164         pci_write_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG,
6165                                BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
6166                                BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP);
6167
6168         bnx2_set_power_state(bp, PCI_D0);
6169
6170         bp->chip_id = REG_RD(bp, BNX2_MISC_ID);
6171
6172         if (CHIP_NUM(bp) == CHIP_NUM_5709) {
6173                 if (pci_find_capability(pdev, PCI_CAP_ID_EXP) == 0) {
6174                         dev_err(&pdev->dev,
6175                                 "Cannot find PCIE capability, aborting.\n");
6176                         rc = -EIO;
6177                         goto err_out_unmap;
6178                 }
6179                 bp->flags |= PCIE_FLAG;
6180         } else {
6181                 bp->pcix_cap = pci_find_capability(pdev, PCI_CAP_ID_PCIX);
6182                 if (bp->pcix_cap == 0) {
6183                         dev_err(&pdev->dev,
6184                                 "Cannot find PCIX capability, aborting.\n");
6185                         rc = -EIO;
6186                         goto err_out_unmap;
6187                 }
6188         }
6189
6190         if (CHIP_ID(bp) != CHIP_ID_5706_A0 && CHIP_ID(bp) != CHIP_ID_5706_A1) {
6191                 if (pci_find_capability(pdev, PCI_CAP_ID_MSI))
6192                         bp->flags |= MSI_CAP_FLAG;
6193         }
6194
6195         /* 5708 cannot support DMA addresses > 40-bit.  */
6196         if (CHIP_NUM(bp) == CHIP_NUM_5708)
6197                 persist_dma_mask = dma_mask = DMA_40BIT_MASK;
6198         else
6199                 persist_dma_mask = dma_mask = DMA_64BIT_MASK;
6200
6201         /* Configure DMA attributes. */
6202         if (pci_set_dma_mask(pdev, dma_mask) == 0) {
6203                 dev->features |= NETIF_F_HIGHDMA;
6204                 rc = pci_set_consistent_dma_mask(pdev, persist_dma_mask);
6205                 if (rc) {
6206                         dev_err(&pdev->dev,
6207                                 "pci_set_consistent_dma_mask failed, aborting.\n");
6208                         goto err_out_unmap;
6209                 }
6210         } else if ((rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK)) != 0) {
6211                 dev_err(&pdev->dev, "System does not support DMA, aborting.\n");
6212                 goto err_out_unmap;
6213         }
6214
6215         if (!(bp->flags & PCIE_FLAG))
6216                 bnx2_get_pci_speed(bp);
6217
6218         /* 5706A0 may falsely detect SERR and PERR. */
6219         if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
6220                 reg = REG_RD(bp, PCI_COMMAND);
6221                 reg &= ~(PCI_COMMAND_SERR | PCI_COMMAND_PARITY);
6222                 REG_WR(bp, PCI_COMMAND, reg);
6223         }
6224         else if ((CHIP_ID(bp) == CHIP_ID_5706_A1) &&
6225                 !(bp->flags & PCIX_FLAG)) {
6226
6227                 dev_err(&pdev->dev,
6228                         "5706 A1 can only be used in a PCIX bus, aborting.\n");
6229                 goto err_out_unmap;
6230         }
6231
6232         bnx2_init_nvram(bp);
6233
6234         reg = REG_RD_IND(bp, BNX2_SHM_HDR_SIGNATURE);
6235
6236         if ((reg & BNX2_SHM_HDR_SIGNATURE_SIG_MASK) ==
6237             BNX2_SHM_HDR_SIGNATURE_SIG) {
6238                 u32 off = PCI_FUNC(pdev->devfn) << 2;
6239
6240                 bp->shmem_base = REG_RD_IND(bp, BNX2_SHM_HDR_ADDR_0 + off);
6241         } else
6242                 bp->shmem_base = HOST_VIEW_SHMEM_BASE;
6243
6244         /* Get the permanent MAC address.  First we need to make sure the
6245          * firmware is actually running.
6246          */
6247         reg = REG_RD_IND(bp, bp->shmem_base + BNX2_DEV_INFO_SIGNATURE);
6248
6249         if ((reg & BNX2_DEV_INFO_SIGNATURE_MAGIC_MASK) !=
6250             BNX2_DEV_INFO_SIGNATURE_MAGIC) {
6251                 dev_err(&pdev->dev, "Firmware not running, aborting.\n");
6252                 rc = -ENODEV;
6253                 goto err_out_unmap;
6254         }
6255
6256         bp->fw_ver = REG_RD_IND(bp, bp->shmem_base + BNX2_DEV_INFO_BC_REV);
6257
6258         reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_MAC_UPPER);
6259         bp->mac_addr[0] = (u8) (reg >> 8);
6260         bp->mac_addr[1] = (u8) reg;
6261
6262         reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_MAC_LOWER);
6263         bp->mac_addr[2] = (u8) (reg >> 24);
6264         bp->mac_addr[3] = (u8) (reg >> 16);
6265         bp->mac_addr[4] = (u8) (reg >> 8);
6266         bp->mac_addr[5] = (u8) reg;
6267
6268         bp->tx_ring_size = MAX_TX_DESC_CNT;
6269         bnx2_set_rx_ring_size(bp, 255);
6270
6271         bp->rx_csum = 1;
6272
6273         bp->rx_offset = sizeof(struct l2_fhdr) + 2;
6274
6275         bp->tx_quick_cons_trip_int = 20;
6276         bp->tx_quick_cons_trip = 20;
6277         bp->tx_ticks_int = 80;
6278         bp->tx_ticks = 80;
6279
6280         bp->rx_quick_cons_trip_int = 6;
6281         bp->rx_quick_cons_trip = 6;
6282         bp->rx_ticks_int = 18;
6283         bp->rx_ticks = 18;
6284
6285         bp->stats_ticks = 1000000 & 0xffff00;
6286
6287         bp->timer_interval =  HZ;
6288         bp->current_interval =  HZ;
6289
6290         bp->phy_addr = 1;
6291
6292         /* Disable WOL support if we are running on a SERDES chip. */
6293         if (CHIP_NUM(bp) == CHIP_NUM_5709)
6294                 bnx2_get_5709_media(bp);
6295         else if (CHIP_BOND_ID(bp) & CHIP_BOND_ID_SERDES_BIT)
6296                 bp->phy_flags |= PHY_SERDES_FLAG;
6297
6298         if (bp->phy_flags & PHY_SERDES_FLAG) {
6299                 bp->flags |= NO_WOL_FLAG;
6300                 if (CHIP_NUM(bp) != CHIP_NUM_5706) {
6301                         bp->phy_addr = 2;
6302                         reg = REG_RD_IND(bp, bp->shmem_base +
6303                                          BNX2_SHARED_HW_CFG_CONFIG);
6304                         if (reg & BNX2_SHARED_HW_CFG_PHY_2_5G)
6305                                 bp->phy_flags |= PHY_2_5G_CAPABLE_FLAG;
6306                 }
6307         } else if (CHIP_NUM(bp) == CHIP_NUM_5706 ||
6308                    CHIP_NUM(bp) == CHIP_NUM_5708)
6309                 bp->phy_flags |= PHY_CRC_FIX_FLAG;
6310         else if (CHIP_ID(bp) == CHIP_ID_5709_A0)
6311                 bp->phy_flags |= PHY_DIS_EARLY_DAC_FLAG;
6312
6313         if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
6314             (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
6315             (CHIP_ID(bp) == CHIP_ID_5708_B1))
6316                 bp->flags |= NO_WOL_FLAG;
6317
6318         if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
6319                 bp->tx_quick_cons_trip_int =
6320                         bp->tx_quick_cons_trip;
6321                 bp->tx_ticks_int = bp->tx_ticks;
6322                 bp->rx_quick_cons_trip_int =
6323                         bp->rx_quick_cons_trip;
6324                 bp->rx_ticks_int = bp->rx_ticks;
6325                 bp->comp_prod_trip_int = bp->comp_prod_trip;
6326                 bp->com_ticks_int = bp->com_ticks;
6327                 bp->cmd_ticks_int = bp->cmd_ticks;
6328         }
6329
6330         /* Disable MSI on 5706 if AMD 8132 bridge is found.
6331          *
6332          * MSI is defined to be 32-bit write.  The 5706 does 64-bit MSI writes
6333          * with byte enables disabled on the unused 32-bit word.  This is legal
6334          * but causes problems on the AMD 8132 which will eventually stop
6335          * responding after a while.
6336          *
6337          * AMD believes this incompatibility is unique to the 5706, and
6338          * prefers to locally disable MSI rather than globally disabling it.
6339          */
6340         if (CHIP_NUM(bp) == CHIP_NUM_5706 && disable_msi == 0) {
6341                 struct pci_dev *amd_8132 = NULL;
6342
6343                 while ((amd_8132 = pci_get_device(PCI_VENDOR_ID_AMD,
6344                                                   PCI_DEVICE_ID_AMD_8132_BRIDGE,
6345                                                   amd_8132))) {
6346                         u8 rev;
6347
6348                         pci_read_config_byte(amd_8132, PCI_REVISION_ID, &rev);
6349                         if (rev >= 0x10 && rev <= 0x13) {
6350                                 disable_msi = 1;
6351                                 pci_dev_put(amd_8132);
6352                                 break;
6353                         }
6354                 }
6355         }
6356
6357         bp->autoneg = AUTONEG_SPEED | AUTONEG_FLOW_CTRL;
6358         bp->req_line_speed = 0;
6359         if (bp->phy_flags & PHY_SERDES_FLAG) {
6360                 bp->advertising = ETHTOOL_ALL_FIBRE_SPEED | ADVERTISED_Autoneg;
6361
6362                 reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_CONFIG);
6363                 reg &= BNX2_PORT_HW_CFG_CFG_DFLT_LINK_MASK;
6364                 if (reg == BNX2_PORT_HW_CFG_CFG_DFLT_LINK_1G) {
6365                         bp->autoneg = 0;
6366                         bp->req_line_speed = bp->line_speed = SPEED_1000;
6367                         bp->req_duplex = DUPLEX_FULL;
6368                 }
6369         }
6370         else {
6371                 bp->advertising = ETHTOOL_ALL_COPPER_SPEED | ADVERTISED_Autoneg;
6372         }
6373
6374         bp->req_flow_ctrl = FLOW_CTRL_RX | FLOW_CTRL_TX;
6375
6376         init_timer(&bp->timer);
6377         bp->timer.expires = RUN_AT(bp->timer_interval);
6378         bp->timer.data = (unsigned long) bp;
6379         bp->timer.function = bnx2_timer;
6380
6381         return 0;
6382
6383 err_out_unmap:
6384         if (bp->regview) {
6385                 iounmap(bp->regview);
6386                 bp->regview = NULL;
6387         }
6388
6389 err_out_release:
6390         pci_release_regions(pdev);
6391
6392 err_out_disable:
6393         pci_disable_device(pdev);
6394         pci_set_drvdata(pdev, NULL);
6395
6396 err_out:
6397         return rc;
6398 }
6399
6400 static char * __devinit
6401 bnx2_bus_string(struct bnx2 *bp, char *str)
6402 {
6403         char *s = str;
6404
6405         if (bp->flags & PCIE_FLAG) {
6406                 s += sprintf(s, "PCI Express");
6407         } else {
6408                 s += sprintf(s, "PCI");
6409                 if (bp->flags & PCIX_FLAG)
6410                         s += sprintf(s, "-X");
6411                 if (bp->flags & PCI_32BIT_FLAG)
6412                         s += sprintf(s, " 32-bit");
6413                 else
6414                         s += sprintf(s, " 64-bit");
6415                 s += sprintf(s, " %dMHz", bp->bus_speed_mhz);
6416         }
6417         return str;
6418 }
6419
/* PCI probe entry point: allocate the net_device, run the one-time board
 * init, wire up the net_device operations and feature flags, register
 * the interface, and log a summary line.  Ordering matters: the board
 * must be fully initialized (MAC address, chip id) before the features
 * are chosen, and register_netdev() comes last so the interface is never
 * visible half-configured.
 */
static int __devinit
bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	static int version_printed = 0;
	struct net_device *dev = NULL;
	struct bnx2 *bp;
	int rc, i;
	char str[40];

	/* Print the driver banner once, on the first probed device. */
	if (version_printed++ == 0)
		printk(KERN_INFO "%s", version);

	/* dev zeroed in init_etherdev */
	dev = alloc_etherdev(sizeof(*bp));

	if (!dev)
		return -ENOMEM;

	rc = bnx2_init_board(pdev, dev);
	if (rc < 0) {
		/* bnx2_init_board released its own resources on failure. */
		free_netdev(dev);
		return rc;
	}

	dev->open = bnx2_open;
	dev->hard_start_xmit = bnx2_start_xmit;
	dev->stop = bnx2_close;
	dev->get_stats = bnx2_get_stats;
	dev->set_multicast_list = bnx2_set_rx_mode;
	dev->do_ioctl = bnx2_ioctl;
	dev->set_mac_address = bnx2_change_mac_addr;
	dev->change_mtu = bnx2_change_mtu;
	dev->tx_timeout = bnx2_tx_timeout;
	dev->watchdog_timeo = TX_TIMEOUT;
#ifdef BCM_VLAN
	dev->vlan_rx_register = bnx2_vlan_rx_register;
	dev->vlan_rx_kill_vid = bnx2_vlan_rx_kill_vid;
#endif
	dev->poll = bnx2_poll;
	dev->ethtool_ops = &bnx2_ethtool_ops;
	dev->weight = 64;

	bp = netdev_priv(dev);

#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
	dev->poll_controller = poll_bnx2;
#endif

	pci_set_drvdata(pdev, dev);

	/* MAC address was read from shared memory by bnx2_init_board(). */
	memcpy(dev->dev_addr, bp->mac_addr, 6);
	memcpy(dev->perm_addr, bp->mac_addr, 6);
	bp->name = board_info[ent->driver_data].name;

	/* 5709 can checksum any protocol; older chips do IPv4 only. */
	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		dev->features |= NETIF_F_HW_CSUM | NETIF_F_SG;
	else
		dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
#ifdef BCM_VLAN
	dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
#endif
	dev->features |= NETIF_F_TSO | NETIF_F_TSO_ECN;
	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		dev->features |= NETIF_F_TSO6;

	if ((rc = register_netdev(dev))) {
		/* Undo everything bnx2_init_board() acquired. */
		dev_err(&pdev->dev, "Cannot register net device\n");
		if (bp->regview)
			iounmap(bp->regview);
		pci_release_regions(pdev);
		pci_disable_device(pdev);
		pci_set_drvdata(pdev, NULL);
		free_netdev(dev);
		return rc;
	}

	printk(KERN_INFO "%s: %s (%c%d) %s found at mem %lx, "
		"IRQ %d, ",
		dev->name,
		bp->name,
		((CHIP_ID(bp) & 0xf000) >> 12) + 'A',
		((CHIP_ID(bp) & 0x0ff0) >> 4),
		bnx2_bus_string(bp, str),
		dev->base_addr,
		bp->pdev->irq);

	printk("node addr ");
	for (i = 0; i < 6; i++)
		printk("%2.2x", dev->dev_addr[i]);
	printk("\n");

	return 0;
}
6513
/* PCI remove entry point: tear down in the reverse order of probe.
 * The interface must be unregistered (stopping all traffic and timers)
 * before the register mapping and PCI resources are released.
 */
static void __devexit
bnx2_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);

	/* Make sure no reset_task is still pending on the workqueue. */
	flush_scheduled_work();

	unregister_netdev(dev);

	if (bp->regview)
		iounmap(bp->regview);

	free_netdev(dev);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
}
6532
/* PM suspend hook: quiesce the interface, reset the chip with a code
 * telling the firmware how to park it (link down vs. wake-on-LAN), free
 * the rx/tx buffers and drop into the requested low-power state.
 */
static int
bnx2_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);
	u32 reset_code;

	/* Nothing to quiesce if the interface is down. */
	if (!netif_running(dev))
		return 0;

	/* Stop deferred work, traffic and the periodic timer first. */
	flush_scheduled_work();
	bnx2_netif_stop(bp);
	netif_device_detach(dev);
	del_timer_sync(&bp->timer);
	/* Choose the firmware unload code: link-down when WOL is not
	 * supported on this board, otherwise honor the user's WOL setting.
	 */
	if (bp->flags & NO_WOL_FLAG)
		reset_code = BNX2_DRV_MSG_CODE_UNLOAD_LNK_DN;
	else if (bp->wol)
		reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
	else
		reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
	bnx2_reset_chip(bp, reset_code);
	bnx2_free_skbs(bp);
	/* Save config space before powering down; restored in resume. */
	pci_save_state(pdev);
	bnx2_set_power_state(bp, pci_choose_state(pdev, state));
	return 0;
}
6559
/* PM resume hook: mirror of bnx2_suspend() — restore config space,
 * return to D0, re-initialize the chip and restart traffic.  Only acts
 * if the interface was running when it was suspended.
 */
static int
bnx2_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);

	if (!netif_running(dev))
		return 0;

	pci_restore_state(pdev);
	bnx2_set_power_state(bp, PCI_D0);
	netif_device_attach(dev);
	/* Full re-init: suspend reset the chip and freed all buffers. */
	bnx2_init_nic(bp);
	bnx2_netif_start(bp);
	return 0;
}
6576
/* PCI driver glue: binds the probe/remove/PM entry points above to the
 * device IDs in bnx2_pci_tbl.
 */
static struct pci_driver bnx2_pci_driver = {
	.name		= DRV_MODULE_NAME,
	.id_table	= bnx2_pci_tbl,
	.probe		= bnx2_init_one,
	.remove		= __devexit_p(bnx2_remove_one),
	.suspend	= bnx2_suspend,
	.resume		= bnx2_resume,
};
6585
/* Module load: register the PCI driver; probing happens per-device. */
static int __init bnx2_init(void)
{
	return pci_register_driver(&bnx2_pci_driver);
}

/* Module unload: unregister the driver, removing all bound devices. */
static void __exit bnx2_cleanup(void)
{
	pci_unregister_driver(&bnx2_pci_driver);
}

module_init(bnx2_init);
module_exit(bnx2_cleanup);
6598
6599
6600