[BNX2]: Add support for remote PHY.
[sfrench/cifs-2.6.git] / drivers / net / bnx2.c
1 /* bnx2.c: Broadcom NX2 network driver.
2  *
3  * Copyright (c) 2004-2007 Broadcom Corporation
4  *
5  * This program is free software; you can redistribute it and/or modify
6  * it under the terms of the GNU General Public License as published by
7  * the Free Software Foundation.
8  *
9  * Written by: Michael Chan  (mchan@broadcom.com)
10  */
11
12
13 #include <linux/module.h>
14 #include <linux/moduleparam.h>
15
16 #include <linux/kernel.h>
17 #include <linux/timer.h>
18 #include <linux/errno.h>
19 #include <linux/ioport.h>
20 #include <linux/slab.h>
21 #include <linux/vmalloc.h>
22 #include <linux/interrupt.h>
23 #include <linux/pci.h>
24 #include <linux/init.h>
25 #include <linux/netdevice.h>
26 #include <linux/etherdevice.h>
27 #include <linux/skbuff.h>
28 #include <linux/dma-mapping.h>
29 #include <asm/bitops.h>
30 #include <asm/io.h>
31 #include <asm/irq.h>
32 #include <linux/delay.h>
33 #include <asm/byteorder.h>
34 #include <asm/page.h>
35 #include <linux/time.h>
36 #include <linux/ethtool.h>
37 #include <linux/mii.h>
38 #ifdef NETIF_F_HW_VLAN_TX
39 #include <linux/if_vlan.h>
40 #define BCM_VLAN 1
41 #endif
42 #include <net/ip.h>
43 #include <net/tcp.h>
44 #include <net/checksum.h>
45 #include <linux/workqueue.h>
46 #include <linux/crc32.h>
47 #include <linux/prefetch.h>
48 #include <linux/cache.h>
49 #include <linux/zlib.h>
50
51 #include "bnx2.h"
52 #include "bnx2_fw.h"
53 #include "bnx2_fw2.h"
54
#define DRV_MODULE_NAME         "bnx2"
#define PFX DRV_MODULE_NAME     ": "
#define DRV_MODULE_VERSION      "1.5.11"
#define DRV_MODULE_RELDATE      "June 4, 2007"

/* Convert a relative delay into an absolute jiffies deadline. */
#define RUN_AT(x) (jiffies + (x))

/* Time in jiffies before concluding the transmitter is hung. */
#define TX_TIMEOUT  (5*HZ)

/* One-time banner printed at probe time. */
static const char version[] __devinitdata =
        "Broadcom NetXtreme II Gigabit Ethernet Driver " DRV_MODULE_NAME " v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Michael Chan <mchan@broadcom.com>");
MODULE_DESCRIPTION("Broadcom NetXtreme II BCM5706/5708 Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);

/* Module parameter: non-zero forces legacy INTx instead of MSI. */
static int disable_msi = 0;

module_param(disable_msi, int, 0);
MODULE_PARM_DESC(disable_msi, "Disable Message Signaled Interrupt (MSI)");
77
/* Board identifiers.  Each value doubles as the index into
 * board_info[] below and as the driver_data field of bnx2_pci_tbl,
 * so the three must be kept in the same order.
 */
typedef enum {
        BCM5706 = 0,
        NC370T,
        NC370I,
        BCM5706S,
        NC370F,
        BCM5708,
        BCM5708S,
        BCM5709,
        BCM5709S,
} board_t;
89
/* indexed by board_t, above -- entry order must match the enum. */
static const struct {
        char *name;     /* human-readable board name reported at probe */
} board_info[] __devinitdata = {
        { "Broadcom NetXtreme II BCM5706 1000Base-T" },
        { "HP NC370T Multifunction Gigabit Server Adapter" },
        { "HP NC370i Multifunction Gigabit Server Adapter" },
        { "Broadcom NetXtreme II BCM5706 1000Base-SX" },
        { "HP NC370F Multifunction Gigabit Server Adapter" },
        { "Broadcom NetXtreme II BCM5708 1000Base-T" },
        { "Broadcom NetXtreme II BCM5708 1000Base-SX" },
        { "Broadcom NetXtreme II BCM5709 1000Base-T" },
        { "Broadcom NetXtreme II BCM5709 1000Base-SX" },
        };
104
/* PCI IDs handled by this driver.  The HP OEM entries (matching on
 * subsystem vendor/device) must precede the generic PCI_ANY_ID
 * entries for the same chip so they are matched first.  The last
 * field is the board_t used to look up board_info[].
 */
static struct pci_device_id bnx2_pci_tbl[] = {
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
          PCI_VENDOR_ID_HP, 0x3101, 0, 0, NC370T },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
          PCI_VENDOR_ID_HP, 0x3106, 0, 0, NC370I },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706 },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708 },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
          PCI_VENDOR_ID_HP, 0x3102, 0, 0, NC370F },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706S },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708S,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708S },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709 },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709S,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709S },
        { 0, }
};
126
/* Supported NVRAM/flash parts.  Each entry describes one strapping
 * option: the first five words are raw controller configuration
 * values (strapping match, config registers, write command), followed
 * by the buffered-flash flag, page geometry, byte-address mask, total
 * size, and a name for logging.  "Expansion" entries are placeholders
 * for strapping codes with no known part.  Field meanings are defined
 * by struct flash_spec in bnx2.h.
 */
static struct flash_spec flash_table[] =
{
        /* Slow EEPROM */
        {0x00000000, 0x40830380, 0x009f0081, 0xa184a053, 0xaf000400,
         1, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
         SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
         "EEPROM - slow"},
        /* Expansion entry 0001 */
        {0x08000002, 0x4b808201, 0x00050081, 0x03840253, 0xaf020406,
         0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
         SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
         "Entry 0001"},
        /* Saifun SA25F010 (non-buffered flash) */
        /* strap, cfg1, & write1 need updates */
        {0x04000001, 0x47808201, 0x00050081, 0x03840253, 0xaf020406,
         0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
         SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*2,
         "Non-buffered flash (128kB)"},
        /* Saifun SA25F020 (non-buffered flash) */
        /* strap, cfg1, & write1 need updates */
        {0x0c000003, 0x4f808201, 0x00050081, 0x03840253, 0xaf020406,
         0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
         SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*4,
         "Non-buffered flash (256kB)"},
        /* Expansion entry 0100 */
        {0x11000000, 0x53808201, 0x00050081, 0x03840253, 0xaf020406,
         0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
         SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
         "Entry 0100"},
        /* Entry 0101: ST M45PE10 (non-buffered flash, TetonII B0) */
        {0x19000002, 0x5b808201, 0x000500db, 0x03840253, 0xaf020406,
         0, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
         ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*2,
         "Entry 0101: ST M45PE10 (128kB non-bufferred)"},
        /* Entry 0110: ST M45PE20 (non-buffered flash)*/
        {0x15000001, 0x57808201, 0x000500db, 0x03840253, 0xaf020406,
         0, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
         ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*4,
         "Entry 0110: ST M45PE20 (256kB non-bufferred)"},
        /* Saifun SA25F005 (non-buffered flash) */
        /* strap, cfg1, & write1 need updates */
        {0x1d000003, 0x5f808201, 0x00050081, 0x03840253, 0xaf020406,
         0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
         SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE,
         "Non-buffered flash (64kB)"},
        /* Fast EEPROM */
        {0x22000000, 0x62808380, 0x009f0081, 0xa184a053, 0xaf000400,
         1, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
         SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
         "EEPROM - fast"},
        /* Expansion entry 1001 */
        {0x2a000002, 0x6b808201, 0x00050081, 0x03840253, 0xaf020406,
         0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
         SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
         "Entry 1001"},
        /* Expansion entry 1010 */
        {0x26000001, 0x67808201, 0x00050081, 0x03840253, 0xaf020406,
         0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
         SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
         "Entry 1010"},
        /* ATMEL AT45DB011B (buffered flash) */
        {0x2e000003, 0x6e808273, 0x00570081, 0x68848353, 0xaf000400,
         1, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
         BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE,
         "Buffered flash (128kB)"},
        /* Expansion entry 1100 */
        {0x33000000, 0x73808201, 0x00050081, 0x03840253, 0xaf020406,
         0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
         SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
         "Entry 1100"},
        /* Expansion entry 1101 */
        {0x3b000002, 0x7b808201, 0x00050081, 0x03840253, 0xaf020406,
         0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
         SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
         "Entry 1101"},
        /* Ateml Expansion entry 1110 */
        {0x37000001, 0x76808273, 0x00570081, 0x68848353, 0xaf000400,
         1, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
         BUFFERED_FLASH_BYTE_ADDR_MASK, 0,
         "Entry 1110 (Atmel)"},
        /* ATMEL AT45DB021B (buffered flash) */
        {0x3f000003, 0x7e808273, 0x00570081, 0x68848353, 0xaf000400,
         1, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
         BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE*2,
         "Buffered flash (256kB)"},
};
213
214 MODULE_DEVICE_TABLE(pci, bnx2_pci_tbl);
215
216 static inline u32 bnx2_tx_avail(struct bnx2 *bp)
217 {
218         u32 diff;
219
220         smp_mb();
221
222         /* The ring uses 256 indices for 255 entries, one of them
223          * needs to be skipped.
224          */
225         diff = bp->tx_prod - bp->tx_cons;
226         if (unlikely(diff >= TX_DESC_CNT)) {
227                 diff &= 0xffff;
228                 if (diff == TX_DESC_CNT)
229                         diff = MAX_TX_DESC_CNT;
230         }
231         return (bp->tx_ring_size - diff);
232 }
233
234 static u32
235 bnx2_reg_rd_ind(struct bnx2 *bp, u32 offset)
236 {
237         u32 val;
238
239         spin_lock_bh(&bp->indirect_lock);
240         REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
241         val = REG_RD(bp, BNX2_PCICFG_REG_WINDOW);
242         spin_unlock_bh(&bp->indirect_lock);
243         return val;
244 }
245
/* Indirect register write: program the PCICFG window address, then
 * write the data.  Serialized by indirect_lock because the window
 * register pair is shared with bnx2_reg_rd_ind().
 */
static void
bnx2_reg_wr_ind(struct bnx2 *bp, u32 offset, u32 val)
{
        spin_lock_bh(&bp->indirect_lock);
        REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
        REG_WR(bp, BNX2_PCICFG_REG_WINDOW, val);
        spin_unlock_bh(&bp->indirect_lock);
}
254
255 static void
256 bnx2_ctx_wr(struct bnx2 *bp, u32 cid_addr, u32 offset, u32 val)
257 {
258         offset += cid_addr;
259         spin_lock_bh(&bp->indirect_lock);
260         if (CHIP_NUM(bp) == CHIP_NUM_5709) {
261                 int i;
262
263                 REG_WR(bp, BNX2_CTX_CTX_DATA, val);
264                 REG_WR(bp, BNX2_CTX_CTX_CTRL,
265                        offset | BNX2_CTX_CTX_CTRL_WRITE_REQ);
266                 for (i = 0; i < 5; i++) {
267                         u32 val;
268                         val = REG_RD(bp, BNX2_CTX_CTX_CTRL);
269                         if ((val & BNX2_CTX_CTX_CTRL_WRITE_REQ) == 0)
270                                 break;
271                         udelay(5);
272                 }
273         } else {
274                 REG_WR(bp, BNX2_CTX_DATA_ADR, offset);
275                 REG_WR(bp, BNX2_CTX_DATA, val);
276         }
277         spin_unlock_bh(&bp->indirect_lock);
278 }
279
/* Read PHY register @reg over the EMAC MDIO interface.
 *
 * If the chip is auto-polling the PHY, auto-poll is turned off for
 * the duration of the manual access and restored afterwards; each
 * mode change is flushed with a read-back and given 40us to settle.
 *
 * Returns 0 with the 16-bit value in *val on success, or -EBUSY
 * (with *val set to 0) if the access does not complete in time.
 */
static int
bnx2_read_phy(struct bnx2 *bp, u32 reg, u32 *val)
{
        u32 val1;
        int i, ret;

        if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
                val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
                val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;

                REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
                REG_RD(bp, BNX2_EMAC_MDIO_MODE);

                udelay(40);
        }

        /* Issue the read command: PHY address, register, READ op. */
        val1 = (bp->phy_addr << 21) | (reg << 16) |
                BNX2_EMAC_MDIO_COMM_COMMAND_READ | BNX2_EMAC_MDIO_COMM_DISEXT |
                BNX2_EMAC_MDIO_COMM_START_BUSY;
        REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);

        /* Poll for BUSY to clear, up to 50 x 10us. */
        for (i = 0; i < 50; i++) {
                udelay(10);

                val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
                if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
                        udelay(5);

                        /* Re-read to fetch the data portion. */
                        val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
                        val1 &= BNX2_EMAC_MDIO_COMM_DATA;

                        break;
                }
        }

        if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY) {
                *val = 0x0;
                ret = -EBUSY;
        }
        else {
                *val = val1;
                ret = 0;
        }

        /* Restore auto-polling if we disabled it above. */
        if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
                val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
                val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;

                REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
                REG_RD(bp, BNX2_EMAC_MDIO_MODE);

                udelay(40);
        }

        return ret;
}
336
/* Write @val to PHY register @reg over the EMAC MDIO interface.
 *
 * Mirrors bnx2_read_phy(): auto-polling is suspended around the
 * manual access and restored afterwards, with a 40us settle after
 * each mode change.
 *
 * Returns 0 on success or -EBUSY if the access does not complete.
 */
static int
bnx2_write_phy(struct bnx2 *bp, u32 reg, u32 val)
{
        u32 val1;
        int i, ret;

        if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
                val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
                val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;

                REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
                REG_RD(bp, BNX2_EMAC_MDIO_MODE);

                udelay(40);
        }

        /* Issue the write command: PHY address, register, data. */
        val1 = (bp->phy_addr << 21) | (reg << 16) | val |
                BNX2_EMAC_MDIO_COMM_COMMAND_WRITE |
                BNX2_EMAC_MDIO_COMM_START_BUSY | BNX2_EMAC_MDIO_COMM_DISEXT;
        REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);

        /* Poll for BUSY to clear, up to 50 x 10us. */
        for (i = 0; i < 50; i++) {
                udelay(10);

                val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
                if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
                        udelay(5);
                        break;
                }
        }

        if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)
                ret = -EBUSY;
        else
                ret = 0;

        /* Restore auto-polling if we disabled it above. */
        if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
                val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
                val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;

                REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
                REG_RD(bp, BNX2_EMAC_MDIO_MODE);

                udelay(40);
        }

        return ret;
}
385
/* Mask chip interrupts; the read-back flushes the posted write so
 * the mask takes effect before we return.
 */
static void
bnx2_disable_int(struct bnx2 *bp)
{
        REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
               BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
        REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD);
}
393
/* Unmask chip interrupts.  The first write acks up to the last seen
 * status index while interrupts are still masked; the second write
 * (without MASK_INT) re-enables them.  The COAL_NOW command then asks
 * the host coalescing block to run immediately, presumably so events
 * that arrived while masked are not left pending -- NOTE(review):
 * confirm against the HC block documentation.
 */
static void
bnx2_enable_int(struct bnx2 *bp)
{
        REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
               BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
               BNX2_PCICFG_INT_ACK_CMD_MASK_INT | bp->last_status_idx);

        REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
               BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID | bp->last_status_idx);

        REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
}
406
/* Mask chip interrupts and wait for any in-flight handler to finish.
 * intr_sem is bumped first so bnx2_netif_start() will not re-enable
 * until the matching decrement.
 */
static void
bnx2_disable_int_sync(struct bnx2 *bp)
{
        atomic_inc(&bp->intr_sem);
        bnx2_disable_int(bp);
        synchronize_irq(bp->pdev->irq);
}
414
/* Quiesce the datapath: mask interrupts (synchronously), then stop
 * polling and the transmit queue.  trans_start is refreshed so the
 * stopped queue does not trigger a spurious tx watchdog timeout.
 */
static void
bnx2_netif_stop(struct bnx2 *bp)
{
        bnx2_disable_int_sync(bp);
        if (netif_running(bp->dev)) {
                netif_poll_disable(bp->dev);
                netif_tx_disable(bp->dev);
                bp->dev->trans_start = jiffies; /* prevent tx timeout */
        }
}
425
426 static void
427 bnx2_netif_start(struct bnx2 *bp)
428 {
429         if (atomic_dec_and_test(&bp->intr_sem)) {
430                 if (netif_running(bp->dev)) {
431                         netif_wake_queue(bp->dev);
432                         netif_poll_enable(bp->dev);
433                         bnx2_enable_int(bp);
434                 }
435         }
436 }
437
/* Release all DMA and host memory owned by the device.  Safe to call
 * on a partially-initialized bp (freed pointers are NULLed and NULL
 * entries skipped), so it doubles as the bnx2_alloc_mem() error path.
 */
static void
bnx2_free_mem(struct bnx2 *bp)
{
        int i;

        /* 5709-only context memory pages. */
        for (i = 0; i < bp->ctx_pages; i++) {
                if (bp->ctx_blk[i]) {
                        pci_free_consistent(bp->pdev, BCM_PAGE_SIZE,
                                            bp->ctx_blk[i],
                                            bp->ctx_blk_mapping[i]);
                        bp->ctx_blk[i] = NULL;
                }
        }
        /* Status and statistics blocks share one allocation; stats_blk
         * points into it, so only status_blk is freed.
         */
        if (bp->status_blk) {
                pci_free_consistent(bp->pdev, bp->status_stats_size,
                                    bp->status_blk, bp->status_blk_mapping);
                bp->status_blk = NULL;
                bp->stats_blk = NULL;
        }
        if (bp->tx_desc_ring) {
                pci_free_consistent(bp->pdev,
                                    sizeof(struct tx_bd) * TX_DESC_CNT,
                                    bp->tx_desc_ring, bp->tx_desc_mapping);
                bp->tx_desc_ring = NULL;
        }
        kfree(bp->tx_buf_ring);
        bp->tx_buf_ring = NULL;
        for (i = 0; i < bp->rx_max_ring; i++) {
                if (bp->rx_desc_ring[i])
                        pci_free_consistent(bp->pdev,
                                            sizeof(struct rx_bd) * RX_DESC_CNT,
                                            bp->rx_desc_ring[i],
                                            bp->rx_desc_mapping[i]);
                bp->rx_desc_ring[i] = NULL;
        }
        /* rx_buf_ring came from vmalloc(), not kmalloc(). */
        vfree(bp->rx_buf_ring);
        bp->rx_buf_ring = NULL;
}
476
/* Allocate all DMA rings and shared blocks used by the device.
 *
 * On any failure, everything allocated so far is released via
 * bnx2_free_mem() and -ENOMEM is returned; returns 0 on success.
 */
static int
bnx2_alloc_mem(struct bnx2 *bp)
{
        int i, status_blk_size;

        bp->tx_buf_ring = kzalloc(sizeof(struct sw_bd) * TX_DESC_CNT,
                                  GFP_KERNEL);
        if (bp->tx_buf_ring == NULL)
                return -ENOMEM;

        bp->tx_desc_ring = pci_alloc_consistent(bp->pdev,
                                                sizeof(struct tx_bd) *
                                                TX_DESC_CNT,
                                                &bp->tx_desc_mapping);
        if (bp->tx_desc_ring == NULL)
                goto alloc_mem_err;

        /* The RX shadow ring can span multiple pages; use vmalloc. */
        bp->rx_buf_ring = vmalloc(sizeof(struct sw_bd) * RX_DESC_CNT *
                                  bp->rx_max_ring);
        if (bp->rx_buf_ring == NULL)
                goto alloc_mem_err;

        memset(bp->rx_buf_ring, 0, sizeof(struct sw_bd) * RX_DESC_CNT *
                                   bp->rx_max_ring);

        for (i = 0; i < bp->rx_max_ring; i++) {
                bp->rx_desc_ring[i] =
                        pci_alloc_consistent(bp->pdev,
                                             sizeof(struct rx_bd) * RX_DESC_CNT,
                                             &bp->rx_desc_mapping[i]);
                if (bp->rx_desc_ring[i] == NULL)
                        goto alloc_mem_err;

        }

        /* Combine status and statistics blocks into one allocation. */
        status_blk_size = L1_CACHE_ALIGN(sizeof(struct status_block));
        bp->status_stats_size = status_blk_size +
                                sizeof(struct statistics_block);

        bp->status_blk = pci_alloc_consistent(bp->pdev, bp->status_stats_size,
                                              &bp->status_blk_mapping);
        if (bp->status_blk == NULL)
                goto alloc_mem_err;

        memset(bp->status_blk, 0, bp->status_stats_size);

        /* stats_blk points into the combined allocation, just past the
         * cache-aligned status block.
         */
        bp->stats_blk = (void *) ((unsigned long) bp->status_blk +
                                  status_blk_size);

        bp->stats_blk_mapping = bp->status_blk_mapping + status_blk_size;

        /* 5709 needs 8kB of host-resident context memory, split into
         * BCM_PAGE_SIZE chunks (at least one).
         */
        if (CHIP_NUM(bp) == CHIP_NUM_5709) {
                bp->ctx_pages = 0x2000 / BCM_PAGE_SIZE;
                if (bp->ctx_pages == 0)
                        bp->ctx_pages = 1;
                for (i = 0; i < bp->ctx_pages; i++) {
                        bp->ctx_blk[i] = pci_alloc_consistent(bp->pdev,
                                                BCM_PAGE_SIZE,
                                                &bp->ctx_blk_mapping[i]);
                        if (bp->ctx_blk[i] == NULL)
                                goto alloc_mem_err;
                }
        }
        return 0;

alloc_mem_err:
        bnx2_free_mem(bp);
        return -ENOMEM;
}
547
/* Publish the current link state to the bootcode via shared memory.
 * Skipped when the remote PHY capability is present -- in that case
 * the firmware manages the PHY itself and presumably already has the
 * link state (NOTE(review): confirm against the remote PHY design).
 */
static void
bnx2_report_fw_link(struct bnx2 *bp)
{
        u32 fw_link_status = 0;

        if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
                return;

        if (bp->link_up) {
                u32 bmsr;

                /* Encode speed + duplex into the firmware's format. */
                switch (bp->line_speed) {
                case SPEED_10:
                        if (bp->duplex == DUPLEX_HALF)
                                fw_link_status = BNX2_LINK_STATUS_10HALF;
                        else
                                fw_link_status = BNX2_LINK_STATUS_10FULL;
                        break;
                case SPEED_100:
                        if (bp->duplex == DUPLEX_HALF)
                                fw_link_status = BNX2_LINK_STATUS_100HALF;
                        else
                                fw_link_status = BNX2_LINK_STATUS_100FULL;
                        break;
                case SPEED_1000:
                        if (bp->duplex == DUPLEX_HALF)
                                fw_link_status = BNX2_LINK_STATUS_1000HALF;
                        else
                                fw_link_status = BNX2_LINK_STATUS_1000FULL;
                        break;
                case SPEED_2500:
                        if (bp->duplex == DUPLEX_HALF)
                                fw_link_status = BNX2_LINK_STATUS_2500HALF;
                        else
                                fw_link_status = BNX2_LINK_STATUS_2500FULL;
                        break;
                }

                fw_link_status |= BNX2_LINK_STATUS_LINK_UP;

                if (bp->autoneg) {
                        fw_link_status |= BNX2_LINK_STATUS_AN_ENABLED;

                        /* Read BMSR twice: the first read returns the
                         * latched value, the second the current state.
                         */
                        bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
                        bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);

                        if (!(bmsr & BMSR_ANEGCOMPLETE) ||
                            bp->phy_flags & PHY_PARALLEL_DETECT_FLAG)
                                fw_link_status |= BNX2_LINK_STATUS_PARALLEL_DET;
                        else
                                fw_link_status |= BNX2_LINK_STATUS_AN_COMPLETE;
                }
        }
        else
                fw_link_status = BNX2_LINK_STATUS_LINK_DOWN;

        REG_WR_IND(bp, bp->shmem_base + BNX2_LINK_STATUS, fw_link_status);
}
606
/* Log the link state to the console, update the carrier flag, and
 * forward the state to the firmware via bnx2_report_fw_link().
 */
static void
bnx2_report_link(struct bnx2 *bp)
{
        if (bp->link_up) {
                netif_carrier_on(bp->dev);
                printk(KERN_INFO PFX "%s NIC Link is Up, ", bp->dev->name);

                printk("%d Mbps ", bp->line_speed);

                if (bp->duplex == DUPLEX_FULL)
                        printk("full duplex");
                else
                        printk("half duplex");

                /* Describe the resolved flow control configuration. */
                if (bp->flow_ctrl) {
                        if (bp->flow_ctrl & FLOW_CTRL_RX) {
                                printk(", receive ");
                                if (bp->flow_ctrl & FLOW_CTRL_TX)
                                        printk("& transmit ");
                        }
                        else {
                                printk(", transmit ");
                        }
                        printk("flow control ON");
                }
                printk("\n");
        }
        else {
                netif_carrier_off(bp->dev);
                printk(KERN_ERR PFX "%s NIC Link is Down\n", bp->dev->name);
        }

        bnx2_report_fw_link(bp);
}
641
642 static void
643 bnx2_resolve_flow_ctrl(struct bnx2 *bp)
644 {
645         u32 local_adv, remote_adv;
646
647         bp->flow_ctrl = 0;
648         if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
649                 (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {
650
651                 if (bp->duplex == DUPLEX_FULL) {
652                         bp->flow_ctrl = bp->req_flow_ctrl;
653                 }
654                 return;
655         }
656
657         if (bp->duplex != DUPLEX_FULL) {
658                 return;
659         }
660
661         if ((bp->phy_flags & PHY_SERDES_FLAG) &&
662             (CHIP_NUM(bp) == CHIP_NUM_5708)) {
663                 u32 val;
664
665                 bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
666                 if (val & BCM5708S_1000X_STAT1_TX_PAUSE)
667                         bp->flow_ctrl |= FLOW_CTRL_TX;
668                 if (val & BCM5708S_1000X_STAT1_RX_PAUSE)
669                         bp->flow_ctrl |= FLOW_CTRL_RX;
670                 return;
671         }
672
673         bnx2_read_phy(bp, bp->mii_adv, &local_adv);
674         bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);
675
676         if (bp->phy_flags & PHY_SERDES_FLAG) {
677                 u32 new_local_adv = 0;
678                 u32 new_remote_adv = 0;
679
680                 if (local_adv & ADVERTISE_1000XPAUSE)
681                         new_local_adv |= ADVERTISE_PAUSE_CAP;
682                 if (local_adv & ADVERTISE_1000XPSE_ASYM)
683                         new_local_adv |= ADVERTISE_PAUSE_ASYM;
684                 if (remote_adv & ADVERTISE_1000XPAUSE)
685                         new_remote_adv |= ADVERTISE_PAUSE_CAP;
686                 if (remote_adv & ADVERTISE_1000XPSE_ASYM)
687                         new_remote_adv |= ADVERTISE_PAUSE_ASYM;
688
689                 local_adv = new_local_adv;
690                 remote_adv = new_remote_adv;
691         }
692
693         /* See Table 28B-3 of 802.3ab-1999 spec. */
694         if (local_adv & ADVERTISE_PAUSE_CAP) {
695                 if(local_adv & ADVERTISE_PAUSE_ASYM) {
696                         if (remote_adv & ADVERTISE_PAUSE_CAP) {
697                                 bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
698                         }
699                         else if (remote_adv & ADVERTISE_PAUSE_ASYM) {
700                                 bp->flow_ctrl = FLOW_CTRL_RX;
701                         }
702                 }
703                 else {
704                         if (remote_adv & ADVERTISE_PAUSE_CAP) {
705                                 bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
706                         }
707                 }
708         }
709         else if (local_adv & ADVERTISE_PAUSE_ASYM) {
710                 if ((remote_adv & ADVERTISE_PAUSE_CAP) &&
711                         (remote_adv & ADVERTISE_PAUSE_ASYM)) {
712
713                         bp->flow_ctrl = FLOW_CTRL_TX;
714                 }
715         }
716 }
717
718 static int
719 bnx2_5709s_linkup(struct bnx2 *bp)
720 {
721         u32 val, speed;
722
723         bp->link_up = 1;
724
725         bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_GP_STATUS);
726         bnx2_read_phy(bp, MII_BNX2_GP_TOP_AN_STATUS1, &val);
727         bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
728
729         if ((bp->autoneg & AUTONEG_SPEED) == 0) {
730                 bp->line_speed = bp->req_line_speed;
731                 bp->duplex = bp->req_duplex;
732                 return 0;
733         }
734         speed = val & MII_BNX2_GP_TOP_AN_SPEED_MSK;
735         switch (speed) {
736                 case MII_BNX2_GP_TOP_AN_SPEED_10:
737                         bp->line_speed = SPEED_10;
738                         break;
739                 case MII_BNX2_GP_TOP_AN_SPEED_100:
740                         bp->line_speed = SPEED_100;
741                         break;
742                 case MII_BNX2_GP_TOP_AN_SPEED_1G:
743                 case MII_BNX2_GP_TOP_AN_SPEED_1GKV:
744                         bp->line_speed = SPEED_1000;
745                         break;
746                 case MII_BNX2_GP_TOP_AN_SPEED_2_5G:
747                         bp->line_speed = SPEED_2500;
748                         break;
749         }
750         if (val & MII_BNX2_GP_TOP_AN_FD)
751                 bp->duplex = DUPLEX_FULL;
752         else
753                 bp->duplex = DUPLEX_HALF;
754         return 0;
755 }
756
757 static int
758 bnx2_5708s_linkup(struct bnx2 *bp)
759 {
760         u32 val;
761
762         bp->link_up = 1;
763         bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
764         switch (val & BCM5708S_1000X_STAT1_SPEED_MASK) {
765                 case BCM5708S_1000X_STAT1_SPEED_10:
766                         bp->line_speed = SPEED_10;
767                         break;
768                 case BCM5708S_1000X_STAT1_SPEED_100:
769                         bp->line_speed = SPEED_100;
770                         break;
771                 case BCM5708S_1000X_STAT1_SPEED_1G:
772                         bp->line_speed = SPEED_1000;
773                         break;
774                 case BCM5708S_1000X_STAT1_SPEED_2G5:
775                         bp->line_speed = SPEED_2500;
776                         break;
777         }
778         if (val & BCM5708S_1000X_STAT1_FD)
779                 bp->duplex = DUPLEX_FULL;
780         else
781                 bp->duplex = DUPLEX_HALF;
782
783         return 0;
784 }
785
786 static int
787 bnx2_5706s_linkup(struct bnx2 *bp)
788 {
789         u32 bmcr, local_adv, remote_adv, common;
790
791         bp->link_up = 1;
792         bp->line_speed = SPEED_1000;
793
794         bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
795         if (bmcr & BMCR_FULLDPLX) {
796                 bp->duplex = DUPLEX_FULL;
797         }
798         else {
799                 bp->duplex = DUPLEX_HALF;
800         }
801
802         if (!(bmcr & BMCR_ANENABLE)) {
803                 return 0;
804         }
805
806         bnx2_read_phy(bp, bp->mii_adv, &local_adv);
807         bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);
808
809         common = local_adv & remote_adv;
810         if (common & (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL)) {
811
812                 if (common & ADVERTISE_1000XFULL) {
813                         bp->duplex = DUPLEX_FULL;
814                 }
815                 else {
816                         bp->duplex = DUPLEX_HALF;
817                 }
818         }
819
820         return 0;
821 }
822
/* Record link parameters after a copper PHY link-up.  With autoneg,
 * resolve speed/duplex from the highest common advertised ability
 * (1000 first, then 100/10); in forced mode, decode BMCR directly.
 * Always returns 0.
 */
static int
bnx2_copper_linkup(struct bnx2 *bp)
{
        u32 bmcr;

        bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
        if (bmcr & BMCR_ANENABLE) {
                u32 local_adv, remote_adv, common;

                bnx2_read_phy(bp, MII_CTRL1000, &local_adv);
                bnx2_read_phy(bp, MII_STAT1000, &remote_adv);

                /* The link partner's 1000BASE-T ability bits in
                 * STAT1000 sit two positions above our CTRL1000
                 * advertisement bits; shift to line them up.
                 */
                common = local_adv & (remote_adv >> 2);
                if (common & ADVERTISE_1000FULL) {
                        bp->line_speed = SPEED_1000;
                        bp->duplex = DUPLEX_FULL;
                }
                else if (common & ADVERTISE_1000HALF) {
                        bp->line_speed = SPEED_1000;
                        bp->duplex = DUPLEX_HALF;
                }
                else {
                        /* No gigabit match; fall back to 100/10. */
                        bnx2_read_phy(bp, bp->mii_adv, &local_adv);
                        bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);

                        common = local_adv & remote_adv;
                        if (common & ADVERTISE_100FULL) {
                                bp->line_speed = SPEED_100;
                                bp->duplex = DUPLEX_FULL;
                        }
                        else if (common & ADVERTISE_100HALF) {
                                bp->line_speed = SPEED_100;
                                bp->duplex = DUPLEX_HALF;
                        }
                        else if (common & ADVERTISE_10FULL) {
                                bp->line_speed = SPEED_10;
                                bp->duplex = DUPLEX_FULL;
                        }
                        else if (common & ADVERTISE_10HALF) {
                                bp->line_speed = SPEED_10;
                                bp->duplex = DUPLEX_HALF;
                        }
                        else {
                                /* No common ability at all. */
                                bp->line_speed = 0;
                                bp->link_up = 0;
                        }
                }
        }
        else {
                /* Forced mode: decode BMCR speed/duplex bits. */
                if (bmcr & BMCR_SPEED100) {
                        bp->line_speed = SPEED_100;
                }
                else {
                        bp->line_speed = SPEED_10;
                }
                if (bmcr & BMCR_FULLDPLX) {
                        bp->duplex = DUPLEX_FULL;
                }
                else {
                        bp->duplex = DUPLEX_HALF;
                }
        }

        return 0;
}
888
/* Program the EMAC to match the current link state in bp: port mode
 * (MII vs GMII vs 2.5G), half-duplex flag, and RX/TX pause enables.
 * Finishes by acking the EMAC link-change interrupt.  Safe to call with
 * the link down (bp->link_up == 0).
 */
static int
bnx2_set_mac_link(struct bnx2 *bp)
{
	u32 val;

	/* TX length parameters: 0x26ff is used only for gigabit half
	 * duplex — presumably a slot-time adjustment; confirm against
	 * the EMAC register documentation.
	 */
	REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x2620);
	if (bp->link_up && (bp->line_speed == SPEED_1000) &&
		(bp->duplex == DUPLEX_HALF)) {
		REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x26ff);
	}

	/* Configure the EMAC mode register. */
	val = REG_RD(bp, BNX2_EMAC_MODE);

	val &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
		BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
		BNX2_EMAC_MODE_25G_MODE);

	if (bp->link_up) {
		switch (bp->line_speed) {
			case SPEED_10:
				/* 5706 has no dedicated 10M port mode and
				 * uses plain MII instead.
				 */
				if (CHIP_NUM(bp) != CHIP_NUM_5706) {
					val |= BNX2_EMAC_MODE_PORT_MII_10M;
					break;
				}
				/* fall through */
			case SPEED_100:
				val |= BNX2_EMAC_MODE_PORT_MII;
				break;
			case SPEED_2500:
				/* 2.5G is GMII port mode plus the 25G bit. */
				val |= BNX2_EMAC_MODE_25G_MODE;
				/* fall through */
			case SPEED_1000:
				val |= BNX2_EMAC_MODE_PORT_GMII;
				break;
		}
	}
	else {
		/* Link down: park the port in GMII mode. */
		val |= BNX2_EMAC_MODE_PORT_GMII;
	}

	/* Set the MAC to operate in the appropriate duplex mode. */
	if (bp->duplex == DUPLEX_HALF)
		val |= BNX2_EMAC_MODE_HALF_DUPLEX;
	REG_WR(bp, BNX2_EMAC_MODE, val);

	/* Enable/disable rx PAUSE. */
	bp->rx_mode &= ~BNX2_EMAC_RX_MODE_FLOW_EN;

	if (bp->flow_ctrl & FLOW_CTRL_RX)
		bp->rx_mode |= BNX2_EMAC_RX_MODE_FLOW_EN;
	REG_WR(bp, BNX2_EMAC_RX_MODE, bp->rx_mode);

	/* Enable/disable tx PAUSE. */
	val = REG_RD(bp, BNX2_EMAC_TX_MODE);
	val &= ~BNX2_EMAC_TX_MODE_FLOW_EN;

	if (bp->flow_ctrl & FLOW_CTRL_TX)
		val |= BNX2_EMAC_TX_MODE_FLOW_EN;
	REG_WR(bp, BNX2_EMAC_TX_MODE, val);

	/* Acknowledge the interrupt. */
	REG_WR(bp, BNX2_EMAC_STATUS, BNX2_EMAC_STATUS_LINK_CHANGE);

	return 0;
}
955
956 static void
957 bnx2_enable_bmsr1(struct bnx2 *bp)
958 {
959         if ((bp->phy_flags & PHY_SERDES_FLAG) &&
960             (CHIP_NUM(bp) == CHIP_NUM_5709))
961                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
962                                MII_BNX2_BLK_ADDR_GP_STATUS);
963 }
964
965 static void
966 bnx2_disable_bmsr1(struct bnx2 *bp)
967 {
968         if ((bp->phy_flags & PHY_SERDES_FLAG) &&
969             (CHIP_NUM(bp) == CHIP_NUM_5709))
970                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
971                                MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
972 }
973
974 static int
975 bnx2_test_and_enable_2g5(struct bnx2 *bp)
976 {
977         u32 up1;
978         int ret = 1;
979
980         if (!(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
981                 return 0;
982
983         if (bp->autoneg & AUTONEG_SPEED)
984                 bp->advertising |= ADVERTISED_2500baseX_Full;
985
986         if (CHIP_NUM(bp) == CHIP_NUM_5709)
987                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
988
989         bnx2_read_phy(bp, bp->mii_up1, &up1);
990         if (!(up1 & BCM5708S_UP1_2G5)) {
991                 up1 |= BCM5708S_UP1_2G5;
992                 bnx2_write_phy(bp, bp->mii_up1, up1);
993                 ret = 0;
994         }
995
996         if (CHIP_NUM(bp) == CHIP_NUM_5709)
997                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
998                                MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
999
1000         return ret;
1001 }
1002
1003 static int
1004 bnx2_test_and_disable_2g5(struct bnx2 *bp)
1005 {
1006         u32 up1;
1007         int ret = 0;
1008
1009         if (!(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
1010                 return 0;
1011
1012         if (CHIP_NUM(bp) == CHIP_NUM_5709)
1013                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
1014
1015         bnx2_read_phy(bp, bp->mii_up1, &up1);
1016         if (up1 & BCM5708S_UP1_2G5) {
1017                 up1 &= ~BCM5708S_UP1_2G5;
1018                 bnx2_write_phy(bp, bp->mii_up1, up1);
1019                 ret = 1;
1020         }
1021
1022         if (CHIP_NUM(bp) == CHIP_NUM_5709)
1023                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1024                                MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1025
1026         return ret;
1027 }
1028
1029 static void
1030 bnx2_enable_forced_2g5(struct bnx2 *bp)
1031 {
1032         u32 bmcr;
1033
1034         if (!(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
1035                 return;
1036
1037         if (CHIP_NUM(bp) == CHIP_NUM_5709) {
1038                 u32 val;
1039
1040                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1041                                MII_BNX2_BLK_ADDR_SERDES_DIG);
1042                 bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val);
1043                 val &= ~MII_BNX2_SD_MISC1_FORCE_MSK;
1044                 val |= MII_BNX2_SD_MISC1_FORCE | MII_BNX2_SD_MISC1_FORCE_2_5G;
1045                 bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);
1046
1047                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1048                                MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1049                 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1050
1051         } else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
1052                 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1053                 bmcr |= BCM5708S_BMCR_FORCE_2500;
1054         }
1055
1056         if (bp->autoneg & AUTONEG_SPEED) {
1057                 bmcr &= ~BMCR_ANENABLE;
1058                 if (bp->req_duplex == DUPLEX_FULL)
1059                         bmcr |= BMCR_FULLDPLX;
1060         }
1061         bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
1062 }
1063
1064 static void
1065 bnx2_disable_forced_2g5(struct bnx2 *bp)
1066 {
1067         u32 bmcr;
1068
1069         if (!(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
1070                 return;
1071
1072         if (CHIP_NUM(bp) == CHIP_NUM_5709) {
1073                 u32 val;
1074
1075                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1076                                MII_BNX2_BLK_ADDR_SERDES_DIG);
1077                 bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val);
1078                 val &= ~MII_BNX2_SD_MISC1_FORCE;
1079                 bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);
1080
1081                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1082                                MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1083                 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1084
1085         } else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
1086                 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1087                 bmcr &= ~BCM5708S_BMCR_FORCE_2500;
1088         }
1089
1090         if (bp->autoneg & AUTONEG_SPEED)
1091                 bmcr |= BMCR_SPEED1000 | BMCR_ANENABLE | BMCR_ANRESTART;
1092         bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
1093 }
1094
/* Re-evaluate the PHY link state and reprogram the MAC to match.
 * In MAC/PHY loopback the link is simply reported up; when a remote
 * (firmware-managed) PHY owns the link, this function defers entirely
 * to the firmware event path.  NOTE(review): callers appear to hold
 * bp->phy_lock (sibling setup paths unlock/relock it) — confirm.
 */
static int
bnx2_set_link(struct bnx2 *bp)
{
	u32 bmsr;
	u8 link_up;

	if (bp->loopback == MAC_LOOPBACK || bp->loopback == PHY_LOOPBACK) {
		bp->link_up = 1;
		return 0;
	}

	if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
		return 0;

	/* Remember the old state so we only report on a change. */
	link_up = bp->link_up;

	/* The status register is latched; read it twice so the second
	 * read reflects the current link state (standard MII behavior).
	 */
	bnx2_enable_bmsr1(bp);
	bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
	bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
	bnx2_disable_bmsr1(bp);

	/* On 5706 SerDes, the EMAC's own link status overrides the
	 * PHY-reported link bit.
	 */
	if ((bp->phy_flags & PHY_SERDES_FLAG) &&
	    (CHIP_NUM(bp) == CHIP_NUM_5706)) {
		u32 val;

		val = REG_RD(bp, BNX2_EMAC_STATUS);
		if (val & BNX2_EMAC_STATUS_LINK)
			bmsr |= BMSR_LSTATUS;
		else
			bmsr &= ~BMSR_LSTATUS;
	}

	if (bmsr & BMSR_LSTATUS) {
		bp->link_up = 1;

		/* Decode speed/duplex with the chip-specific helper. */
		if (bp->phy_flags & PHY_SERDES_FLAG) {
			if (CHIP_NUM(bp) == CHIP_NUM_5706)
				bnx2_5706s_linkup(bp);
			else if (CHIP_NUM(bp) == CHIP_NUM_5708)
				bnx2_5708s_linkup(bp);
			else if (CHIP_NUM(bp) == CHIP_NUM_5709)
				bnx2_5709s_linkup(bp);
		}
		else {
			bnx2_copper_linkup(bp);
		}
		bnx2_resolve_flow_ctrl(bp);
	}
	else {
		/* Link lost: drop any forced-2.5G state so autoneg can
		 * proceed normally, and clear parallel-detect tracking.
		 */
		if ((bp->phy_flags & PHY_SERDES_FLAG) &&
		    (bp->autoneg & AUTONEG_SPEED))
			bnx2_disable_forced_2g5(bp);

		bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG;
		bp->link_up = 0;
	}

	if (bp->link_up != link_up) {
		bnx2_report_link(bp);
	}

	bnx2_set_mac_link(bp);

	return 0;
}
1160
1161 static int
1162 bnx2_reset_phy(struct bnx2 *bp)
1163 {
1164         int i;
1165         u32 reg;
1166
1167         bnx2_write_phy(bp, bp->mii_bmcr, BMCR_RESET);
1168
1169 #define PHY_RESET_MAX_WAIT 100
1170         for (i = 0; i < PHY_RESET_MAX_WAIT; i++) {
1171                 udelay(10);
1172
1173                 bnx2_read_phy(bp, bp->mii_bmcr, &reg);
1174                 if (!(reg & BMCR_RESET)) {
1175                         udelay(20);
1176                         break;
1177                 }
1178         }
1179         if (i == PHY_RESET_MAX_WAIT) {
1180                 return -EBUSY;
1181         }
1182         return 0;
1183 }
1184
1185 static u32
1186 bnx2_phy_get_pause_adv(struct bnx2 *bp)
1187 {
1188         u32 adv = 0;
1189
1190         if ((bp->req_flow_ctrl & (FLOW_CTRL_RX | FLOW_CTRL_TX)) ==
1191                 (FLOW_CTRL_RX | FLOW_CTRL_TX)) {
1192
1193                 if (bp->phy_flags & PHY_SERDES_FLAG) {
1194                         adv = ADVERTISE_1000XPAUSE;
1195                 }
1196                 else {
1197                         adv = ADVERTISE_PAUSE_CAP;
1198                 }
1199         }
1200         else if (bp->req_flow_ctrl & FLOW_CTRL_TX) {
1201                 if (bp->phy_flags & PHY_SERDES_FLAG) {
1202                         adv = ADVERTISE_1000XPSE_ASYM;
1203                 }
1204                 else {
1205                         adv = ADVERTISE_PAUSE_ASYM;
1206                 }
1207         }
1208         else if (bp->req_flow_ctrl & FLOW_CTRL_RX) {
1209                 if (bp->phy_flags & PHY_SERDES_FLAG) {
1210                         adv = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
1211                 }
1212                 else {
1213                         adv = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
1214                 }
1215         }
1216         return adv;
1217 }
1218
1219 static int bnx2_fw_sync(struct bnx2 *, u32, int);
1220
1221 static int
1222 bnx2_setup_remote_phy(struct bnx2 *bp, u8 port)
1223 {
1224         u32 speed_arg = 0, pause_adv;
1225
1226         pause_adv = bnx2_phy_get_pause_adv(bp);
1227
1228         if (bp->autoneg & AUTONEG_SPEED) {
1229                 speed_arg |= BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG;
1230                 if (bp->advertising & ADVERTISED_10baseT_Half)
1231                         speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_10HALF;
1232                 if (bp->advertising & ADVERTISED_10baseT_Full)
1233                         speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_10FULL;
1234                 if (bp->advertising & ADVERTISED_100baseT_Half)
1235                         speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_100HALF;
1236                 if (bp->advertising & ADVERTISED_100baseT_Full)
1237                         speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_100FULL;
1238                 if (bp->advertising & ADVERTISED_1000baseT_Full)
1239                         speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_1GFULL;
1240                 if (bp->advertising & ADVERTISED_2500baseX_Full)
1241                         speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_2G5FULL;
1242         } else {
1243                 if (bp->req_line_speed == SPEED_2500)
1244                         speed_arg = BNX2_NETLINK_SET_LINK_SPEED_2G5FULL;
1245                 else if (bp->req_line_speed == SPEED_1000)
1246                         speed_arg = BNX2_NETLINK_SET_LINK_SPEED_1GFULL;
1247                 else if (bp->req_line_speed == SPEED_100) {
1248                         if (bp->req_duplex == DUPLEX_FULL)
1249                                 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_100FULL;
1250                         else
1251                                 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_100HALF;
1252                 } else if (bp->req_line_speed == SPEED_10) {
1253                         if (bp->req_duplex == DUPLEX_FULL)
1254                                 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_10FULL;
1255                         else
1256                                 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_10HALF;
1257                 }
1258         }
1259
1260         if (pause_adv & (ADVERTISE_1000XPAUSE | ADVERTISE_PAUSE_CAP))
1261                 speed_arg |= BNX2_NETLINK_SET_LINK_FC_SYM_PAUSE;
1262         if (pause_adv & (ADVERTISE_1000XPSE_ASYM | ADVERTISE_1000XPSE_ASYM))
1263                 speed_arg |= BNX2_NETLINK_SET_LINK_FC_ASYM_PAUSE;
1264
1265         if (port == PORT_TP)
1266                 speed_arg |= BNX2_NETLINK_SET_LINK_PHY_APP_REMOTE |
1267                              BNX2_NETLINK_SET_LINK_ETH_AT_WIRESPEED;
1268
1269         REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_MB_ARG0, speed_arg);
1270
1271         spin_unlock_bh(&bp->phy_lock);
1272         bnx2_fw_sync(bp, BNX2_DRV_MSG_CODE_CMD_SET_LINK, 0);
1273         spin_lock_bh(&bp->phy_lock);
1274
1275         return 0;
1276 }
1277
/* Configure a SerDes PHY according to bp->autoneg / req_line_speed /
 * req_duplex.  Remote-PHY devices are delegated to the firmware.
 * Caller holds bp->phy_lock; it is dropped around the msleep() used to
 * make a forced link-down visible to the partner.
 */
static int
bnx2_setup_serdes_phy(struct bnx2 *bp, u8 port)
{
	u32 adv, bmcr;
	u32 new_adv = 0;

	if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
		return (bnx2_setup_remote_phy(bp, port));

	if (!(bp->autoneg & AUTONEG_SPEED)) {
		/* Forced speed/duplex path. */
		u32 new_bmcr;
		int force_link_down = 0;

		/* Toggling the 2.5G capability bit requires bouncing the
		 * link so the partner sees the change.
		 */
		if (bp->req_line_speed == SPEED_2500) {
			if (!bnx2_test_and_enable_2g5(bp))
				force_link_down = 1;
		} else if (bp->req_line_speed == SPEED_1000) {
			if (bnx2_test_and_disable_2g5(bp))
				force_link_down = 1;
		}
		bnx2_read_phy(bp, bp->mii_adv, &adv);
		adv &= ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF);

		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
		new_bmcr = bmcr & ~BMCR_ANENABLE;
		new_bmcr |= BMCR_SPEED1000;

		if (CHIP_NUM(bp) == CHIP_NUM_5709) {
			if (bp->req_line_speed == SPEED_2500)
				bnx2_enable_forced_2g5(bp);
			else if (bp->req_line_speed == SPEED_1000) {
				bnx2_disable_forced_2g5(bp);
				/* Clears bit 0x2000 — presumably the forced
				 * 2.5G speed select in the 5709 SerDes BMCR;
				 * confirm against the PHY documentation.
				 */
				new_bmcr &= ~0x2000;
			}

		} else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
			if (bp->req_line_speed == SPEED_2500)
				new_bmcr |= BCM5708S_BMCR_FORCE_2500;
			else
				new_bmcr = bmcr & ~BCM5708S_BMCR_FORCE_2500;
		}

		if (bp->req_duplex == DUPLEX_FULL) {
			adv |= ADVERTISE_1000XFULL;
			new_bmcr |= BMCR_FULLDPLX;
		}
		else {
			adv |= ADVERTISE_1000XHALF;
			new_bmcr &= ~BMCR_FULLDPLX;
		}
		if ((new_bmcr != bmcr) || (force_link_down)) {
			/* Force a link down visible on the other side */
			if (bp->link_up) {
				/* Briefly advertise nothing and restart
				 * autoneg so the partner drops the link
				 * before the new forced mode is applied.
				 */
				bnx2_write_phy(bp, bp->mii_adv, adv &
					       ~(ADVERTISE_1000XFULL |
						 ADVERTISE_1000XHALF));
				bnx2_write_phy(bp, bp->mii_bmcr, bmcr |
					BMCR_ANRESTART | BMCR_ANENABLE);

				bp->link_up = 0;
				netif_carrier_off(bp->dev);
				bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
				bnx2_report_link(bp);
			}
			bnx2_write_phy(bp, bp->mii_adv, adv);
			bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
		} else {
			/* Nothing changed in the PHY; just re-resolve
			 * flow control and reprogram the MAC.
			 */
			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);
		}
		return 0;
	}

	/* Autoneg path. */
	bnx2_test_and_enable_2g5(bp);

	if (bp->advertising & ADVERTISED_1000baseT_Full)
		new_adv |= ADVERTISE_1000XFULL;

	new_adv |= bnx2_phy_get_pause_adv(bp);

	bnx2_read_phy(bp, bp->mii_adv, &adv);
	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

	bp->serdes_an_pending = 0;
	if ((adv != new_adv) || ((bmcr & BMCR_ANENABLE) == 0)) {
		/* Force a link down visible on the other side */
		if (bp->link_up) {
			bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
			spin_unlock_bh(&bp->phy_lock);
			msleep(20);
			spin_lock_bh(&bp->phy_lock);
		}

		bnx2_write_phy(bp, bp->mii_adv, new_adv);
		bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART |
			BMCR_ANENABLE);
		/* Speed up link-up time when the link partner
		 * does not autonegotiate which is very common
		 * in blade servers. Some blade servers use
		 * IPMI for keyboard input and it's important
		 * to minimize link disruptions. Autoneg. involves
		 * exchanging base pages plus 3 next pages and
		 * normally completes in about 120 msec.
		 */
		bp->current_interval = SERDES_AN_TIMEOUT;
		bp->serdes_an_pending = 1;
		mod_timer(&bp->timer, jiffies + bp->current_interval);
	} else {
		bnx2_resolve_flow_ctrl(bp);
		bnx2_set_mac_link(bp);
	}

	return 0;
}
1392
/* Ethtool advertisement mask of every supported fibre speed; includes
 * 2.5G only when the PHY is 2.5G-capable.  NOTE: expands in terms of a
 * local variable named `bp`, so it is only usable where a
 * `struct bnx2 *bp` is in scope.
 */
#define ETHTOOL_ALL_FIBRE_SPEED                                         \
        (bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) ?                       \
                (ADVERTISED_2500baseX_Full | ADVERTISED_1000baseT_Full) :\
                (ADVERTISED_1000baseT_Full)

/* Ethtool advertisement mask for copper: 10/100 half+full, 1000 full. */
#define ETHTOOL_ALL_COPPER_SPEED                                        \
        (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |            \
        ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |           \
        ADVERTISED_1000baseT_Full)

/* MII advertisement-register masks: all 10/100 abilities plus the CSMA
 * selector bit, and all 1000BASE-T abilities (MII_CTRL1000).
 */
#define PHY_ALL_10_100_SPEED (ADVERTISE_10HALF | ADVERTISE_10FULL | \
        ADVERTISE_100HALF | ADVERTISE_100FULL | ADVERTISE_CSMA)

#define PHY_ALL_1000_SPEED (ADVERTISE_1000HALF | ADVERTISE_1000FULL)
1407
1408 static void
1409 bnx2_set_default_remote_link(struct bnx2 *bp)
1410 {
1411         u32 link;
1412
1413         if (bp->phy_port == PORT_TP)
1414                 link = REG_RD_IND(bp, bp->shmem_base + BNX2_RPHY_COPPER_LINK);
1415         else
1416                 link = REG_RD_IND(bp, bp->shmem_base + BNX2_RPHY_SERDES_LINK);
1417
1418         if (link & BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG) {
1419                 bp->req_line_speed = 0;
1420                 bp->autoneg |= AUTONEG_SPEED;
1421                 bp->advertising = ADVERTISED_Autoneg;
1422                 if (link & BNX2_NETLINK_SET_LINK_SPEED_10HALF)
1423                         bp->advertising |= ADVERTISED_10baseT_Half;
1424                 if (link & BNX2_NETLINK_SET_LINK_SPEED_10FULL)
1425                         bp->advertising |= ADVERTISED_10baseT_Full;
1426                 if (link & BNX2_NETLINK_SET_LINK_SPEED_100HALF)
1427                         bp->advertising |= ADVERTISED_100baseT_Half;
1428                 if (link & BNX2_NETLINK_SET_LINK_SPEED_100FULL)
1429                         bp->advertising |= ADVERTISED_100baseT_Full;
1430                 if (link & BNX2_NETLINK_SET_LINK_SPEED_1GFULL)
1431                         bp->advertising |= ADVERTISED_1000baseT_Full;
1432                 if (link & BNX2_NETLINK_SET_LINK_SPEED_2G5FULL)
1433                         bp->advertising |= ADVERTISED_2500baseX_Full;
1434         } else {
1435                 bp->autoneg = 0;
1436                 bp->advertising = 0;
1437                 bp->req_duplex = DUPLEX_FULL;
1438                 if (link & BNX2_NETLINK_SET_LINK_SPEED_10) {
1439                         bp->req_line_speed = SPEED_10;
1440                         if (link & BNX2_NETLINK_SET_LINK_SPEED_10HALF)
1441                                 bp->req_duplex = DUPLEX_HALF;
1442                 }
1443                 if (link & BNX2_NETLINK_SET_LINK_SPEED_100) {
1444                         bp->req_line_speed = SPEED_100;
1445                         if (link & BNX2_NETLINK_SET_LINK_SPEED_100HALF)
1446                                 bp->req_duplex = DUPLEX_HALF;
1447                 }
1448                 if (link & BNX2_NETLINK_SET_LINK_SPEED_1GFULL)
1449                         bp->req_line_speed = SPEED_1000;
1450                 if (link & BNX2_NETLINK_SET_LINK_SPEED_2G5FULL)
1451                         bp->req_line_speed = SPEED_2500;
1452         }
1453 }
1454
1455 static void
1456 bnx2_set_default_link(struct bnx2 *bp)
1457 {
1458         if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
1459                 return bnx2_set_default_remote_link(bp);
1460
1461         bp->autoneg = AUTONEG_SPEED | AUTONEG_FLOW_CTRL;
1462         bp->req_line_speed = 0;
1463         if (bp->phy_flags & PHY_SERDES_FLAG) {
1464                 u32 reg;
1465
1466                 bp->advertising = ETHTOOL_ALL_FIBRE_SPEED | ADVERTISED_Autoneg;
1467
1468                 reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_CONFIG);
1469                 reg &= BNX2_PORT_HW_CFG_CFG_DFLT_LINK_MASK;
1470                 if (reg == BNX2_PORT_HW_CFG_CFG_DFLT_LINK_1G) {
1471                         bp->autoneg = 0;
1472                         bp->req_line_speed = bp->line_speed = SPEED_1000;
1473                         bp->req_duplex = DUPLEX_FULL;
1474                 }
1475         } else
1476                 bp->advertising = ETHTOOL_ALL_COPPER_SPEED | ADVERTISED_Autoneg;
1477 }
1478
/* Handle a link event from the firmware-managed remote PHY: decode
 * speed/duplex/flow-control from the shared-memory link status word,
 * switch defaults if the active port type changed, report link changes,
 * and reprogram the MAC.
 */
static void
bnx2_remote_phy_event(struct bnx2 *bp)
{
	u32 msg;
	/* Remember the old state so we only report on a change. */
	u8 link_up = bp->link_up;
	u8 old_port;

	msg = REG_RD_IND(bp, bp->shmem_base + BNX2_LINK_STATUS);

	if ((msg & BNX2_LINK_STATUS_LINK_UP) == BNX2_LINK_STATUS_LINK_DOWN)
		bp->link_up = 0;
	else {
		u32 speed;

		bp->link_up = 1;
		speed = msg & BNX2_LINK_STATUS_SPEED_MASK;
		bp->duplex = DUPLEX_FULL;
		/* The *HALF cases override the full-duplex default and
		 * then deliberately fall through to set the speed.
		 */
		switch (speed) {
			case BNX2_LINK_STATUS_10HALF:
				bp->duplex = DUPLEX_HALF;
				/* fall through */
			case BNX2_LINK_STATUS_10FULL:
				bp->line_speed = SPEED_10;
				break;
			case BNX2_LINK_STATUS_100HALF:
				bp->duplex = DUPLEX_HALF;
				/* fall through */
			case BNX2_LINK_STATUS_100BASE_T4:
			case BNX2_LINK_STATUS_100FULL:
				bp->line_speed = SPEED_100;
				break;
			case BNX2_LINK_STATUS_1000HALF:
				bp->duplex = DUPLEX_HALF;
				/* fall through */
			case BNX2_LINK_STATUS_1000FULL:
				bp->line_speed = SPEED_1000;
				break;
			case BNX2_LINK_STATUS_2500HALF:
				bp->duplex = DUPLEX_HALF;
				/* fall through */
			case BNX2_LINK_STATUS_2500FULL:
				bp->line_speed = SPEED_2500;
				break;
			default:
				bp->line_speed = 0;
				break;
		}

		spin_lock(&bp->phy_lock);
		bp->flow_ctrl = 0;
		/* Flow control: forced unless BOTH speed and flow-control
		 * autoneg are enabled, in which case take the firmware's
		 * negotiated result.
		 */
		if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
		    (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {
			if (bp->duplex == DUPLEX_FULL)
				bp->flow_ctrl = bp->req_flow_ctrl;
		} else {
			if (msg & BNX2_LINK_STATUS_TX_FC_ENABLED)
				bp->flow_ctrl |= FLOW_CTRL_TX;
			if (msg & BNX2_LINK_STATUS_RX_FC_ENABLED)
				bp->flow_ctrl |= FLOW_CTRL_RX;
		}

		/* If the firmware switched between TP and fibre, reload
		 * the matching default link configuration.
		 */
		old_port = bp->phy_port;
		if (msg & BNX2_LINK_STATUS_SERDES_LINK)
			bp->phy_port = PORT_FIBRE;
		else
			bp->phy_port = PORT_TP;

		if (old_port != bp->phy_port)
			bnx2_set_default_link(bp);

		spin_unlock(&bp->phy_lock);
	}
	if (bp->link_up != link_up)
		bnx2_report_link(bp);

	bnx2_set_mac_link(bp);
}
1552
1553 static int
1554 bnx2_set_remote_link(struct bnx2 *bp)
1555 {
1556         u32 evt_code;
1557
1558         evt_code = REG_RD_IND(bp, bp->shmem_base + BNX2_FW_EVT_CODE_MB);
1559         switch (evt_code) {
1560                 case BNX2_FW_EVT_CODE_LINK_EVENT:
1561                         bnx2_remote_phy_event(bp);
1562                         break;
1563                 case BNX2_FW_EVT_CODE_SW_TIMER_EXPIRATION_EVENT:
1564                 default:
1565                         break;
1566         }
1567         return 0;
1568 }
1569
/* Configure a copper PHY according to bp->autoneg / req_line_speed /
 * req_duplex.  Caller holds bp->phy_lock; the lock is dropped around the
 * msleep() used to let a forced link-down settle.
 */
static int
bnx2_setup_copper_phy(struct bnx2 *bp)
{
	u32 bmcr;
	u32 new_bmcr;

	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

	if (bp->autoneg & AUTONEG_SPEED) {
		u32 adv_reg, adv1000_reg;
		u32 new_adv_reg = 0;
		u32 new_adv1000_reg = 0;

		/* Mask the current advertisements down to the speed and
		 * pause bits so they compare cleanly against the new
		 * values built below.
		 */
		bnx2_read_phy(bp, bp->mii_adv, &adv_reg);
		adv_reg &= (PHY_ALL_10_100_SPEED | ADVERTISE_PAUSE_CAP |
			ADVERTISE_PAUSE_ASYM);

		bnx2_read_phy(bp, MII_CTRL1000, &adv1000_reg);
		adv1000_reg &= PHY_ALL_1000_SPEED;

		if (bp->advertising & ADVERTISED_10baseT_Half)
			new_adv_reg |= ADVERTISE_10HALF;
		if (bp->advertising & ADVERTISED_10baseT_Full)
			new_adv_reg |= ADVERTISE_10FULL;
		if (bp->advertising & ADVERTISED_100baseT_Half)
			new_adv_reg |= ADVERTISE_100HALF;
		if (bp->advertising & ADVERTISED_100baseT_Full)
			new_adv_reg |= ADVERTISE_100FULL;
		if (bp->advertising & ADVERTISED_1000baseT_Full)
			new_adv1000_reg |= ADVERTISE_1000FULL;

		new_adv_reg |= ADVERTISE_CSMA;

		new_adv_reg |= bnx2_phy_get_pause_adv(bp);

		/* Only rewrite the PHY and restart autoneg when something
		 * actually changed (or autoneg was off).
		 */
		if ((adv1000_reg != new_adv1000_reg) ||
			(adv_reg != new_adv_reg) ||
			((bmcr & BMCR_ANENABLE) == 0)) {

			bnx2_write_phy(bp, bp->mii_adv, new_adv_reg);
			bnx2_write_phy(bp, MII_CTRL1000, new_adv1000_reg);
			bnx2_write_phy(bp, bp->mii_bmcr, BMCR_ANRESTART |
				BMCR_ANENABLE);
		}
		else if (bp->link_up) {
			/* Flow ctrl may have changed from auto to forced */
			/* or vice-versa. */

			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);
		}
		return 0;
	}

	/* Forced speed/duplex path. */
	new_bmcr = 0;
	if (bp->req_line_speed == SPEED_100) {
		new_bmcr |= BMCR_SPEED100;
	}
	if (bp->req_duplex == DUPLEX_FULL) {
		new_bmcr |= BMCR_FULLDPLX;
	}
	if (new_bmcr != bmcr) {
		u32 bmsr;

		/* BMSR is latched; read twice for the current state. */
		bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
		bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);

		if (bmsr & BMSR_LSTATUS) {
			/* Force link down */
			bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
			spin_unlock_bh(&bp->phy_lock);
			msleep(50);
			spin_lock_bh(&bp->phy_lock);

			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
		}

		bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);

		/* Normally, the new speed is setup after the link has
		 * gone down and up again. In some cases, link will not go
		 * down so we need to set up the new speed here.
		 */
		if (bmsr & BMSR_LSTATUS) {
			bp->line_speed = bp->req_line_speed;
			bp->duplex = bp->req_duplex;
			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);
		}
	} else {
		bnx2_resolve_flow_ctrl(bp);
		bnx2_set_mac_link(bp);
	}
	return 0;
}
1666
1667 static int
1668 bnx2_setup_phy(struct bnx2 *bp, u8 port)
1669 {
1670         if (bp->loopback == MAC_LOOPBACK)
1671                 return 0;
1672
1673         if (bp->phy_flags & PHY_SERDES_FLAG) {
1674                 return (bnx2_setup_serdes_phy(bp, port));
1675         }
1676         else {
1677                 return (bnx2_setup_copper_phy(bp));
1678         }
1679 }
1680
/* One-time initialization of the 5709 SerDes PHY.
 *
 * On this PHY the standard IEEE MII registers are accessed at an
 * offset of 0x10, so the bp->mii_* register numbers are redirected
 * first; all later generic PHY code then uses the right addresses.
 * The remainder selects register blocks via MII_BNX2_BLK_ADDR and
 * programs each block in turn.  Always returns 0.
 */
static int
bnx2_init_5709s_phy(struct bnx2 *bp)
{
	u32 val;

	/* Standard MII registers live at a +0x10 offset on the 5709S. */
	bp->mii_bmcr = MII_BMCR + 0x10;
	bp->mii_bmsr = MII_BMSR + 0x10;
	bp->mii_bmsr1 = MII_BNX2_GP_TOP_AN_STATUS1;
	bp->mii_adv = MII_ADVERTISE + 0x10;
	bp->mii_lpa = MII_LPA + 0x10;
	bp->mii_up1 = MII_BNX2_OVER1G_UP1;

	/* Select the AER block and point it at the autoneg MMD. */
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_AER);
	bnx2_write_phy(bp, MII_BNX2_AER_AER, MII_BNX2_AER_AER_AN_MMD);

	/* Reset the PHY with the combo IEEE block selected. */
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
	bnx2_reset_phy(bp);

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_SERDES_DIG);

	/* Force fiber mode; disable the media auto-detect. */
	bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, &val);
	val &= ~MII_BNX2_SD_1000XCTL1_AUTODET;
	val |= MII_BNX2_SD_1000XCTL1_FIBER;
	bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, val);

	/* Advertise 2.5G in the over-1G block only if the device is
	 * 2.5G capable.
	 */
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
	bnx2_read_phy(bp, MII_BNX2_OVER1G_UP1, &val);
	if (bp->phy_flags & PHY_2_5G_CAPABLE_FLAG)
		val |= BCM5708S_UP1_2G5;
	else
		val &= ~BCM5708S_UP1_2G5;
	bnx2_write_phy(bp, MII_BNX2_OVER1G_UP1, val);

	/* Enable Broadcom autoneg (BAM) next-page exchange. */
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_BAM_NXTPG);
	bnx2_read_phy(bp, MII_BNX2_BAM_NXTPG_CTL, &val);
	val |= MII_BNX2_NXTPG_CTL_T2 | MII_BNX2_NXTPG_CTL_BAM;
	bnx2_write_phy(bp, MII_BNX2_BAM_NXTPG_CTL, val);

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_CL73_USERB0);

	/* Enable clause-73 BAM station-manager handling. */
	val = MII_BNX2_CL73_BAM_EN | MII_BNX2_CL73_BAM_STA_MGR_EN |
	      MII_BNX2_CL73_BAM_NP_AFT_BP_EN;
	bnx2_write_phy(bp, MII_BNX2_CL73_BAM_CTL1, val);

	/* Leave the block pointer on the combo IEEE block so the
	 * redirected bp->mii_* accesses work afterwards.
	 */
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);

	return 0;
}
1729
/* One-time initialization of the 5708 SerDes PHY.
 *
 * Resets the PHY, selects fiber mode with auto-detect, enables PLL
 * early-link detect, advertises 2.5G when capable, and applies two
 * board/stepping-specific TX amplitude tweaks.  Always returns 0.
 */
static int
bnx2_init_5708s_phy(struct bnx2 *bp)
{
	u32 val;

	bnx2_reset_phy(bp);

	bp->mii_up1 = BCM5708S_UP1;

	/* Use IEEE register layout in the DIG3 block. */
	bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG3);
	bnx2_write_phy(bp, BCM5708S_DIG_3_0, BCM5708S_DIG_3_0_USE_IEEE);
	bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);

	/* Fiber mode with media auto-detect enabled. */
	bnx2_read_phy(bp, BCM5708S_1000X_CTL1, &val);
	val |= BCM5708S_1000X_CTL1_FIBER_MODE | BCM5708S_1000X_CTL1_AUTODET_EN;
	bnx2_write_phy(bp, BCM5708S_1000X_CTL1, val);

	/* Enable parallel (PLL early) link detection. */
	bnx2_read_phy(bp, BCM5708S_1000X_CTL2, &val);
	val |= BCM5708S_1000X_CTL2_PLLEL_DET_EN;
	bnx2_write_phy(bp, BCM5708S_1000X_CTL2, val);

	/* Advertise 2.5G only on 2.5G-capable hardware. */
	if (bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) {
		bnx2_read_phy(bp, BCM5708S_UP1, &val);
		val |= BCM5708S_UP1_2G5;
		bnx2_write_phy(bp, BCM5708S_UP1, val);
	}

	/* Early 5708 steppings need a stronger TX driver signal. */
	if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_B1)) {
		/* increase tx signal amplitude */
		bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
			       BCM5708S_BLK_ADDR_TX_MISC);
		bnx2_read_phy(bp, BCM5708S_TX_ACTL1, &val);
		val &= ~BCM5708S_TX_ACTL1_DRIVER_VCM;
		bnx2_write_phy(bp, BCM5708S_TX_ACTL1, val);
		bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);
	}

	/* Apply a board-specific TX control value from shared memory,
	 * but only on backplane (blade) designs.
	 */
	val = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_CONFIG) &
	      BNX2_PORT_HW_CFG_CFG_TXCTL3_MASK;

	if (val) {
		u32 is_backplane;

		is_backplane = REG_RD_IND(bp, bp->shmem_base +
					  BNX2_SHARED_HW_CFG_CONFIG);
		if (is_backplane & BNX2_SHARED_HW_CFG_PHY_BACKPLANE) {
			bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
				       BCM5708S_BLK_ADDR_TX_MISC);
			bnx2_write_phy(bp, BCM5708S_TX_ACTL3, val);
			bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
				       BCM5708S_BLK_ADDR_DIG);
		}
	}
	return 0;
}
1787
/* One-time initialization of the 5706 SerDes PHY.
 *
 * The 0x18/0x1c accesses below program vendor-specific shadow
 * registers (write selects the shadow, read-modify-write updates it);
 * the exact values come from Broadcom -- do not change them.
 * Always returns 0.
 */
static int
bnx2_init_5706s_phy(struct bnx2 *bp)
{
	bnx2_reset_phy(bp);

	bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG;

	if (CHIP_NUM(bp) == CHIP_NUM_5706)
		REG_WR(bp, BNX2_MISC_GP_HW_CTL0, 0x300);

	/* Jumbo frames need the extended packet length bit set. */
	if (bp->dev->mtu > 1500) {
		u32 val;

		/* Set extended packet length bit */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, (val & 0xfff8) | 0x4000);

		bnx2_write_phy(bp, 0x1c, 0x6c00);
		bnx2_read_phy(bp, 0x1c, &val);
		bnx2_write_phy(bp, 0x1c, (val & 0x3ff) | 0xec02);
	}
	else {
		u32 val;

		/* Clear the extended packet length bit for standard MTU. */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, val & ~0x4007);

		bnx2_write_phy(bp, 0x1c, 0x6c00);
		bnx2_read_phy(bp, 0x1c, &val);
		bnx2_write_phy(bp, 0x1c, (val & 0x3fd) | 0xec00);
	}

	return 0;
}
1824
/* One-time initialization of the copper (twisted-pair) PHY.
 *
 * Applies two errata workarounds gated by phy_flags, configures the
 * extended-packet-length bit according to MTU, and enables the
 * "ethernet@wirespeed" (link speed fallback) feature.  The 0x15/0x17/
 * 0x18/0x1c writes target vendor-specific shadow/DSP registers with
 * values supplied by Broadcom.  Always returns 0.
 */
static int
bnx2_init_copper_phy(struct bnx2 *bp)
{
	u32 val;

	bnx2_reset_phy(bp);

	/* DSP writes to work around a CRC errata on affected parts. */
	if (bp->phy_flags & PHY_CRC_FIX_FLAG) {
		bnx2_write_phy(bp, 0x18, 0x0c00);
		bnx2_write_phy(bp, 0x17, 0x000a);
		bnx2_write_phy(bp, 0x15, 0x310b);
		bnx2_write_phy(bp, 0x17, 0x201f);
		bnx2_write_phy(bp, 0x15, 0x9506);
		bnx2_write_phy(bp, 0x17, 0x401f);
		bnx2_write_phy(bp, 0x15, 0x14e2);
		bnx2_write_phy(bp, 0x18, 0x0400);
	}

	/* Disable early DAC in the DSP expansion register (errata). */
	if (bp->phy_flags & PHY_DIS_EARLY_DAC_FLAG) {
		bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS,
			       MII_BNX2_DSP_EXPAND_REG | 0x8);
		bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &val);
		val &= ~(1 << 8);
		bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val);
	}

	if (bp->dev->mtu > 1500) {
		/* Set extended packet length bit */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, val | 0x4000);

		bnx2_read_phy(bp, 0x10, &val);
		bnx2_write_phy(bp, 0x10, val | 0x1);
	}
	else {
		/* Clear the extended packet length bit for standard MTU. */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, val & ~0x4007);

		bnx2_read_phy(bp, 0x10, &val);
		bnx2_write_phy(bp, 0x10, val & ~0x1);
	}

	/* ethernet@wirespeed */
	bnx2_write_phy(bp, 0x18, 0x7007);
	bnx2_read_phy(bp, 0x18, &val);
	bnx2_write_phy(bp, 0x18, val | (1 << 15) | (1 << 4));
	return 0;
}
1875
1876
/* Top-level PHY initialization.
 *
 * Sets the default (un-offset) MII register numbers, enables link
 * attention interrupts, reads the PHY ID, runs the chip-specific
 * init routine, and finally calls bnx2_setup_phy().  When the PHY is
 * managed remotely by firmware (REMOTE_PHY_CAP_FLAG), the local PHY
 * probing/init is skipped entirely.  Returns 0 or the first error
 * from the init/setup routines.
 */
static int
bnx2_init_phy(struct bnx2 *bp)
{
	u32 val;
	int rc = 0;

	bp->phy_flags &= ~PHY_INT_MODE_MASK_FLAG;
	bp->phy_flags |= PHY_INT_MODE_LINK_READY_FLAG;

	/* Default MII register map; chip-specific init may override. */
	bp->mii_bmcr = MII_BMCR;
	bp->mii_bmsr = MII_BMSR;
	bp->mii_bmsr1 = MII_BMSR;
	bp->mii_adv = MII_ADVERTISE;
	bp->mii_lpa = MII_LPA;

	REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);

	/* Remote-PHY: firmware owns the PHY, skip local init. */
	if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
		goto setup_phy;

	/* Assemble the 32-bit PHY ID from the two ID registers. */
	bnx2_read_phy(bp, MII_PHYSID1, &val);
	bp->phy_id = val << 16;
	bnx2_read_phy(bp, MII_PHYSID2, &val);
	bp->phy_id |= val & 0xffff;

	if (bp->phy_flags & PHY_SERDES_FLAG) {
		if (CHIP_NUM(bp) == CHIP_NUM_5706)
			rc = bnx2_init_5706s_phy(bp);
		else if (CHIP_NUM(bp) == CHIP_NUM_5708)
			rc = bnx2_init_5708s_phy(bp);
		else if (CHIP_NUM(bp) == CHIP_NUM_5709)
			rc = bnx2_init_5709s_phy(bp);
	}
	else {
		rc = bnx2_init_copper_phy(bp);
	}

setup_phy:
	if (!rc)
		rc = bnx2_setup_phy(bp, bp->phy_port);

	return rc;
}
1920
1921 static int
1922 bnx2_set_mac_loopback(struct bnx2 *bp)
1923 {
1924         u32 mac_mode;
1925
1926         mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
1927         mac_mode &= ~BNX2_EMAC_MODE_PORT;
1928         mac_mode |= BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK;
1929         REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
1930         bp->link_up = 1;
1931         return 0;
1932 }
1933
1934 static int bnx2_test_link(struct bnx2 *);
1935
/* Put the PHY into loopback mode (1000 Mbps full duplex) for the
 * loopback self-test.  Waits up to ~1 second for the link to come up,
 * then programs the MAC for GMII with all loopback/force bits cleared.
 * Returns the BMCR write error, if any, otherwise 0.
 */
static int
bnx2_set_phy_loopback(struct bnx2 *bp)
{
	u32 mac_mode;
	int rc, i;

	/* phy_lock protects the MDIO access. */
	spin_lock_bh(&bp->phy_lock);
	rc = bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK | BMCR_FULLDPLX |
			    BMCR_SPEED1000);
	spin_unlock_bh(&bp->phy_lock);
	if (rc)
		return rc;

	/* Poll for link-up; 10 tries of 100 ms each. */
	for (i = 0; i < 10; i++) {
		if (bnx2_test_link(bp) == 0)
			break;
		msleep(100);
	}

	mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
	mac_mode &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
		      BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
		      BNX2_EMAC_MODE_25G_MODE);

	mac_mode |= BNX2_EMAC_MODE_PORT_GMII;
	REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
	bp->link_up = 1;
	return 0;
}
1965
/* Send a message to the bootcode firmware via the shared-memory
 * mailbox and wait for the firmware to acknowledge it.
 *
 * @msg_data: message code and data; a rolling sequence number is
 *            or'ed in so the ack can be matched to this message.
 * @silent:   if non-zero, suppress the timeout printk.
 *
 * Returns 0 on success (or unconditionally for WAIT0-type messages,
 * which do not require a completed handshake), -EBUSY if the firmware
 * never acked, or -EIO if the firmware reported a bad status.
 */
static int
bnx2_fw_sync(struct bnx2 *bp, u32 msg_data, int silent)
{
	int i;
	u32 val;

	/* Tag the message with the next sequence number. */
	bp->fw_wr_seq++;
	msg_data |= bp->fw_wr_seq;

	REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_MB, msg_data);

	/* wait for an acknowledgement. */
	for (i = 0; i < (FW_ACK_TIME_OUT_MS / 10); i++) {
		msleep(10);

		val = REG_RD_IND(bp, bp->shmem_base + BNX2_FW_MB);

		/* Firmware echoes the sequence number in the ack field. */
		if ((val & BNX2_FW_MSG_ACK) == (msg_data & BNX2_DRV_MSG_SEQ))
			break;
	}
	/* WAIT0 messages don't need a completed handshake. */
	if ((msg_data & BNX2_DRV_MSG_DATA) == BNX2_DRV_MSG_DATA_WAIT0)
		return 0;

	/* If we timed out, inform the firmware that this is the case. */
	if ((val & BNX2_FW_MSG_ACK) != (msg_data & BNX2_DRV_MSG_SEQ)) {
		if (!silent)
			printk(KERN_ERR PFX "fw sync timeout, reset code = "
					    "%x\n", msg_data);

		msg_data &= ~BNX2_DRV_MSG_CODE;
		msg_data |= BNX2_DRV_MSG_CODE_FW_TIMEOUT;

		REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_MB, msg_data);

		return -EBUSY;
	}

	if ((val & BNX2_FW_MSG_STATUS_MASK) != BNX2_FW_MSG_STATUS_OK)
		return -EIO;

	return 0;
}
2008
/* Initialize the 5709's host-backed context memory.
 *
 * Kicks off the chip's context memory init, waits for it to finish,
 * then programs the host page table with the DMA address of each
 * context block page, polling for each write request to complete.
 * Returns 0 on success or -EBUSY on a hardware timeout.
 */
static int
bnx2_init_5709_context(struct bnx2 *bp)
{
	int i, ret = 0;
	u32 val;

	val = BNX2_CTX_COMMAND_ENABLED | BNX2_CTX_COMMAND_MEM_INIT | (1 << 12);
	val |= (BCM_PAGE_BITS - 8) << 16;
	REG_WR(bp, BNX2_CTX_COMMAND, val);
	/* Wait for the chip to clear MEM_INIT (up to 10 x 2 us). */
	for (i = 0; i < 10; i++) {
		val = REG_RD(bp, BNX2_CTX_COMMAND);
		if (!(val & BNX2_CTX_COMMAND_MEM_INIT))
			break;
		udelay(2);
	}
	if (val & BNX2_CTX_COMMAND_MEM_INIT)
		return -EBUSY;

	/* Write each context page's DMA address into the page table. */
	for (i = 0; i < bp->ctx_pages; i++) {
		int j;

		REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA0,
		       (bp->ctx_blk_mapping[i] & 0xffffffff) |
		       BNX2_CTX_HOST_PAGE_TBL_DATA0_VALID);
		REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA1,
		       (u64) bp->ctx_blk_mapping[i] >> 32);
		REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL, i |
		       BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ);
		/* Poll until the chip consumes the write request. */
		for (j = 0; j < 10; j++) {

			val = REG_RD(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL);
			if (!(val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ))
				break;
			udelay(5);
		}
		if (val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ) {
			ret = -EBUSY;
			break;
		}
	}
	return ret;
}
2051
/* Zero-initialize the on-chip context memory for all 96 connection
 * IDs (pre-5709 chips).  On 5706 A0 some VCIDs are remapped to
 * different physical CIDs to work around a silicon issue.
 */
static void
bnx2_init_context(struct bnx2 *bp)
{
	u32 vcid;

	vcid = 96;
	while (vcid) {
		u32 vcid_addr, pcid_addr, offset;
		int i;

		vcid--;

		if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
			u32 new_vcid;

			/* 5706 A0 workaround: VCIDs with bit 3 set map to
			 * a relocated physical CID.
			 */
			vcid_addr = GET_PCID_ADDR(vcid);
			if (vcid & 0x8) {
				new_vcid = 0x60 + (vcid & 0xf0) + (vcid & 0x7);
			}
			else {
				new_vcid = vcid;
			}
			pcid_addr = GET_PCID_ADDR(new_vcid);
		}
		else {
			vcid_addr = GET_CID_ADDR(vcid);
			pcid_addr = vcid_addr;
		}

		/* Each context spans CTX_SIZE/PHY_CTX_SIZE physical pages. */
		for (i = 0; i < (CTX_SIZE / PHY_CTX_SIZE); i++) {
			vcid_addr += (i << PHY_CTX_SHIFT);
			pcid_addr += (i << PHY_CTX_SHIFT);

			REG_WR(bp, BNX2_CTX_VIRT_ADDR, 0x00);
			REG_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr);

			/* Zero out the context. */
			for (offset = 0; offset < PHY_CTX_SIZE; offset += 4)
				CTX_WR(bp, 0x00, offset, 0);

			/* Map the virtual CID to its physical page. */
			REG_WR(bp, BNX2_CTX_VIRT_ADDR, vcid_addr);
			REG_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr);
		}
	}
}
2097
/* Work around bad internal RX buffer memory blocks.
 *
 * Drains the chip's internal RX buffer allocator, remembering only
 * the good buffers (those without bit 9 set in the address), then
 * frees the good ones back -- leaving the bad blocks permanently
 * allocated so the chip never hands them out again.
 * Returns 0 on success or -ENOMEM if the scratch array allocation
 * fails.
 */
static int
bnx2_alloc_bad_rbuf(struct bnx2 *bp)
{
	u16 *good_mbuf;
	u32 good_mbuf_cnt;
	u32 val;

	/* Scratch array for up to 512 good buffer handles. */
	good_mbuf = kmalloc(512 * sizeof(u16), GFP_KERNEL);
	if (good_mbuf == NULL) {
		printk(KERN_ERR PFX "Failed to allocate memory in "
				    "bnx2_alloc_bad_rbuf\n");
		return -ENOMEM;
	}

	REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
		BNX2_MISC_ENABLE_SET_BITS_RX_MBUF_ENABLE);

	good_mbuf_cnt = 0;

	/* Allocate a bunch of mbufs and save the good ones in an array. */
	val = REG_RD_IND(bp, BNX2_RBUF_STATUS1);
	while (val & BNX2_RBUF_STATUS1_FREE_COUNT) {
		REG_WR_IND(bp, BNX2_RBUF_COMMAND, BNX2_RBUF_COMMAND_ALLOC_REQ);

		val = REG_RD_IND(bp, BNX2_RBUF_FW_BUF_ALLOC);

		val &= BNX2_RBUF_FW_BUF_ALLOC_VALUE;

		/* The addresses with Bit 9 set are bad memory blocks. */
		if (!(val & (1 << 9))) {
			good_mbuf[good_mbuf_cnt] = (u16) val;
			good_mbuf_cnt++;
		}

		val = REG_RD_IND(bp, BNX2_RBUF_STATUS1);
	}

	/* Free the good ones back to the mbuf pool thus discarding
	 * all the bad ones. */
	while (good_mbuf_cnt) {
		good_mbuf_cnt--;

		val = good_mbuf[good_mbuf_cnt];
		/* Encode the handle into the format the free register
		 * expects (handle in both halves plus the valid bit).
		 */
		val = (val << 9) | val | 1;

		REG_WR_IND(bp, BNX2_RBUF_FW_BUF_FREE, val);
	}
	kfree(good_mbuf);
	return 0;
}
2148
2149 static void
2150 bnx2_set_mac_addr(struct bnx2 *bp)
2151 {
2152         u32 val;
2153         u8 *mac_addr = bp->dev->dev_addr;
2154
2155         val = (mac_addr[0] << 8) | mac_addr[1];
2156
2157         REG_WR(bp, BNX2_EMAC_MAC_MATCH0, val);
2158
2159         val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
2160                 (mac_addr[4] << 8) | mac_addr[5];
2161
2162         REG_WR(bp, BNX2_EMAC_MAC_MATCH1, val);
2163 }
2164
/* Allocate and DMA-map a new receive skb and attach it to RX ring
 * slot @index, filling in the buffer descriptor with the mapped
 * address and advancing the producer byte-sequence counter.
 * Returns 0 on success, -ENOMEM if the skb allocation fails.
 */
static inline int
bnx2_alloc_rx_skb(struct bnx2 *bp, u16 index)
{
	struct sk_buff *skb;
	struct sw_bd *rx_buf = &bp->rx_buf_ring[index];
	dma_addr_t mapping;
	struct rx_bd *rxbd = &bp->rx_desc_ring[RX_RING(index)][RX_IDX(index)];
	unsigned long align;

	skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
	if (skb == NULL) {
		return -ENOMEM;
	}

	/* Align the data pointer to a BNX2_RX_ALIGN boundary. */
	if (unlikely((align = (unsigned long) skb->data & (BNX2_RX_ALIGN - 1))))
		skb_reserve(skb, BNX2_RX_ALIGN - align);

	mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_use_size,
		PCI_DMA_FROMDEVICE);

	rx_buf->skb = skb;
	pci_unmap_addr_set(rx_buf, mapping, mapping);

	/* Split the 64-bit DMA address across the two BD halves. */
	rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
	rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;

	bp->rx_prod_bseq += bp->rx_buf_use_size;

	return 0;
}
2195
2196 static int
2197 bnx2_phy_event_is_set(struct bnx2 *bp, u32 event)
2198 {
2199         struct status_block *sblk = bp->status_blk;
2200         u32 new_link_state, old_link_state;
2201         int is_set = 1;
2202
2203         new_link_state = sblk->status_attn_bits & event;
2204         old_link_state = sblk->status_attn_bits_ack & event;
2205         if (new_link_state != old_link_state) {
2206                 if (new_link_state)
2207                         REG_WR(bp, BNX2_PCICFG_STATUS_BIT_SET_CMD, event);
2208                 else
2209                         REG_WR(bp, BNX2_PCICFG_STATUS_BIT_CLEAR_CMD, event);
2210         } else
2211                 is_set = 0;
2212
2213         return is_set;
2214 }
2215
/* Service PHY-related attention events from the status block:
 * link-state changes (handled under phy_lock), and TIMER_ABORT
 * events which carry remote-PHY link information from the firmware.
 */
static void
bnx2_phy_int(struct bnx2 *bp)
{
	if (bnx2_phy_event_is_set(bp, STATUS_ATTN_BITS_LINK_STATE)) {
		spin_lock(&bp->phy_lock);
		bnx2_set_link(bp);
		spin_unlock(&bp->phy_lock);
	}
	if (bnx2_phy_event_is_set(bp, STATUS_ATTN_BITS_TIMER_ABORT))
		bnx2_set_remote_link(bp);

}
2228
/* Reclaim completed TX buffer descriptors.
 *
 * Walks the TX ring from the software consumer index up to the
 * hardware consumer index reported in the status block, unmapping
 * DMA buffers and freeing the skbs, then wakes the TX queue if it
 * was stopped and enough descriptors are now free.  Runs lockless
 * against bnx2_start_xmit(); the smp_mb() below is what makes that
 * safe.
 */
static void
bnx2_tx_int(struct bnx2 *bp)
{
	struct status_block *sblk = bp->status_blk;
	u16 hw_cons, sw_cons, sw_ring_cons;
	int tx_free_bd = 0;

	hw_cons = bp->hw_tx_cons = sblk->status_tx_quick_consumer_index0;
	/* The last entry of each ring page is not a usable descriptor,
	 * so step past it when the index lands there.
	 */
	if ((hw_cons & MAX_TX_DESC_CNT) == MAX_TX_DESC_CNT) {
		hw_cons++;
	}
	sw_cons = bp->tx_cons;

	while (sw_cons != hw_cons) {
		struct sw_bd *tx_buf;
		struct sk_buff *skb;
		int i, last;

		sw_ring_cons = TX_RING_IDX(sw_cons);

		tx_buf = &bp->tx_buf_ring[sw_ring_cons];
		skb = tx_buf->skb;

		/* partial BD completions possible with TSO packets */
		if (skb_is_gso(skb)) {
			u16 last_idx, last_ring_idx;

			last_idx = sw_cons +
				skb_shinfo(skb)->nr_frags + 1;
			last_ring_idx = sw_ring_cons +
				skb_shinfo(skb)->nr_frags + 1;
			if (unlikely(last_ring_idx >= MAX_TX_DESC_CNT)) {
				last_idx++;
			}
			/* Stop if the packet's last BD hasn't completed yet
			 * (signed wrap-safe comparison).
			 */
			if (((s16) ((s16) last_idx - (s16) hw_cons)) > 0) {
				break;
			}
		}

		/* Unmap the linear part of the skb... */
		pci_unmap_single(bp->pdev, pci_unmap_addr(tx_buf, mapping),
			skb_headlen(skb), PCI_DMA_TODEVICE);

		tx_buf->skb = NULL;
		last = skb_shinfo(skb)->nr_frags;

		/* ...then each of its page fragments. */
		for (i = 0; i < last; i++) {
			sw_cons = NEXT_TX_BD(sw_cons);

			pci_unmap_page(bp->pdev,
				pci_unmap_addr(
					&bp->tx_buf_ring[TX_RING_IDX(sw_cons)],
					mapping),
				skb_shinfo(skb)->frags[i].size,
				PCI_DMA_TODEVICE);
		}

		sw_cons = NEXT_TX_BD(sw_cons);

		tx_free_bd += last + 1;

		dev_kfree_skb(skb);

		/* Re-read the hardware index; more work may have completed
		 * while we were freeing.
		 */
		hw_cons = bp->hw_tx_cons =
			sblk->status_tx_quick_consumer_index0;

		if ((hw_cons & MAX_TX_DESC_CNT) == MAX_TX_DESC_CNT) {
			hw_cons++;
		}
	}

	bp->tx_cons = sw_cons;
	/* Need to make the tx_cons update visible to bnx2_start_xmit()
	 * before checking for netif_queue_stopped().  Without the
	 * memory barrier, there is a small possibility that bnx2_start_xmit()
	 * will miss it and cause the queue to be stopped forever.
	 */
	smp_mb();

	/* Double-checked wake: recheck under netif_tx_lock to avoid
	 * racing with a concurrent queue stop in the xmit path.
	 */
	if (unlikely(netif_queue_stopped(bp->dev)) &&
		     (bnx2_tx_avail(bp) > bp->tx_wake_thresh)) {
		netif_tx_lock(bp->dev);
		if ((netif_queue_stopped(bp->dev)) &&
		    (bnx2_tx_avail(bp) > bp->tx_wake_thresh))
			netif_wake_queue(bp->dev);
		netif_tx_unlock(bp->dev);
	}
}
2316
/* Recycle an RX skb back onto the ring: hand the skb (and, when the
 * producer slot differs from the consumer slot, its DMA mapping and
 * buffer-descriptor address) from ring slot @cons to slot @prod.
 * Used when a packet is dropped or was copied out of the buffer.
 */
static inline void
bnx2_reuse_rx_skb(struct bnx2 *bp, struct sk_buff *skb,
	u16 cons, u16 prod)
{
	struct sw_bd *cons_rx_buf, *prod_rx_buf;
	struct rx_bd *cons_bd, *prod_bd;

	cons_rx_buf = &bp->rx_buf_ring[cons];
	prod_rx_buf = &bp->rx_buf_ring[prod];

	/* Give the header area back to the device; the CPU sync was
	 * done when the packet was received.
	 */
	pci_dma_sync_single_for_device(bp->pdev,
		pci_unmap_addr(cons_rx_buf, mapping),
		bp->rx_offset + RX_COPY_THRESH, PCI_DMA_FROMDEVICE);

	bp->rx_prod_bseq += bp->rx_buf_use_size;

	prod_rx_buf->skb = skb;

	/* Same slot: mapping and BD are already correct. */
	if (cons == prod)
		return;

	/* Move the DMA mapping and BD address to the producer slot. */
	pci_unmap_addr_set(prod_rx_buf, mapping,
			pci_unmap_addr(cons_rx_buf, mapping));

	cons_bd = &bp->rx_desc_ring[RX_RING(cons)][RX_IDX(cons)];
	prod_bd = &bp->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
	prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
	prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;
}
2346
/* NAPI receive processing: consume up to @budget completed RX
 * descriptors.
 *
 * For each completed packet: drop it on frame errors, copy small
 * packets into a fresh skb when running with jumbo MTU, otherwise
 * hand the full buffer up and refill the ring slot.  Finishes by
 * publishing the new producer index and byte sequence to the chip.
 * Returns the number of packets processed.
 */
static int
bnx2_rx_int(struct bnx2 *bp, int budget)
{
	struct status_block *sblk = bp->status_blk;
	u16 hw_cons, sw_cons, sw_ring_cons, sw_prod, sw_ring_prod;
	struct l2_fhdr *rx_hdr;
	int rx_pkt = 0;

	hw_cons = bp->hw_rx_cons = sblk->status_rx_quick_consumer_index0;
	/* The last entry of each ring page is not a usable descriptor. */
	if ((hw_cons & MAX_RX_DESC_CNT) == MAX_RX_DESC_CNT) {
		hw_cons++;
	}
	sw_cons = bp->rx_cons;
	sw_prod = bp->rx_prod;

	/* Memory barrier necessary as speculative reads of the rx
	 * buffer can be ahead of the index in the status block
	 */
	rmb();
	while (sw_cons != hw_cons) {
		unsigned int len;
		u32 status;
		struct sw_bd *rx_buf;
		struct sk_buff *skb;
		dma_addr_t dma_addr;

		sw_ring_cons = RX_RING_IDX(sw_cons);
		sw_ring_prod = RX_RING_IDX(sw_prod);

		rx_buf = &bp->rx_buf_ring[sw_ring_cons];
		skb = rx_buf->skb;

		rx_buf->skb = NULL;

		dma_addr = pci_unmap_addr(rx_buf, mapping);

		/* Sync only the frame header area for the CPU; the rest
		 * stays mapped until we decide to keep or copy the packet.
		 */
		pci_dma_sync_single_for_cpu(bp->pdev, dma_addr,
			bp->rx_offset + RX_COPY_THRESH, PCI_DMA_FROMDEVICE);

		/* The chip prepends an l2_fhdr status header to the data. */
		rx_hdr = (struct l2_fhdr *) skb->data;
		/* Drop the trailing 4 bytes (frame CRC). */
		len = rx_hdr->l2_fhdr_pkt_len - 4;

		if ((status = rx_hdr->l2_fhdr_status) &
			(L2_FHDR_ERRORS_BAD_CRC |
			L2_FHDR_ERRORS_PHY_DECODE |
			L2_FHDR_ERRORS_ALIGNMENT |
			L2_FHDR_ERRORS_TOO_SHORT |
			L2_FHDR_ERRORS_GIANT_FRAME)) {

			goto reuse_rx;
		}

		/* Since we don't have a jumbo ring, copy small packets
		 * if mtu > 1500
		 */
		if ((bp->dev->mtu > 1500) && (len <= RX_COPY_THRESH)) {
			struct sk_buff *new_skb;

			new_skb = netdev_alloc_skb(bp->dev, len + 2);
			if (new_skb == NULL)
				goto reuse_rx;

			/* aligned copy */
			skb_copy_from_linear_data_offset(skb, bp->rx_offset - 2,
				      new_skb->data, len + 2);
			skb_reserve(new_skb, 2);
			skb_put(new_skb, len);

			/* Big buffer goes back on the ring; we keep the copy. */
			bnx2_reuse_rx_skb(bp, skb,
				sw_ring_cons, sw_ring_prod);

			skb = new_skb;
		}
		else if (bnx2_alloc_rx_skb(bp, sw_ring_prod) == 0) {
			/* Refill succeeded: unmap and pass this buffer up. */
			pci_unmap_single(bp->pdev, dma_addr,
				bp->rx_buf_use_size, PCI_DMA_FROMDEVICE);

			skb_reserve(skb, bp->rx_offset);
			skb_put(skb, len);
		}
		else {
reuse_rx:
			/* Error or allocation failure: recycle the buffer. */
			bnx2_reuse_rx_skb(bp, skb,
				sw_ring_cons, sw_ring_prod);
			goto next_rx;
		}

		skb->protocol = eth_type_trans(skb, bp->dev);

		/* Drop oversized frames unless VLAN-tagged (0x8100 ==
		 * ETH_P_8021Q), which may legitimately be 4 bytes longer.
		 */
		if ((len > (bp->dev->mtu + ETH_HLEN)) &&
			(ntohs(skb->protocol) != 0x8100)) {

			dev_kfree_skb(skb);
			goto next_rx;

		}

		/* Report hardware checksum status when rx_csum is on. */
		skb->ip_summed = CHECKSUM_NONE;
		if (bp->rx_csum &&
			(status & (L2_FHDR_STATUS_TCP_SEGMENT |
			L2_FHDR_STATUS_UDP_DATAGRAM))) {

			if (likely((status & (L2_FHDR_ERRORS_TCP_XSUM |
					      L2_FHDR_ERRORS_UDP_XSUM)) == 0))
				skb->ip_summed = CHECKSUM_UNNECESSARY;
		}

#ifdef BCM_VLAN
		if ((status & L2_FHDR_STATUS_L2_VLAN_TAG) && (bp->vlgrp != 0)) {
			vlan_hwaccel_receive_skb(skb, bp->vlgrp,
				rx_hdr->l2_fhdr_vlan_tag);
		}
		else
#endif
			netif_receive_skb(skb);

		bp->dev->last_rx = jiffies;
		rx_pkt++;

next_rx:
		sw_cons = NEXT_RX_BD(sw_cons);
		sw_prod = NEXT_RX_BD(sw_prod);

		if ((rx_pkt == budget))
			break;

		/* Refresh hw_cons to see if there is new work */
		if (sw_cons == hw_cons) {
			hw_cons = bp->hw_rx_cons =
				sblk->status_rx_quick_consumer_index0;
			if ((hw_cons & MAX_RX_DESC_CNT) == MAX_RX_DESC_CNT)
				hw_cons++;
			rmb();
		}
	}
	bp->rx_cons = sw_cons;
	bp->rx_prod = sw_prod;

	/* Publish the new producer index and byte sequence to the chip. */
	REG_WR16(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BDIDX, sw_prod);

	REG_WR(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BSEQ, bp->rx_prod_bseq);

	mmiowb();

	return rx_pkt;

}
2494
/* MSI ISR - The only difference between this and the INTx ISR
 * is that the MSI interrupt is always serviced (no shared-line check).
 */
static irqreturn_t
bnx2_msi(int irq, void *dev_instance)
{
	struct net_device *dev = dev_instance;
	struct bnx2 *bp = netdev_priv(dev);

	/* Warm the cache with the status block before NAPI reads it. */
	prefetch(bp->status_blk);
	/* Ack and mask further interrupts; bnx2_poll() re-enables them
	 * once all work is done.
	 */
	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
		BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
		BNX2_PCICFG_INT_ACK_CMD_MASK_INT);

	/* Return here if interrupt is disabled. */
	if (unlikely(atomic_read(&bp->intr_sem) != 0))
		return IRQ_HANDLED;

	netif_rx_schedule(dev);

	return IRQ_HANDLED;
}
2517
2518 static irqreturn_t
2519 bnx2_msi_1shot(int irq, void *dev_instance)
2520 {
2521         struct net_device *dev = dev_instance;
2522         struct bnx2 *bp = netdev_priv(dev);
2523
2524         prefetch(bp->status_blk);
2525
2526         /* Return here if interrupt is disabled. */
2527         if (unlikely(atomic_read(&bp->intr_sem) != 0))
2528                 return IRQ_HANDLED;
2529
2530         netif_rx_schedule(dev);
2531
2532         return IRQ_HANDLED;
2533 }
2534
/* INTx ISR.  Because the line may be shared, this handler must first
 * decide whether the interrupt actually came from this device.
 */
static irqreturn_t
bnx2_interrupt(int irq, void *dev_instance)
{
	struct net_device *dev = dev_instance;
	struct bnx2 *bp = netdev_priv(dev);

	/* When using INTx, it is possible for the interrupt to arrive
	 * at the CPU before the status block posted prior to the
	 * interrupt. Reading a register will flush the status block.
	 * When using MSI, the MSI message will always complete after
	 * the status block write.
	 */
	if ((bp->status_blk->status_idx == bp->last_status_idx) &&
	    (REG_RD(bp, BNX2_PCICFG_MISC_STATUS) &
	     BNX2_PCICFG_MISC_STATUS_INTA_VALUE))
		return IRQ_NONE;	/* not our interrupt */

	/* Ack and mask further interrupts until bnx2_poll() finishes. */
	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
		BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
		BNX2_PCICFG_INT_ACK_CMD_MASK_INT);

	/* Return here if interrupt is shared and is disabled. */
	if (unlikely(atomic_read(&bp->intr_sem) != 0))
		return IRQ_HANDLED;

	netif_rx_schedule(dev);

	return IRQ_HANDLED;
}
2564
2565 #define STATUS_ATTN_EVENTS      (STATUS_ATTN_BITS_LINK_STATE | \
2566                                  STATUS_ATTN_BITS_TIMER_ABORT)
2567
2568 static inline int
2569 bnx2_has_work(struct bnx2 *bp)
2570 {
2571         struct status_block *sblk = bp->status_blk;
2572
2573         if ((sblk->status_rx_quick_consumer_index0 != bp->hw_rx_cons) ||
2574             (sblk->status_tx_quick_consumer_index0 != bp->hw_tx_cons))
2575                 return 1;
2576
2577         if ((sblk->status_attn_bits & STATUS_ATTN_EVENTS) !=
2578             (sblk->status_attn_bits_ack & STATUS_ATTN_EVENTS))
2579                 return 1;
2580
2581         return 0;
2582 }
2583
/* NAPI poll routine: handle PHY attention events, TX completions and up
 * to *budget RX packets.  Returns 0 (and re-enables interrupts) when all
 * work is done, 1 to be polled again.
 */
static int
bnx2_poll(struct net_device *dev, int *budget)
{
	struct bnx2 *bp = netdev_priv(dev);
	struct status_block *sblk = bp->status_blk;
	u32 status_attn_bits = sblk->status_attn_bits;
	u32 status_attn_bits_ack = sblk->status_attn_bits_ack;

	/* A mismatch between bits and their ack copies means an
	 * unserviced link/timer attention event.
	 */
	if ((status_attn_bits & STATUS_ATTN_EVENTS) !=
	    (status_attn_bits_ack & STATUS_ATTN_EVENTS)) {

		bnx2_phy_int(bp);

		/* This is needed to take care of transient status
		 * during link changes.
		 */
		REG_WR(bp, BNX2_HC_COMMAND,
		       bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
		REG_RD(bp, BNX2_HC_COMMAND);	/* flush the write */
	}

	if (bp->status_blk->status_tx_quick_consumer_index0 != bp->hw_tx_cons)
		bnx2_tx_int(bp);

	if (bp->status_blk->status_rx_quick_consumer_index0 != bp->hw_rx_cons) {
		int orig_budget = *budget;
		int work_done;

		/* Never exceed the per-device quota. */
		if (orig_budget > dev->quota)
			orig_budget = dev->quota;

		work_done = bnx2_rx_int(bp, orig_budget);
		*budget -= work_done;
		dev->quota -= work_done;
	}

	/* Record the status index we have seen; the rmb() orders this
	 * against the bnx2_has_work() check below.
	 */
	bp->last_status_idx = bp->status_blk->status_idx;
	rmb();

	if (!bnx2_has_work(bp)) {
		netif_rx_complete(dev);
		if (likely(bp->flags & USING_MSI_FLAG)) {
			/* MSI: one ack write re-enables interrupts. */
			REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
			       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
			       bp->last_status_idx);
			return 0;
		}
		/* INTx: first ack with the line still masked, then
		 * unmask with a second write.
		 */
		REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
		       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
		       BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
		       bp->last_status_idx);

		REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
		       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
		       bp->last_status_idx);
		return 0;
	}

	return 1;
}
2644
2645 /* Called with rtnl_lock from vlan functions and also netif_tx_lock
2646  * from set_multicast.
2647  */
/* Program the EMAC RX filters (promiscuous / all-multicast / multicast
 * hash) from dev->flags and the multicast list.
 */
static void
bnx2_set_rx_mode(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	u32 rx_mode, sort_mode;
	int i;

	spin_lock_bh(&bp->phy_lock);

	/* Start from the current mode with the bits we manage cleared. */
	rx_mode = bp->rx_mode & ~(BNX2_EMAC_RX_MODE_PROMISCUOUS |
				  BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG);
	sort_mode = 1 | BNX2_RPM_SORT_USER0_BC_EN;
#ifdef BCM_VLAN
	/* Strip VLAN tags in hardware only when VLAN accel is active
	 * and ASF management firmware does not need the tags.
	 */
	if (!bp->vlgrp && !(bp->flags & ASF_ENABLE_FLAG))
		rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
#else
	if (!(bp->flags & ASF_ENABLE_FLAG))
		rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
#endif
	if (dev->flags & IFF_PROMISC) {
		/* Promiscuous mode. */
		rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
		sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN |
			     BNX2_RPM_SORT_USER0_PROM_VLAN;
	}
	else if (dev->flags & IFF_ALLMULTI) {
		/* Accept all multicast: set every hash bit. */
		for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
			REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
			       0xffffffff);
		}
		sort_mode |= BNX2_RPM_SORT_USER0_MC_EN;
	}
	else {
		/* Accept one or more multicast(s). */
		struct dev_mc_list *mclist;
		u32 mc_filter[NUM_MC_HASH_REGISTERS];
		u32 regidx;
		u32 bit;
		u32 crc;

		memset(mc_filter, 0, 4 * NUM_MC_HASH_REGISTERS);

		/* Hash each address into one bit of the 256-bit filter:
		 * low CRC byte selects register (high 3 bits) and bit
		 * position (low 5 bits).
		 */
		for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
		     i++, mclist = mclist->next) {

			crc = ether_crc_le(ETH_ALEN, mclist->dmi_addr);
			bit = crc & 0xff;
			regidx = (bit & 0xe0) >> 5;
			bit &= 0x1f;
			mc_filter[regidx] |= (1 << bit);
		}

		for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
			REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
			       mc_filter[i]);
		}

		sort_mode |= BNX2_RPM_SORT_USER0_MC_HSH_EN;
	}

	/* Avoid a register write if the mode is unchanged. */
	if (rx_mode != bp->rx_mode) {
		bp->rx_mode = rx_mode;
		REG_WR(bp, BNX2_EMAC_RX_MODE, rx_mode);
	}

	/* Sort-user update sequence: clear, program, then enable. */
	REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
	REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode);
	REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode | BNX2_RPM_SORT_USER0_ENA);

	spin_unlock_bh(&bp->phy_lock);
}
2719
2720 #define FW_BUF_SIZE     0x8000
2721
2722 static int
2723 bnx2_gunzip_init(struct bnx2 *bp)
2724 {
2725         if ((bp->gunzip_buf = vmalloc(FW_BUF_SIZE)) == NULL)
2726                 goto gunzip_nomem1;
2727
2728         if ((bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL)) == NULL)
2729                 goto gunzip_nomem2;
2730
2731         bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(), GFP_KERNEL);
2732         if (bp->strm->workspace == NULL)
2733                 goto gunzip_nomem3;
2734
2735         return 0;
2736
2737 gunzip_nomem3:
2738         kfree(bp->strm);
2739         bp->strm = NULL;
2740
2741 gunzip_nomem2:
2742         vfree(bp->gunzip_buf);
2743         bp->gunzip_buf = NULL;
2744
2745 gunzip_nomem1:
2746         printk(KERN_ERR PFX "%s: Cannot allocate firmware buffer for "
2747                             "uncompression.\n", bp->dev->name);
2748         return -ENOMEM;
2749 }
2750
2751 static void
2752 bnx2_gunzip_end(struct bnx2 *bp)
2753 {
2754         kfree(bp->strm->workspace);
2755
2756         kfree(bp->strm);
2757         bp->strm = NULL;
2758
2759         if (bp->gunzip_buf) {
2760                 vfree(bp->gunzip_buf);
2761                 bp->gunzip_buf = NULL;
2762         }
2763 }
2764
/* Decompress the gzipped image zbuf (len bytes) into bp->gunzip_buf.
 * On success *outbuf points at the decompressed data and *outlen holds
 * its length.  Returns 0 on success, -EINVAL on a bad gzip magic, or a
 * zlib error code.
 *
 * NOTE(review): only the FNAME flag of the gzip header is handled when
 * skipping past the header; images carrying FEXTRA/FCOMMENT/FHCRC
 * fields would be mis-parsed.  Presumably the in-tree firmware blobs
 * never set those flags -- confirm if the firmware build ever changes.
 */
static int
bnx2_gunzip(struct bnx2 *bp, u8 *zbuf, int len, void **outbuf, int *outlen)
{
	int n, rc;

	/* check gzip header */
	if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED))
		return -EINVAL;

	n = 10;		/* fixed gzip header size */

#define FNAME	0x8
	/* Skip the NUL-terminated original file name, if present. */
	if (zbuf[3] & FNAME)
		while ((zbuf[n++] != 0) && (n < len));

	/* Inflate the raw deflate stream into the scratch buffer. */
	bp->strm->next_in = zbuf + n;
	bp->strm->avail_in = len - n;
	bp->strm->next_out = bp->gunzip_buf;
	bp->strm->avail_out = FW_BUF_SIZE;

	/* Negative windowBits: raw deflate data, no zlib header. */
	rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
	if (rc != Z_OK)
		return rc;

	rc = zlib_inflate(bp->strm, Z_FINISH);

	*outlen = FW_BUF_SIZE - bp->strm->avail_out;
	*outbuf = bp->gunzip_buf;

	if ((rc != Z_OK) && (rc != Z_STREAM_END))
		printk(KERN_ERR PFX "%s: Firmware decompression error: %s\n",
		       bp->dev->name, bp->strm->msg);

	zlib_inflateEnd(bp->strm);

	if (rc == Z_STREAM_END)
		return 0;

	return rc;
}
2805
/* Download an RV2P microcode image into the selected processor
 * (RV2P_PROC1 or RV2P_PROC2) and reset it; the un-stall happens later
 * during chip initialization.  Each 8-byte instruction is written as a
 * high/low register pair followed by an address/command write.
 */
static void
load_rv2p_fw(struct bnx2 *bp, u32 *rv2p_code, u32 rv2p_code_len,
	u32 rv2p_proc)
{
	int i;
	u32 val;


	for (i = 0; i < rv2p_code_len; i += 8) {
		/* NOTE(review): cpu_to_le32() feeding REG_WR looks like a
		 * potential double-swap on big-endian hosts; presumably
		 * the image byte layout compensates -- confirm before
		 * changing.
		 */
		REG_WR(bp, BNX2_RV2P_INSTR_HIGH, cpu_to_le32(*rv2p_code));
		rv2p_code++;
		REG_WR(bp, BNX2_RV2P_INSTR_LOW, cpu_to_le32(*rv2p_code));
		rv2p_code++;

		if (rv2p_proc == RV2P_PROC1) {
			val = (i / 8) | BNX2_RV2P_PROC1_ADDR_CMD_RDWR;
			REG_WR(bp, BNX2_RV2P_PROC1_ADDR_CMD, val);
		}
		else {
			val = (i / 8) | BNX2_RV2P_PROC2_ADDR_CMD_RDWR;
			REG_WR(bp, BNX2_RV2P_PROC2_ADDR_CMD, val);
		}
	}

	/* Reset the processor, un-stall is done later. */
	if (rv2p_proc == RV2P_PROC1) {
		REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC1_RESET);
	}
	else {
		REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC2_RESET);
	}
}
2838
2839 static int
2840 load_cpu_fw(struct bnx2 *bp, struct cpu_reg *cpu_reg, struct fw_info *fw)
2841 {
2842         u32 offset;
2843         u32 val;
2844         int rc;
2845
2846         /* Halt the CPU. */
2847         val = REG_RD_IND(bp, cpu_reg->mode);
2848         val |= cpu_reg->mode_value_halt;
2849         REG_WR_IND(bp, cpu_reg->mode, val);
2850         REG_WR_IND(bp, cpu_reg->state, cpu_reg->state_value_clear);
2851
2852         /* Load the Text area. */
2853         offset = cpu_reg->spad_base + (fw->text_addr - cpu_reg->mips_view_base);
2854         if (fw->gz_text) {
2855                 u32 text_len;
2856                 void *text;
2857
2858                 rc = bnx2_gunzip(bp, fw->gz_text, fw->gz_text_len, &text,
2859                                  &text_len);
2860                 if (rc)
2861                         return rc;
2862
2863                 fw->text = text;
2864         }
2865         if (fw->gz_text) {
2866                 int j;
2867
2868                 for (j = 0; j < (fw->text_len / 4); j++, offset += 4) {
2869                         REG_WR_IND(bp, offset, cpu_to_le32(fw->text[j]));
2870                 }
2871         }
2872
2873         /* Load the Data area. */
2874         offset = cpu_reg->spad_base + (fw->data_addr - cpu_reg->mips_view_base);
2875         if (fw->data) {
2876                 int j;
2877
2878                 for (j = 0; j < (fw->data_len / 4); j++, offset += 4) {
2879                         REG_WR_IND(bp, offset, fw->data[j]);
2880                 }
2881         }
2882
2883         /* Load the SBSS area. */
2884         offset = cpu_reg->spad_base + (fw->sbss_addr - cpu_reg->mips_view_base);
2885         if (fw->sbss) {
2886                 int j;
2887
2888                 for (j = 0; j < (fw->sbss_len / 4); j++, offset += 4) {
2889                         REG_WR_IND(bp, offset, fw->sbss[j]);
2890                 }
2891         }
2892
2893         /* Load the BSS area. */
2894         offset = cpu_reg->spad_base + (fw->bss_addr - cpu_reg->mips_view_base);
2895         if (fw->bss) {
2896                 int j;
2897
2898                 for (j = 0; j < (fw->bss_len/4); j++, offset += 4) {
2899                         REG_WR_IND(bp, offset, fw->bss[j]);
2900                 }
2901         }
2902
2903         /* Load the Read-Only area. */
2904         offset = cpu_reg->spad_base +
2905                 (fw->rodata_addr - cpu_reg->mips_view_base);
2906         if (fw->rodata) {
2907                 int j;
2908
2909                 for (j = 0; j < (fw->rodata_len / 4); j++, offset += 4) {
2910                         REG_WR_IND(bp, offset, fw->rodata[j]);
2911                 }
2912         }
2913
2914         /* Clear the pre-fetch instruction. */
2915         REG_WR_IND(bp, cpu_reg->inst, 0);
2916         REG_WR_IND(bp, cpu_reg->pc, fw->start_addr);
2917
2918         /* Start the CPU. */
2919         val = REG_RD_IND(bp, cpu_reg->mode);
2920         val &= ~cpu_reg->mode_value_halt;
2921         REG_WR_IND(bp, cpu_reg->state, cpu_reg->state_value_clear);
2922         REG_WR_IND(bp, cpu_reg->mode, val);
2923
2924         return 0;
2925 }
2926
/* Load and start all on-chip processors: the two RV2P engines and the
 * RX, TX, TX patch-up, completion and (5709 only) command processors.
 * The firmware image for each is chosen by chip generation.  Returns 0
 * on success or a negative error from decompression/loading.
 */
static int
bnx2_init_cpus(struct bnx2 *bp)
{
	struct cpu_reg cpu_reg;
	struct fw_info *fw;
	int rc = 0;
	void *text;
	u32 text_len;

	if ((rc = bnx2_gunzip_init(bp)) != 0)
		return rc;

	/* Initialize the RV2P processor. */
	rc = bnx2_gunzip(bp, bnx2_rv2p_proc1, sizeof(bnx2_rv2p_proc1), &text,
			 &text_len);
	if (rc)
		goto init_cpu_err;

	load_rv2p_fw(bp, text, text_len, RV2P_PROC1);

	rc = bnx2_gunzip(bp, bnx2_rv2p_proc2, sizeof(bnx2_rv2p_proc2), &text,
			 &text_len);
	if (rc)
		goto init_cpu_err;

	load_rv2p_fw(bp, text, text_len, RV2P_PROC2);

	/* Initialize the RX Processor. */
	cpu_reg.mode = BNX2_RXP_CPU_MODE;
	cpu_reg.mode_value_halt = BNX2_RXP_CPU_MODE_SOFT_HALT;
	cpu_reg.mode_value_sstep = BNX2_RXP_CPU_MODE_STEP_ENA;
	cpu_reg.state = BNX2_RXP_CPU_STATE;
	cpu_reg.state_value_clear = 0xffffff;
	cpu_reg.gpr0 = BNX2_RXP_CPU_REG_FILE;
	cpu_reg.evmask = BNX2_RXP_CPU_EVENT_MASK;
	cpu_reg.pc = BNX2_RXP_CPU_PROGRAM_COUNTER;
	cpu_reg.inst = BNX2_RXP_CPU_INSTRUCTION;
	cpu_reg.bp = BNX2_RXP_CPU_HW_BREAKPOINT;
	cpu_reg.spad_base = BNX2_RXP_SCRATCH;
	cpu_reg.mips_view_base = 0x8000000;

	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		fw = &bnx2_rxp_fw_09;
	else
		fw = &bnx2_rxp_fw_06;

	rc = load_cpu_fw(bp, &cpu_reg, fw);
	if (rc)
		goto init_cpu_err;

	/* Initialize the TX Processor. */
	cpu_reg.mode = BNX2_TXP_CPU_MODE;
	cpu_reg.mode_value_halt = BNX2_TXP_CPU_MODE_SOFT_HALT;
	cpu_reg.mode_value_sstep = BNX2_TXP_CPU_MODE_STEP_ENA;
	cpu_reg.state = BNX2_TXP_CPU_STATE;
	cpu_reg.state_value_clear = 0xffffff;
	cpu_reg.gpr0 = BNX2_TXP_CPU_REG_FILE;
	cpu_reg.evmask = BNX2_TXP_CPU_EVENT_MASK;
	cpu_reg.pc = BNX2_TXP_CPU_PROGRAM_COUNTER;
	cpu_reg.inst = BNX2_TXP_CPU_INSTRUCTION;
	cpu_reg.bp = BNX2_TXP_CPU_HW_BREAKPOINT;
	cpu_reg.spad_base = BNX2_TXP_SCRATCH;
	cpu_reg.mips_view_base = 0x8000000;

	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		fw = &bnx2_txp_fw_09;
	else
		fw = &bnx2_txp_fw_06;

	rc = load_cpu_fw(bp, &cpu_reg, fw);
	if (rc)
		goto init_cpu_err;

	/* Initialize the TX Patch-up Processor. */
	cpu_reg.mode = BNX2_TPAT_CPU_MODE;
	cpu_reg.mode_value_halt = BNX2_TPAT_CPU_MODE_SOFT_HALT;
	cpu_reg.mode_value_sstep = BNX2_TPAT_CPU_MODE_STEP_ENA;
	cpu_reg.state = BNX2_TPAT_CPU_STATE;
	cpu_reg.state_value_clear = 0xffffff;
	cpu_reg.gpr0 = BNX2_TPAT_CPU_REG_FILE;
	cpu_reg.evmask = BNX2_TPAT_CPU_EVENT_MASK;
	cpu_reg.pc = BNX2_TPAT_CPU_PROGRAM_COUNTER;
	cpu_reg.inst = BNX2_TPAT_CPU_INSTRUCTION;
	cpu_reg.bp = BNX2_TPAT_CPU_HW_BREAKPOINT;
	cpu_reg.spad_base = BNX2_TPAT_SCRATCH;
	cpu_reg.mips_view_base = 0x8000000;

	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		fw = &bnx2_tpat_fw_09;
	else
		fw = &bnx2_tpat_fw_06;

	rc = load_cpu_fw(bp, &cpu_reg, fw);
	if (rc)
		goto init_cpu_err;

	/* Initialize the Completion Processor. */
	cpu_reg.mode = BNX2_COM_CPU_MODE;
	cpu_reg.mode_value_halt = BNX2_COM_CPU_MODE_SOFT_HALT;
	cpu_reg.mode_value_sstep = BNX2_COM_CPU_MODE_STEP_ENA;
	cpu_reg.state = BNX2_COM_CPU_STATE;
	cpu_reg.state_value_clear = 0xffffff;
	cpu_reg.gpr0 = BNX2_COM_CPU_REG_FILE;
	cpu_reg.evmask = BNX2_COM_CPU_EVENT_MASK;
	cpu_reg.pc = BNX2_COM_CPU_PROGRAM_COUNTER;
	cpu_reg.inst = BNX2_COM_CPU_INSTRUCTION;
	cpu_reg.bp = BNX2_COM_CPU_HW_BREAKPOINT;
	cpu_reg.spad_base = BNX2_COM_SCRATCH;
	cpu_reg.mips_view_base = 0x8000000;

	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		fw = &bnx2_com_fw_09;
	else
		fw = &bnx2_com_fw_06;

	rc = load_cpu_fw(bp, &cpu_reg, fw);
	if (rc)
		goto init_cpu_err;

	/* Initialize the Command Processor (5709 only; earlier chips
	 * have no CP firmware).
	 */
	cpu_reg.mode = BNX2_CP_CPU_MODE;
	cpu_reg.mode_value_halt = BNX2_CP_CPU_MODE_SOFT_HALT;
	cpu_reg.mode_value_sstep = BNX2_CP_CPU_MODE_STEP_ENA;
	cpu_reg.state = BNX2_CP_CPU_STATE;
	cpu_reg.state_value_clear = 0xffffff;
	cpu_reg.gpr0 = BNX2_CP_CPU_REG_FILE;
	cpu_reg.evmask = BNX2_CP_CPU_EVENT_MASK;
	cpu_reg.pc = BNX2_CP_CPU_PROGRAM_COUNTER;
	cpu_reg.inst = BNX2_CP_CPU_INSTRUCTION;
	cpu_reg.bp = BNX2_CP_CPU_HW_BREAKPOINT;
	cpu_reg.spad_base = BNX2_CP_SCRATCH;
	cpu_reg.mips_view_base = 0x8000000;

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		fw = &bnx2_cp_fw_09;

		rc = load_cpu_fw(bp, &cpu_reg, fw);
		if (rc)
			goto init_cpu_err;
	}
init_cpu_err:
	bnx2_gunzip_end(bp);
	return rc;
}
3071
/* Transition the device between PCI power states D0 and D3hot, setting
 * up Wake-on-LAN (magic packet at 10/100) when entering D3hot with WOL
 * enabled.  Returns 0 on success or -EINVAL for unsupported states.
 */
static int
bnx2_set_power_state(struct bnx2 *bp, pci_power_t state)
{
	u16 pmcsr;

	pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);

	switch (state) {
	case PCI_D0: {
		u32 val;

		/* Clear the power-state field (-> D0) and the PME status. */
		pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
			(pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
			PCI_PM_CTRL_PME_STATUS);

		if (pmcsr & PCI_PM_CTRL_STATE_MASK)
			/* delay required during transition out of D3hot */
			msleep(20);

		/* Ack any wake-up packet events and disable magic packet
		 * detection while in D0.
		 */
		val = REG_RD(bp, BNX2_EMAC_MODE);
		val |= BNX2_EMAC_MODE_MPKT_RCVD | BNX2_EMAC_MODE_ACPI_RCVD;
		val &= ~BNX2_EMAC_MODE_MPKT;
		REG_WR(bp, BNX2_EMAC_MODE, val);

		val = REG_RD(bp, BNX2_RPM_CONFIG);
		val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
		REG_WR(bp, BNX2_RPM_CONFIG, val);
		break;
	}
	case PCI_D3hot: {
		int i;
		u32 val, wol_msg;

		if (bp->wol) {
			u32 advertising;
			u8 autoneg;

			/* Renegotiate the link at 10/100 for WOL, saving
			 * and restoring the user's autoneg settings.
			 */
			autoneg = bp->autoneg;
			advertising = bp->advertising;

			bp->autoneg = AUTONEG_SPEED;
			bp->advertising = ADVERTISED_10baseT_Half |
				ADVERTISED_10baseT_Full |
				ADVERTISED_100baseT_Half |
				ADVERTISED_100baseT_Full |
				ADVERTISED_Autoneg;

			bnx2_setup_copper_phy(bp);

			bp->autoneg = autoneg;
			bp->advertising = advertising;

			bnx2_set_mac_addr(bp);

			val = REG_RD(bp, BNX2_EMAC_MODE);

			/* Enable port mode. */
			val &= ~BNX2_EMAC_MODE_PORT;
			val |= BNX2_EMAC_MODE_PORT_MII |
			       BNX2_EMAC_MODE_MPKT_RCVD |
			       BNX2_EMAC_MODE_ACPI_RCVD |
			       BNX2_EMAC_MODE_MPKT;

			REG_WR(bp, BNX2_EMAC_MODE, val);

			/* receive all multicast */
			for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
				REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
				       0xffffffff);
			}
			REG_WR(bp, BNX2_EMAC_RX_MODE,
			       BNX2_EMAC_RX_MODE_SORT_MODE);

			/* Sort-user update: clear, program, enable. */
			val = 1 | BNX2_RPM_SORT_USER0_BC_EN |
			      BNX2_RPM_SORT_USER0_MC_EN;
			REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
			REG_WR(bp, BNX2_RPM_SORT_USER0, val);
			REG_WR(bp, BNX2_RPM_SORT_USER0, val |
			       BNX2_RPM_SORT_USER0_ENA);

			/* Need to enable EMAC and RPM for WOL. */
			REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
			       BNX2_MISC_ENABLE_SET_BITS_RX_PARSER_MAC_ENABLE |
			       BNX2_MISC_ENABLE_SET_BITS_TX_HEADER_Q_ENABLE |
			       BNX2_MISC_ENABLE_SET_BITS_EMAC_ENABLE);

			val = REG_RD(bp, BNX2_RPM_CONFIG);
			val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
			REG_WR(bp, BNX2_RPM_CONFIG, val);

			wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
		}
		else {
			wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
		}

		/* Tell the management firmware we are suspending. */
		if (!(bp->flags & NO_WOL_FLAG))
			bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT3 | wol_msg, 0);

		/* 3 == the D3hot encoding of PCI_PM_CTRL_STATE_MASK.
		 * On 5706 A0/A1 only enter D3hot when WOL is armed
		 * (chip-rev specific; presumably a hardware erratum --
		 * confirm against Broadcom errata before changing).
		 */
		pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
		if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
		    (CHIP_ID(bp) == CHIP_ID_5706_A1)) {

			if (bp->wol)
				pmcsr |= 3;
		}
		else {
			pmcsr |= 3;
		}
		if (bp->wol) {
			pmcsr |= PCI_PM_CTRL_PME_ENABLE;
		}
		pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
				      pmcsr);

		/* No more memory access after this point until
		 * device is brought back to D0.
		 */
		udelay(50);
		break;
	}
	default:
		return -EINVAL;
	}
	return 0;
}
3198
3199 static int
3200 bnx2_acquire_nvram_lock(struct bnx2 *bp)
3201 {
3202         u32 val;
3203         int j;
3204
3205         /* Request access to the flash interface. */
3206         REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_SET2);
3207         for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3208                 val = REG_RD(bp, BNX2_NVM_SW_ARB);
3209                 if (val & BNX2_NVM_SW_ARB_ARB_ARB2)
3210                         break;
3211
3212                 udelay(5);
3213         }
3214
3215         if (j >= NVRAM_TIMEOUT_COUNT)
3216                 return -EBUSY;
3217
3218         return 0;
3219 }
3220
3221 static int
3222 bnx2_release_nvram_lock(struct bnx2 *bp)
3223 {
3224         int j;
3225         u32 val;
3226
3227         /* Relinquish nvram interface. */
3228         REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_CLR2);
3229
3230         for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3231                 val = REG_RD(bp, BNX2_NVM_SW_ARB);
3232                 if (!(val & BNX2_NVM_SW_ARB_ARB_ARB2))
3233                         break;
3234
3235                 udelay(5);
3236         }
3237
3238         if (j >= NVRAM_TIMEOUT_COUNT)
3239                 return -EBUSY;
3240
3241         return 0;
3242 }
3243
3244
3245 static int
3246 bnx2_enable_nvram_write(struct bnx2 *bp)
3247 {
3248         u32 val;
3249
3250         val = REG_RD(bp, BNX2_MISC_CFG);
3251         REG_WR(bp, BNX2_MISC_CFG, val | BNX2_MISC_CFG_NVM_WR_EN_PCI);
3252
3253         if (!bp->flash_info->buffered) {
3254                 int j;
3255
3256                 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
3257                 REG_WR(bp, BNX2_NVM_COMMAND,
3258                        BNX2_NVM_COMMAND_WREN | BNX2_NVM_COMMAND_DOIT);
3259
3260                 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3261                         udelay(5);
3262
3263                         val = REG_RD(bp, BNX2_NVM_COMMAND);
3264                         if (val & BNX2_NVM_COMMAND_DONE)
3265                                 break;
3266                 }
3267
3268                 if (j >= NVRAM_TIMEOUT_COUNT)
3269                         return -EBUSY;
3270         }
3271         return 0;
3272 }
3273
3274 static void
3275 bnx2_disable_nvram_write(struct bnx2 *bp)
3276 {
3277         u32 val;
3278
3279         val = REG_RD(bp, BNX2_MISC_CFG);
3280         REG_WR(bp, BNX2_MISC_CFG, val & ~BNX2_MISC_CFG_NVM_WR_EN);
3281 }
3282
3283
3284 static void
3285 bnx2_enable_nvram_access(struct bnx2 *bp)
3286 {
3287         u32 val;
3288
3289         val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
3290         /* Enable both bits, even on read. */
3291         REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
3292                val | BNX2_NVM_ACCESS_ENABLE_EN | BNX2_NVM_ACCESS_ENABLE_WR_EN);
3293 }
3294
3295 static void
3296 bnx2_disable_nvram_access(struct bnx2 *bp)
3297 {
3298         u32 val;
3299
3300         val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
3301         /* Disable both bits, even after read. */
3302         REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
3303                 val & ~(BNX2_NVM_ACCESS_ENABLE_EN |
3304                         BNX2_NVM_ACCESS_ENABLE_WR_EN));
3305 }
3306
/* Erase one flash page at the given offset.  Buffered flash parts
 * erase transparently, so this is a no-op for them.  Returns 0 on
 * success, -EBUSY if the erase command times out.
 */
static int
bnx2_nvram_erase_page(struct bnx2 *bp, u32 offset)
{
	u32 cmd;
	int j;

	if (bp->flash_info->buffered)
		/* Buffered flash, no erase needed */
		return 0;

	/* Build an erase command */
	cmd = BNX2_NVM_COMMAND_ERASE | BNX2_NVM_COMMAND_WR |
	      BNX2_NVM_COMMAND_DOIT;

	/* Need to clear DONE bit separately. */
	REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);

	/* Address of the NVRAM page to erase. */
	REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);

	/* Issue an erase command. */
	REG_WR(bp, BNX2_NVM_COMMAND, cmd);

	/* Wait for completion. */
	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
		u32 val;

		udelay(5);

		val = REG_RD(bp, BNX2_NVM_COMMAND);
		if (val & BNX2_NVM_COMMAND_DONE)
			break;
	}

	if (j >= NVRAM_TIMEOUT_COUNT)
		return -EBUSY;

	return 0;
}
3346
/* Read one 32-bit word from flash at the given byte offset into
 * ret_val (stored big-endian-converted, 4 bytes).  cmd_flags carries
 * BNX2_NVM_COMMAND_* first/last framing bits supplied by the caller.
 * Returns 0 on success, -EBUSY if the read command times out.
 */
static int
bnx2_nvram_read_dword(struct bnx2 *bp, u32 offset, u8 *ret_val, u32 cmd_flags)
{
	u32 cmd;
	int j;

	/* Build the command word. */
	cmd = BNX2_NVM_COMMAND_DOIT | cmd_flags;

	/* Calculate an offset of a buffered flash. */
	if (bp->flash_info->buffered) {
		offset = ((offset / bp->flash_info->page_size) <<
			   bp->flash_info->page_bits) +
			  (offset % bp->flash_info->page_size);
	}

	/* Need to clear DONE bit separately. */
	REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);

	/* Address of the NVRAM to read from. */
	REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);

	/* Issue a read command. */
	REG_WR(bp, BNX2_NVM_COMMAND, cmd);

	/* Wait for completion. */
	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
		u32 val;

		udelay(5);

		val = REG_RD(bp, BNX2_NVM_COMMAND);
		if (val & BNX2_NVM_COMMAND_DONE) {
			val = REG_RD(bp, BNX2_NVM_READ);

			/* Store in flash byte order. */
			val = be32_to_cpu(val);
			memcpy(ret_val, &val, 4);
			break;
		}
	}
	if (j >= NVRAM_TIMEOUT_COUNT)
		return -EBUSY;

	return 0;
}
3392
3393
3394 static int
3395 bnx2_nvram_write_dword(struct bnx2 *bp, u32 offset, u8 *val, u32 cmd_flags)
3396 {
3397         u32 cmd, val32;
3398         int j;
3399
3400         /* Build the command word. */
3401         cmd = BNX2_NVM_COMMAND_DOIT | BNX2_NVM_COMMAND_WR | cmd_flags;
3402
3403         /* Calculate an offset of a buffered flash. */
3404         if (bp->flash_info->buffered) {
3405                 offset = ((offset / bp->flash_info->page_size) <<
3406                           bp->flash_info->page_bits) +
3407                          (offset % bp->flash_info->page_size);
3408         }
3409
3410         /* Need to clear DONE bit separately. */
3411         REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
3412
3413         memcpy(&val32, val, 4);
3414         val32 = cpu_to_be32(val32);
3415
3416         /* Write the data. */
3417         REG_WR(bp, BNX2_NVM_WRITE, val32);
3418
3419         /* Address of the NVRAM to write to. */
3420         REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
3421
3422         /* Issue the write command. */
3423         REG_WR(bp, BNX2_NVM_COMMAND, cmd);
3424
3425         /* Wait for completion. */
3426         for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3427                 udelay(5);
3428
3429                 if (REG_RD(bp, BNX2_NVM_COMMAND) & BNX2_NVM_COMMAND_DONE)
3430                         break;
3431         }
3432         if (j >= NVRAM_TIMEOUT_COUNT)
3433                 return -EBUSY;
3434
3435         return 0;
3436 }
3437
/* Identify the attached flash/EEPROM part and record it in bp->flash_info.
 *
 * Matches the NVM_CFG1 strapping against flash_table[].  If the interface
 * has not been reconfigured yet (bit 30 of NVM_CFG1 clear), this also
 * programs NVM_CFG1..3/WRITE1 from the matched table entry, under the
 * NVRAM hardware lock.  The flash size is taken from shared memory when
 * the firmware provides one, else from the table entry.
 *
 * Returns 0 on success, -ENODEV if no table entry matches, or the error
 * from bnx2_acquire_nvram_lock().
 */
static int
bnx2_init_nvram(struct bnx2 *bp)
{
	u32 val;
	int j, entry_count, rc;
	struct flash_spec *flash;

	/* Determine the selected interface. */
	val = REG_RD(bp, BNX2_NVM_CFG1);

	entry_count = sizeof(flash_table) / sizeof(struct flash_spec);

	rc = 0;
	/* NOTE(review): bit 30 of NVM_CFG1 is presumably the
	 * "reconfigured" flag set by a previous pass — confirm against
	 * the register definition in bnx2.h. */
	if (val & 0x40000000) {

		/* Flash interface has been reconfigured */
		for (j = 0, flash = &flash_table[0]; j < entry_count;
		     j++, flash++) {
			if ((val & FLASH_BACKUP_STRAP_MASK) ==
			    (flash->config1 & FLASH_BACKUP_STRAP_MASK)) {
				bp->flash_info = flash;
				break;
			}
		}
	}
	else {
		u32 mask;
		/* Not yet been reconfigured */

		/* Bit 23 selects the backup strapping set. */
		if (val & (1 << 23))
			mask = FLASH_BACKUP_STRAP_MASK;
		else
			mask = FLASH_STRAP_MASK;

		for (j = 0, flash = &flash_table[0]; j < entry_count;
			j++, flash++) {

			if ((val & mask) == (flash->strapping & mask)) {
				bp->flash_info = flash;

				/* Request access to the flash interface. */
				if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
					return rc;

				/* Enable access to flash interface */
				bnx2_enable_nvram_access(bp);

				/* Reconfigure the flash interface */
				REG_WR(bp, BNX2_NVM_CFG1, flash->config1);
				REG_WR(bp, BNX2_NVM_CFG2, flash->config2);
				REG_WR(bp, BNX2_NVM_CFG3, flash->config3);
				REG_WR(bp, BNX2_NVM_WRITE1, flash->write1);

				/* Disable access to flash interface */
				bnx2_disable_nvram_access(bp);
				bnx2_release_nvram_lock(bp);

				break;
			}
		}
	} /* if (val & 0x40000000) */

	/* Both loops above fall through with j == entry_count when no
	 * table entry matched the strapping. */
	if (j == entry_count) {
		bp->flash_info = NULL;
		printk(KERN_ALERT PFX "Unknown flash/EEPROM type.\n");
		return -ENODEV;
	}

	/* Prefer the flash size advertised by firmware shared memory;
	 * fall back to the table's total_size when it is zero. */
	val = REG_RD_IND(bp, bp->shmem_base + BNX2_SHARED_HW_CFG_CONFIG2);
	val &= BNX2_SHARED_HW_CFG2_NVM_SIZE_MASK;
	if (val)
		bp->flash_size = val;
	else
		bp->flash_size = bp->flash_info->total_size;

	return rc;
}
3515
3516 static int
3517 bnx2_nvram_read(struct bnx2 *bp, u32 offset, u8 *ret_buf,
3518                 int buf_size)
3519 {
3520         int rc = 0;
3521         u32 cmd_flags, offset32, len32, extra;
3522
3523         if (buf_size == 0)
3524                 return 0;
3525
3526         /* Request access to the flash interface. */
3527         if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
3528                 return rc;
3529
3530         /* Enable access to flash interface */
3531         bnx2_enable_nvram_access(bp);
3532
3533         len32 = buf_size;
3534         offset32 = offset;
3535         extra = 0;
3536
3537         cmd_flags = 0;
3538
3539         if (offset32 & 3) {
3540                 u8 buf[4];
3541                 u32 pre_len;
3542
3543                 offset32 &= ~3;
3544                 pre_len = 4 - (offset & 3);
3545
3546                 if (pre_len >= len32) {
3547                         pre_len = len32;
3548                         cmd_flags = BNX2_NVM_COMMAND_FIRST |
3549                                     BNX2_NVM_COMMAND_LAST;
3550                 }
3551                 else {
3552                         cmd_flags = BNX2_NVM_COMMAND_FIRST;
3553                 }
3554
3555                 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
3556
3557                 if (rc)
3558                         return rc;
3559
3560                 memcpy(ret_buf, buf + (offset & 3), pre_len);
3561
3562                 offset32 += 4;
3563                 ret_buf += pre_len;
3564                 len32 -= pre_len;
3565         }
3566         if (len32 & 3) {
3567                 extra = 4 - (len32 & 3);
3568                 len32 = (len32 + 4) & ~3;
3569         }
3570
3571         if (len32 == 4) {
3572                 u8 buf[4];
3573
3574                 if (cmd_flags)
3575                         cmd_flags = BNX2_NVM_COMMAND_LAST;
3576                 else
3577                         cmd_flags = BNX2_NVM_COMMAND_FIRST |
3578                                     BNX2_NVM_COMMAND_LAST;
3579
3580                 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
3581
3582                 memcpy(ret_buf, buf, 4 - extra);
3583         }
3584         else if (len32 > 0) {
3585                 u8 buf[4];
3586
3587                 /* Read the first word. */
3588                 if (cmd_flags)
3589                         cmd_flags = 0;
3590                 else
3591                         cmd_flags = BNX2_NVM_COMMAND_FIRST;
3592
3593                 rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, cmd_flags);
3594
3595                 /* Advance to the next dword. */
3596                 offset32 += 4;
3597                 ret_buf += 4;
3598                 len32 -= 4;
3599
3600                 while (len32 > 4 && rc == 0) {
3601                         rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, 0);
3602
3603                         /* Advance to the next dword. */
3604                         offset32 += 4;
3605                         ret_buf += 4;
3606                         len32 -= 4;
3607                 }
3608
3609                 if (rc)
3610                         return rc;
3611
3612                 cmd_flags = BNX2_NVM_COMMAND_LAST;
3613                 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
3614
3615                 memcpy(ret_buf, buf, 4 - extra);
3616         }
3617
3618         /* Disable access to flash interface */
3619         bnx2_disable_nvram_access(bp);
3620
3621         bnx2_release_nvram_lock(bp);
3622
3623         return rc;
3624 }
3625
3626 static int
3627 bnx2_nvram_write(struct bnx2 *bp, u32 offset, u8 *data_buf,
3628                 int buf_size)
3629 {
3630         u32 written, offset32, len32;
3631         u8 *buf, start[4], end[4], *align_buf = NULL, *flash_buffer = NULL;
3632         int rc = 0;
3633         int align_start, align_end;
3634
3635         buf = data_buf;
3636         offset32 = offset;
3637         len32 = buf_size;
3638         align_start = align_end = 0;
3639
3640         if ((align_start = (offset32 & 3))) {
3641                 offset32 &= ~3;
3642                 len32 += align_start;
3643                 if (len32 < 4)
3644                         len32 = 4;
3645                 if ((rc = bnx2_nvram_read(bp, offset32, start, 4)))
3646                         return rc;
3647         }
3648
3649         if (len32 & 3) {
3650                 align_end = 4 - (len32 & 3);
3651                 len32 += align_end;
3652                 if ((rc = bnx2_nvram_read(bp, offset32 + len32 - 4, end, 4)))
3653                         return rc;
3654         }
3655
3656         if (align_start || align_end) {
3657                 align_buf = kmalloc(len32, GFP_KERNEL);
3658                 if (align_buf == NULL)
3659                         return -ENOMEM;
3660                 if (align_start) {
3661                         memcpy(align_buf, start, 4);
3662                 }
3663                 if (align_end) {
3664                         memcpy(align_buf + len32 - 4, end, 4);
3665                 }
3666                 memcpy(align_buf + align_start, data_buf, buf_size);
3667                 buf = align_buf;
3668         }
3669
3670         if (bp->flash_info->buffered == 0) {
3671                 flash_buffer = kmalloc(264, GFP_KERNEL);
3672                 if (flash_buffer == NULL) {
3673                         rc = -ENOMEM;
3674                         goto nvram_write_end;
3675                 }
3676         }
3677
3678         written = 0;
3679         while ((written < len32) && (rc == 0)) {
3680                 u32 page_start, page_end, data_start, data_end;
3681                 u32 addr, cmd_flags;
3682                 int i;
3683
3684                 /* Find the page_start addr */
3685                 page_start = offset32 + written;
3686                 page_start -= (page_start % bp->flash_info->page_size);
3687                 /* Find the page_end addr */
3688                 page_end = page_start + bp->flash_info->page_size;
3689                 /* Find the data_start addr */
3690                 data_start = (written == 0) ? offset32 : page_start;
3691                 /* Find the data_end addr */
3692                 data_end = (page_end > offset32 + len32) ?
3693                         (offset32 + len32) : page_end;
3694
3695                 /* Request access to the flash interface. */
3696                 if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
3697                         goto nvram_write_end;
3698
3699                 /* Enable access to flash interface */
3700                 bnx2_enable_nvram_access(bp);
3701
3702                 cmd_flags = BNX2_NVM_COMMAND_FIRST;
3703                 if (bp->flash_info->buffered == 0) {
3704                         int j;
3705
3706                         /* Read the whole page into the buffer
3707                          * (non-buffer flash only) */
3708                         for (j = 0; j < bp->flash_info->page_size; j += 4) {
3709                                 if (j == (bp->flash_info->page_size - 4)) {
3710                                         cmd_flags |= BNX2_NVM_COMMAND_LAST;
3711                                 }
3712                                 rc = bnx2_nvram_read_dword(bp,
3713                                         page_start + j,
3714                                         &flash_buffer[j],
3715                                         cmd_flags);
3716
3717                                 if (rc)
3718                                         goto nvram_write_end;
3719
3720                                 cmd_flags = 0;
3721                         }
3722                 }
3723
3724                 /* Enable writes to flash interface (unlock write-protect) */
3725                 if ((rc = bnx2_enable_nvram_write(bp)) != 0)
3726                         goto nvram_write_end;
3727
3728                 /* Loop to write back the buffer data from page_start to
3729                  * data_start */
3730                 i = 0;
3731                 if (bp->flash_info->buffered == 0) {
3732                         /* Erase the page */
3733                         if ((rc = bnx2_nvram_erase_page(bp, page_start)) != 0)
3734                                 goto nvram_write_end;
3735
3736                         /* Re-enable the write again for the actual write */
3737                         bnx2_enable_nvram_write(bp);
3738
3739                         for (addr = page_start; addr < data_start;
3740                                 addr += 4, i += 4) {
3741
3742                                 rc = bnx2_nvram_write_dword(bp, addr,
3743                                         &flash_buffer[i], cmd_flags);
3744
3745                                 if (rc != 0)
3746                                         goto nvram_write_end;
3747
3748                                 cmd_flags = 0;
3749                         }
3750                 }
3751
3752                 /* Loop to write the new data from data_start to data_end */
3753                 for (addr = data_start; addr < data_end; addr += 4, i += 4) {
3754                         if ((addr == page_end - 4) ||
3755                                 ((bp->flash_info->buffered) &&
3756                                  (addr == data_end - 4))) {
3757
3758                                 cmd_flags |= BNX2_NVM_COMMAND_LAST;
3759                         }
3760                         rc = bnx2_nvram_write_dword(bp, addr, buf,
3761                                 cmd_flags);
3762
3763                         if (rc != 0)
3764                                 goto nvram_write_end;
3765
3766                         cmd_flags = 0;
3767                         buf += 4;
3768                 }
3769
3770                 /* Loop to write back the buffer data from data_end
3771                  * to page_end */
3772                 if (bp->flash_info->buffered == 0) {
3773                         for (addr = data_end; addr < page_end;
3774                                 addr += 4, i += 4) {
3775
3776                                 if (addr == page_end-4) {
3777                                         cmd_flags = BNX2_NVM_COMMAND_LAST;
3778                                 }
3779                                 rc = bnx2_nvram_write_dword(bp, addr,
3780                                         &flash_buffer[i], cmd_flags);
3781
3782                                 if (rc != 0)
3783                                         goto nvram_write_end;
3784
3785                                 cmd_flags = 0;
3786                         }
3787                 }
3788
3789                 /* Disable writes to flash interface (lock write-protect) */
3790                 bnx2_disable_nvram_write(bp);
3791
3792                 /* Disable access to flash interface */
3793                 bnx2_disable_nvram_access(bp);
3794                 bnx2_release_nvram_lock(bp);
3795
3796                 /* Increment written */
3797                 written += data_end - data_start;
3798         }
3799
3800 nvram_write_end:
3801         kfree(flash_buffer);
3802         kfree(align_buf);
3803         return rc;
3804 }
3805
3806 static void
3807 bnx2_init_remote_phy(struct bnx2 *bp)
3808 {
3809         u32 val;
3810
3811         bp->phy_flags &= ~REMOTE_PHY_CAP_FLAG;
3812         if (!(bp->phy_flags & PHY_SERDES_FLAG))
3813                 return;
3814
3815         val = REG_RD_IND(bp, bp->shmem_base + BNX2_FW_CAP_MB);
3816         if ((val & BNX2_FW_CAP_SIGNATURE_MASK) != BNX2_FW_CAP_SIGNATURE)
3817                 return;
3818
3819         if (val & BNX2_FW_CAP_REMOTE_PHY_CAPABLE) {
3820                 if (netif_running(bp->dev)) {
3821                         val = BNX2_DRV_ACK_CAP_SIGNATURE |
3822                               BNX2_FW_CAP_REMOTE_PHY_CAPABLE;
3823                         REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_ACK_CAP_MB,
3824                                    val);
3825                 }
3826                 bp->phy_flags |= REMOTE_PHY_CAP_FLAG;
3827
3828                 val = REG_RD_IND(bp, bp->shmem_base + BNX2_LINK_STATUS);
3829                 if (val & BNX2_LINK_STATUS_SERDES_LINK)
3830                         bp->phy_port = PORT_FIBRE;
3831                 else
3832                         bp->phy_port = PORT_TP;
3833         }
3834 }
3835
/* Perform a coordinated soft reset of the chip.
 *
 * Quiesces DMA, handshakes with the firmware before and after the reset
 * (@reset_code selects the reset reason reported to firmware), issues the
 * chip-family-specific reset, verifies endian configuration, and re-probes
 * remote-PHY state.  Returns 0 on success or a negative errno.
 */
static int
bnx2_reset_chip(struct bnx2 *bp, u32 reset_code)
{
	u32 val;
	int i, rc = 0;

	/* Wait for the current PCI transaction to complete before
	 * issuing a reset. */
	REG_WR(bp, BNX2_MISC_ENABLE_CLR_BITS,
	       BNX2_MISC_ENABLE_CLR_BITS_TX_DMA_ENABLE |
	       BNX2_MISC_ENABLE_CLR_BITS_DMA_ENGINE_ENABLE |
	       BNX2_MISC_ENABLE_CLR_BITS_RX_DMA_ENABLE |
	       BNX2_MISC_ENABLE_CLR_BITS_HOST_COALESCE_ENABLE);
	/* Read back to flush the posted write before the delay. */
	val = REG_RD(bp, BNX2_MISC_ENABLE_CLR_BITS);
	udelay(5);

	/* Wait for the firmware to tell us it is ok to issue a reset. */
	bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT0 | reset_code, 1);

	/* Deposit a driver reset signature so the firmware knows that
	 * this is a soft reset. */
	REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_RESET_SIGNATURE,
		   BNX2_DRV_RESET_SIGNATURE_MAGIC);

	/* Do a dummy read to force the chip to complete all current transaction
	 * before we issue a reset. */
	val = REG_RD(bp, BNX2_MISC_ID);

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		/* 5709 resets via the MISC command register. */
		REG_WR(bp, BNX2_MISC_COMMAND, BNX2_MISC_COMMAND_SW_RESET);
		REG_RD(bp, BNX2_MISC_COMMAND);
		udelay(5);

		/* Restore register-window and word-swap config, which the
		 * reset clears, via PCI config space. */
		val = BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
		      BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;

		pci_write_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG, val);

	} else {
		/* Older chips reset via the core-reset request bit. */
		val = BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
		      BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
		      BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;

		/* Chip reset. */
		REG_WR(bp, BNX2_PCICFG_MISC_CONFIG, val);

		/* 5706 A0/A1 need a ~20ms sleep before polling.
		 * NOTE(review): current->state + schedule_timeout() is the
		 * old-style idiom for msleep(20). */
		if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
		    (CHIP_ID(bp) == CHIP_ID_5706_A1)) {
			current->state = TASK_UNINTERRUPTIBLE;
			schedule_timeout(HZ / 50);
		}

		/* Reset takes approximate 30 usec */
		for (i = 0; i < 10; i++) {
			val = REG_RD(bp, BNX2_PCICFG_MISC_CONFIG);
			if ((val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
				    BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) == 0)
				break;
			udelay(10);
		}

		if (val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
			   BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) {
			printk(KERN_ERR PFX "Chip reset did not complete\n");
			return -EBUSY;
		}
	}

	/* Make sure byte swapping is properly configured. */
	val = REG_RD(bp, BNX2_PCI_SWAP_DIAG0);
	if (val != 0x01020304) {
		printk(KERN_ERR PFX "Chip not in correct endian mode\n");
		return -ENODEV;
	}

	/* Wait for the firmware to finish its initialization. */
	rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT1 | reset_code, 0);
	if (rc)
		return rc;

	/* Re-probe remote-PHY state under the PHY lock; the reset may
	 * have changed what the firmware reports. */
	spin_lock_bh(&bp->phy_lock);
	bnx2_init_remote_phy(bp);
	if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
		bnx2_set_default_remote_link(bp);
	spin_unlock_bh(&bp->phy_lock);

	if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
		/* Adjust the voltage regular to two steps lower.  The default
		 * of this register is 0x0000000e. */
		REG_WR(bp, BNX2_MISC_VREG_CONTROL, 0x000000fa);

		/* Remove bad rbuf memory from the free pool. */
		rc = bnx2_alloc_bad_rbuf(bp);
	}

	return rc;
}
3933
/* Bring the chip to an operational state after reset: DMA configuration,
 * context memory, on-chip CPUs, NVRAM, MAC address, MTU, host-coalescing
 * parameters, and the receive filter.  Finishes with a WAIT2 firmware
 * handshake and enables all blocks.  Returns 0 or a negative errno.
 */
static int
bnx2_init_chip(struct bnx2 *bp)
{
	u32 val;
	int rc;

	/* Make sure the interrupt is not active. */
	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, BNX2_PCICFG_INT_ACK_CMD_MASK_INT);

	/* DMA byte/word swapping and channel counts. */
	val = BNX2_DMA_CONFIG_DATA_BYTE_SWAP |
	      BNX2_DMA_CONFIG_DATA_WORD_SWAP |
#ifdef __BIG_ENDIAN
	      BNX2_DMA_CONFIG_CNTL_BYTE_SWAP |
#endif
	      BNX2_DMA_CONFIG_CNTL_WORD_SWAP |
	      DMA_READ_CHANS << 12 |
	      DMA_WRITE_CHANS << 16;

	/* NOTE(review): undocumented DMA config bits — meaning not
	 * visible here; see the register definition in bnx2.h. */
	val |= (0x2 << 20) | (1 << 11);

	if ((bp->flags & PCIX_FLAG) && (bp->bus_speed_mhz == 133))
		val |= (1 << 23);

	if ((CHIP_NUM(bp) == CHIP_NUM_5706) &&
	    (CHIP_ID(bp) != CHIP_ID_5706_A0) && !(bp->flags & PCIX_FLAG))
		val |= BNX2_DMA_CONFIG_CNTL_PING_PONG_DMA;

	REG_WR(bp, BNX2_DMA_CONFIG, val);

	/* 5706 A0 workaround: force single-DMA mode on TDMA. */
	if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
		val = REG_RD(bp, BNX2_TDMA_CONFIG);
		val |= BNX2_TDMA_CONFIG_ONE_DMA;
		REG_WR(bp, BNX2_TDMA_CONFIG, val);
	}

	/* On PCI-X, disable relaxed ordering in the PCI-X command word. */
	if (bp->flags & PCIX_FLAG) {
		u16 val16;

		pci_read_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
				     &val16);
		pci_write_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
				      val16 & ~PCI_X_CMD_ERO);
	}

	/* Enable the blocks needed before context init can proceed. */
	REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
	       BNX2_MISC_ENABLE_SET_BITS_HOST_COALESCE_ENABLE |
	       BNX2_MISC_ENABLE_STATUS_BITS_RX_V2P_ENABLE |
	       BNX2_MISC_ENABLE_STATUS_BITS_CONTEXT_ENABLE);

	/* Initialize context mapping and zero out the quick contexts.  The
	 * context block must have already been enabled. */
	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		rc = bnx2_init_5709_context(bp);
		if (rc)
			return rc;
	} else
		bnx2_init_context(bp);

	/* Load firmware into the on-chip processors. */
	if ((rc = bnx2_init_cpus(bp)) != 0)
		return rc;

	bnx2_init_nvram(bp);

	bnx2_set_mac_addr(bp);

	/* Mailbox-queue config: 256-byte kernel-bypass blocks; A0/A1
	 * 5709 also need the halt-disable workaround. */
	val = REG_RD(bp, BNX2_MQ_CONFIG);
	val &= ~BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE;
	val |= BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE_256;
	if (CHIP_ID(bp) == CHIP_ID_5709_A0 || CHIP_ID(bp) == CHIP_ID_5709_A1)
		val |= BNX2_MQ_CONFIG_HALT_DIS;

	REG_WR(bp, BNX2_MQ_CONFIG, val);

	val = 0x10000 + (MAX_CID_CNT * MB_KERNEL_CTX_SIZE);
	REG_WR(bp, BNX2_MQ_KNL_BYP_WIND_START, val);
	REG_WR(bp, BNX2_MQ_KNL_WIND_END, val);

	/* Tell RV2P the host page size. */
	val = (BCM_PAGE_BITS - 8) << 24;
	REG_WR(bp, BNX2_RV2P_CONFIG, val);

	/* Configure page size. */
	val = REG_RD(bp, BNX2_TBDR_CONFIG);
	val &= ~BNX2_TBDR_CONFIG_PAGE_SIZE;
	val |= (BCM_PAGE_BITS - 8) << 24 | 0x40;
	REG_WR(bp, BNX2_TBDR_CONFIG, val);

	/* Seed the EMAC backoff generator from the MAC address. */
	val = bp->mac_addr[0] +
	      (bp->mac_addr[1] << 8) +
	      (bp->mac_addr[2] << 16) +
	      bp->mac_addr[3] +
	      (bp->mac_addr[4] << 8) +
	      (bp->mac_addr[5] << 16);
	REG_WR(bp, BNX2_EMAC_BACKOFF_SEED, val);

	/* Program the MTU.  Also include 4 bytes for CRC32. */
	val = bp->dev->mtu + ETH_HLEN + 4;
	if (val > (MAX_ETHERNET_PACKET_SIZE + 4))
		val |= BNX2_EMAC_RX_MTU_SIZE_JUMBO_ENA;
	REG_WR(bp, BNX2_EMAC_RX_MTU_SIZE, val);

	bp->last_status_idx = 0;
	bp->rx_mode = BNX2_EMAC_RX_MODE_SORT_MODE;

	/* Set up how to generate a link change interrupt. */
	REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);

	/* DMA addresses of the status and statistics blocks. */
	REG_WR(bp, BNX2_HC_STATUS_ADDR_L,
	       (u64) bp->status_blk_mapping & 0xffffffff);
	REG_WR(bp, BNX2_HC_STATUS_ADDR_H, (u64) bp->status_blk_mapping >> 32);

	REG_WR(bp, BNX2_HC_STATISTICS_ADDR_L,
	       (u64) bp->stats_blk_mapping & 0xffffffff);
	REG_WR(bp, BNX2_HC_STATISTICS_ADDR_H,
	       (u64) bp->stats_blk_mapping >> 32);

	/* Host-coalescing thresholds: low half is the normal value,
	 * high half the during-interrupt value. */
	REG_WR(bp, BNX2_HC_TX_QUICK_CONS_TRIP,
	       (bp->tx_quick_cons_trip_int << 16) | bp->tx_quick_cons_trip);

	REG_WR(bp, BNX2_HC_RX_QUICK_CONS_TRIP,
	       (bp->rx_quick_cons_trip_int << 16) | bp->rx_quick_cons_trip);

	REG_WR(bp, BNX2_HC_COMP_PROD_TRIP,
	       (bp->comp_prod_trip_int << 16) | bp->comp_prod_trip);

	REG_WR(bp, BNX2_HC_TX_TICKS, (bp->tx_ticks_int << 16) | bp->tx_ticks);

	REG_WR(bp, BNX2_HC_RX_TICKS, (bp->rx_ticks_int << 16) | bp->rx_ticks);

	REG_WR(bp, BNX2_HC_COM_TICKS,
	       (bp->com_ticks_int << 16) | bp->com_ticks);

	REG_WR(bp, BNX2_HC_CMD_TICKS,
	       (bp->cmd_ticks_int << 16) | bp->cmd_ticks);

	if (CHIP_NUM(bp) == CHIP_NUM_5708)
		REG_WR(bp, BNX2_HC_STATS_TICKS, 0);
	else
		REG_WR(bp, BNX2_HC_STATS_TICKS, bp->stats_ticks & 0xffff00);
	REG_WR(bp, BNX2_HC_STAT_COLLECT_TICKS, 0xbb8);  /* 3ms */

	/* 5706 A1 cannot use timer-mode coalescing. */
	if (CHIP_ID(bp) == CHIP_ID_5706_A1)
		val = BNX2_HC_CONFIG_COLLECT_STATS;
	else {
		val = BNX2_HC_CONFIG_RX_TMR_MODE | BNX2_HC_CONFIG_TX_TMR_MODE |
		      BNX2_HC_CONFIG_COLLECT_STATS;
	}

	if (bp->flags & ONE_SHOT_MSI_FLAG)
		val |= BNX2_HC_CONFIG_ONE_SHOT;

	REG_WR(bp, BNX2_HC_CONFIG, val);

	/* Clear internal stats counters. */
	REG_WR(bp, BNX2_HC_COMMAND, BNX2_HC_COMMAND_CLR_STAT_NOW);

	REG_WR(bp, BNX2_HC_ATTN_BITS_ENABLE, STATUS_ATTN_EVENTS);

	/* Record whether the firmware has ASF management enabled. */
	if (REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_FEATURE) &
	    BNX2_PORT_FEATURE_ASF_ENABLED)
		bp->flags |= ASF_ENABLE_FLAG;

	/* Initialize the receive filter. */
	bnx2_set_rx_mode(bp->dev);

	/* 5709: enable DMA in the new core control block. */
	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		val = REG_RD(bp, BNX2_MISC_NEW_CORE_CTL);
		val |= BNX2_MISC_NEW_CORE_CTL_DMA_ENABLE;
		REG_WR(bp, BNX2_MISC_NEW_CORE_CTL, val);
	}
	/* Final firmware handshake before going live. */
	rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT2 | BNX2_DRV_MSG_CODE_RESET,
			  0);

	/* Enable all remaining blocks; read back to flush the write. */
	REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS, 0x5ffffff);
	REG_RD(bp, BNX2_MISC_ENABLE_SET_BITS);

	udelay(20);

	bp->hc_cmd = REG_RD(bp, BNX2_HC_COMMAND);

	return rc;
}
4115
4116 static void
4117 bnx2_init_tx_context(struct bnx2 *bp, u32 cid)
4118 {
4119         u32 val, offset0, offset1, offset2, offset3;
4120
4121         if (CHIP_NUM(bp) == CHIP_NUM_5709) {
4122                 offset0 = BNX2_L2CTX_TYPE_XI;
4123                 offset1 = BNX2_L2CTX_CMD_TYPE_XI;
4124                 offset2 = BNX2_L2CTX_TBDR_BHADDR_HI_XI;
4125                 offset3 = BNX2_L2CTX_TBDR_BHADDR_LO_XI;
4126         } else {
4127                 offset0 = BNX2_L2CTX_TYPE;
4128                 offset1 = BNX2_L2CTX_CMD_TYPE;
4129                 offset2 = BNX2_L2CTX_TBDR_BHADDR_HI;
4130                 offset3 = BNX2_L2CTX_TBDR_BHADDR_LO;
4131         }
4132         val = BNX2_L2CTX_TYPE_TYPE_L2 | BNX2_L2CTX_TYPE_SIZE_L2;
4133         CTX_WR(bp, GET_CID_ADDR(cid), offset0, val);
4134
4135         val = BNX2_L2CTX_CMD_TYPE_TYPE_L2 | (8 << 16);
4136         CTX_WR(bp, GET_CID_ADDR(cid), offset1, val);
4137
4138         val = (u64) bp->tx_desc_mapping >> 32;
4139         CTX_WR(bp, GET_CID_ADDR(cid), offset2, val);
4140
4141         val = (u64) bp->tx_desc_mapping & 0xffffffff;
4142         CTX_WR(bp, GET_CID_ADDR(cid), offset3, val);
4143 }
4144
4145 static void
4146 bnx2_init_tx_ring(struct bnx2 *bp)
4147 {
4148         struct tx_bd *txbd;
4149         u32 cid;
4150
4151         bp->tx_wake_thresh = bp->tx_ring_size / 2;
4152
4153         txbd = &bp->tx_desc_ring[MAX_TX_DESC_CNT];
4154
4155         txbd->tx_bd_haddr_hi = (u64) bp->tx_desc_mapping >> 32;
4156         txbd->tx_bd_haddr_lo = (u64) bp->tx_desc_mapping & 0xffffffff;
4157
4158         bp->tx_prod = 0;
4159         bp->tx_cons = 0;
4160         bp->hw_tx_cons = 0;
4161         bp->tx_prod_bseq = 0;
4162
4163         cid = TX_CID;
4164         bp->tx_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BIDX;
4165         bp->tx_bseq_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BSEQ;
4166
4167         bnx2_init_tx_context(bp, cid);
4168 }
4169
/* Initialize the RX BD chain and the RX L2 context, then pre-fill the
 * ring with receive skbs and publish the producer index to the chip.
 */
static void
bnx2_init_rx_ring(struct bnx2 *bp)
{
	struct rx_bd *rxbd;
	int i;
	u16 prod, ring_prod;
	u32 val;

	/* 8 for CRC and VLAN */
	bp->rx_buf_use_size = bp->dev->mtu + ETH_HLEN + bp->rx_offset + 8;
	/* hw alignment */
	bp->rx_buf_size = bp->rx_buf_use_size + BNX2_RX_ALIGN;

	ring_prod = prod = bp->rx_prod = 0;
	bp->rx_cons = 0;
	bp->hw_rx_cons = 0;
	bp->rx_prod_bseq = 0;

	/* Set up each ring page; the BD after the last usable one in a
	 * page links to the next page (or back to page 0 for the final
	 * page), making the chain circular.
	 */
	for (i = 0; i < bp->rx_max_ring; i++) {
		int j;

		rxbd = &bp->rx_desc_ring[i][0];
		for (j = 0; j < MAX_RX_DESC_CNT; j++, rxbd++) {
			rxbd->rx_bd_len = bp->rx_buf_use_size;
			rxbd->rx_bd_flags = RX_BD_FLAGS_START | RX_BD_FLAGS_END;
		}
		/* j becomes the index of the page this link BD targets */
		if (i == (bp->rx_max_ring - 1))
			j = 0;
		else
			j = i + 1;
		rxbd->rx_bd_haddr_hi = (u64) bp->rx_desc_mapping[j] >> 32;
		rxbd->rx_bd_haddr_lo = (u64) bp->rx_desc_mapping[j] &
				       0xffffffff;
	}

	/* Program the RX L2 context: BD-chain context type, then the
	 * bus address of the first page of the chain.
	 */
	val = BNX2_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE;
	val |= BNX2_L2CTX_CTX_TYPE_SIZE_L2;
	val |= 0x02 << 8;
	CTX_WR(bp, GET_CID_ADDR(RX_CID), BNX2_L2CTX_CTX_TYPE, val);

	val = (u64) bp->rx_desc_mapping[0] >> 32;
	CTX_WR(bp, GET_CID_ADDR(RX_CID), BNX2_L2CTX_NX_BDHADDR_HI, val);

	val = (u64) bp->rx_desc_mapping[0] & 0xffffffff;
	CTX_WR(bp, GET_CID_ADDR(RX_CID), BNX2_L2CTX_NX_BDHADDR_LO, val);

	/* Fill the ring with skbs; on allocation failure we simply run
	 * with a smaller ring.
	 */
	for (i = 0; i < bp->rx_ring_size; i++) {
		if (bnx2_alloc_rx_skb(bp, ring_prod) < 0) {
			break;
		}
		prod = NEXT_RX_BD(prod);
		ring_prod = RX_RING_IDX(prod);
	}
	bp->rx_prod = prod;

	/* Publish the producer index and byte sequence to the chip. */
	REG_WR16(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BDIDX, prod);

	REG_WR(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BSEQ, bp->rx_prod_bseq);
}
4229
4230 static void
4231 bnx2_set_rx_ring_size(struct bnx2 *bp, u32 size)
4232 {
4233         u32 num_rings, max;
4234
4235         bp->rx_ring_size = size;
4236         num_rings = 1;
4237         while (size > MAX_RX_DESC_CNT) {
4238                 size -= MAX_RX_DESC_CNT;
4239                 num_rings++;
4240         }
4241         /* round to next power of 2 */
4242         max = MAX_RX_RINGS;
4243         while ((max & num_rings) == 0)
4244                 max >>= 1;
4245
4246         if (num_rings != max)
4247                 max <<= 1;
4248
4249         bp->rx_max_ring = max;
4250         bp->rx_max_ring_idx = (bp->rx_max_ring * RX_DESC_CNT) - 1;
4251 }
4252
/* Unmap and free every skb still queued on the TX ring.  Each skb owns
 * one BD for the linear data plus one BD per page fragment; the outer
 * loop advances past the whole group at once.
 */
static void
bnx2_free_tx_skbs(struct bnx2 *bp)
{
	int i;

	if (bp->tx_buf_ring == NULL)
		return;

	for (i = 0; i < TX_DESC_CNT; ) {
		struct sw_bd *tx_buf = &bp->tx_buf_ring[i];
		struct sk_buff *skb = tx_buf->skb;
		int j, last;

		if (skb == NULL) {
			i++;
			continue;
		}

		/* Unmap the linear part (first BD of the group). */
		pci_unmap_single(bp->pdev, pci_unmap_addr(tx_buf, mapping),
			skb_headlen(skb), PCI_DMA_TODEVICE);

		tx_buf->skb = NULL;

		/* Unmap each fragment; one BD per fragment follows the
		 * head BD (note tx_buf is repointed at each one).
		 */
		last = skb_shinfo(skb)->nr_frags;
		for (j = 0; j < last; j++) {
			tx_buf = &bp->tx_buf_ring[i + j + 1];
			pci_unmap_page(bp->pdev,
				pci_unmap_addr(tx_buf, mapping),
				skb_shinfo(skb)->frags[j].size,
				PCI_DMA_TODEVICE);
		}
		dev_kfree_skb(skb);
		/* Skip over the head BD and all fragment BDs. */
		i += j + 1;
	}

}
4289
4290 static void
4291 bnx2_free_rx_skbs(struct bnx2 *bp)
4292 {
4293         int i;
4294
4295         if (bp->rx_buf_ring == NULL)
4296                 return;
4297
4298         for (i = 0; i < bp->rx_max_ring_idx; i++) {
4299                 struct sw_bd *rx_buf = &bp->rx_buf_ring[i];
4300                 struct sk_buff *skb = rx_buf->skb;
4301
4302                 if (skb == NULL)
4303                         continue;
4304
4305                 pci_unmap_single(bp->pdev, pci_unmap_addr(rx_buf, mapping),
4306                         bp->rx_buf_use_size, PCI_DMA_FROMDEVICE);
4307
4308                 rx_buf->skb = NULL;
4309
4310                 dev_kfree_skb(skb);
4311         }
4312 }
4313
/* Release every TX and RX skb still owned by the driver. */
static void
bnx2_free_skbs(struct bnx2 *bp)
{
	bnx2_free_tx_skbs(bp);
	bnx2_free_rx_skbs(bp);
}
4320
4321 static int
4322 bnx2_reset_nic(struct bnx2 *bp, u32 reset_code)
4323 {
4324         int rc;
4325
4326         rc = bnx2_reset_chip(bp, reset_code);
4327         bnx2_free_skbs(bp);
4328         if (rc)
4329                 return rc;
4330
4331         if ((rc = bnx2_init_chip(bp)) != 0)
4332                 return rc;
4333
4334         bnx2_init_tx_ring(bp);
4335         bnx2_init_rx_ring(bp);
4336         return 0;
4337 }
4338
4339 static int
4340 bnx2_init_nic(struct bnx2 *bp)
4341 {
4342         int rc;
4343
4344         if ((rc = bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET)) != 0)
4345                 return rc;
4346
4347         spin_lock_bh(&bp->phy_lock);
4348         bnx2_init_phy(bp);
4349         bnx2_set_link(bp);
4350         spin_unlock_bh(&bp->phy_lock);
4351         return 0;
4352 }
4353
/* Ethtool register self-test: for each entry in the table, verify that
 * the writable bits (rw_mask) respond to writes of 0 and all-ones, and
 * that the read-only bits (ro_mask) keep their original value across
 * both writes.  Each register is restored afterwards.  Returns 0 on
 * success, -ENODEV on the first failing register.
 */
static int
bnx2_test_registers(struct bnx2 *bp)
{
	int ret;
	int i, is_5709;
	/* { offset, flags, writable-bit mask, read-only-bit mask },
	 * terminated by offset 0xffff.  Entries flagged BNX2_FL_NOT_5709
	 * are skipped on the 5709.
	 */
	static const struct {
		u16   offset;
		u16   flags;
#define BNX2_FL_NOT_5709        1
		u32   rw_mask;
		u32   ro_mask;
	} reg_tbl[] = {
		{ 0x006c, 0, 0x00000000, 0x0000003f },
		{ 0x0090, 0, 0xffffffff, 0x00000000 },
		{ 0x0094, 0, 0x00000000, 0x00000000 },

		{ 0x0404, BNX2_FL_NOT_5709, 0x00003f00, 0x00000000 },
		{ 0x0418, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
		{ 0x041c, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
		{ 0x0420, BNX2_FL_NOT_5709, 0x00000000, 0x80ffffff },
		{ 0x0424, BNX2_FL_NOT_5709, 0x00000000, 0x00000000 },
		{ 0x0428, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
		{ 0x0450, BNX2_FL_NOT_5709, 0x00000000, 0x0000ffff },
		{ 0x0454, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
		{ 0x0458, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },

		{ 0x0808, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
		{ 0x0854, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
		{ 0x0868, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
		{ 0x086c, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
		{ 0x0870, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
		{ 0x0874, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },

		{ 0x0c00, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
		{ 0x0c04, BNX2_FL_NOT_5709, 0x00000000, 0x03ff0001 },
		{ 0x0c08, BNX2_FL_NOT_5709,  0x0f0ff073, 0x00000000 },

		{ 0x1000, 0, 0x00000000, 0x00000001 },
		{ 0x1004, 0, 0x00000000, 0x000f0001 },

		{ 0x1408, 0, 0x01c00800, 0x00000000 },
		{ 0x149c, 0, 0x8000ffff, 0x00000000 },
		{ 0x14a8, 0, 0x00000000, 0x000001ff },
		{ 0x14ac, 0, 0x0fffffff, 0x10000000 },
		{ 0x14b0, 0, 0x00000002, 0x00000001 },
		{ 0x14b8, 0, 0x00000000, 0x00000000 },
		{ 0x14c0, 0, 0x00000000, 0x00000009 },
		{ 0x14c4, 0, 0x00003fff, 0x00000000 },
		{ 0x14cc, 0, 0x00000000, 0x00000001 },
		{ 0x14d0, 0, 0xffffffff, 0x00000000 },

		{ 0x1800, 0, 0x00000000, 0x00000001 },
		{ 0x1804, 0, 0x00000000, 0x00000003 },

		{ 0x2800, 0, 0x00000000, 0x00000001 },
		{ 0x2804, 0, 0x00000000, 0x00003f01 },
		{ 0x2808, 0, 0x0f3f3f03, 0x00000000 },
		{ 0x2810, 0, 0xffff0000, 0x00000000 },
		{ 0x2814, 0, 0xffff0000, 0x00000000 },
		{ 0x2818, 0, 0xffff0000, 0x00000000 },
		{ 0x281c, 0, 0xffff0000, 0x00000000 },
		{ 0x2834, 0, 0xffffffff, 0x00000000 },
		{ 0x2840, 0, 0x00000000, 0xffffffff },
		{ 0x2844, 0, 0x00000000, 0xffffffff },
		{ 0x2848, 0, 0xffffffff, 0x00000000 },
		{ 0x284c, 0, 0xf800f800, 0x07ff07ff },

		{ 0x2c00, 0, 0x00000000, 0x00000011 },
		{ 0x2c04, 0, 0x00000000, 0x00030007 },

		{ 0x3c00, 0, 0x00000000, 0x00000001 },
		{ 0x3c04, 0, 0x00000000, 0x00070000 },
		{ 0x3c08, 0, 0x00007f71, 0x07f00000 },
		{ 0x3c0c, 0, 0x1f3ffffc, 0x00000000 },
		{ 0x3c10, 0, 0xffffffff, 0x00000000 },
		{ 0x3c14, 0, 0x00000000, 0xffffffff },
		{ 0x3c18, 0, 0x00000000, 0xffffffff },
		{ 0x3c1c, 0, 0xfffff000, 0x00000000 },
		{ 0x3c20, 0, 0xffffff00, 0x00000000 },

		{ 0x5004, 0, 0x00000000, 0x0000007f },
		{ 0x5008, 0, 0x0f0007ff, 0x00000000 },

		{ 0x5c00, 0, 0x00000000, 0x00000001 },
		{ 0x5c04, 0, 0x00000000, 0x0003000f },
		{ 0x5c08, 0, 0x00000003, 0x00000000 },
		{ 0x5c0c, 0, 0x0000fff8, 0x00000000 },
		{ 0x5c10, 0, 0x00000000, 0xffffffff },
		{ 0x5c80, 0, 0x00000000, 0x0f7113f1 },
		{ 0x5c84, 0, 0x00000000, 0x0000f333 },
		{ 0x5c88, 0, 0x00000000, 0x00077373 },
		{ 0x5c8c, 0, 0x00000000, 0x0007f737 },

		{ 0x6808, 0, 0x0000ff7f, 0x00000000 },
		{ 0x680c, 0, 0xffffffff, 0x00000000 },
		{ 0x6810, 0, 0xffffffff, 0x00000000 },
		{ 0x6814, 0, 0xffffffff, 0x00000000 },
		{ 0x6818, 0, 0xffffffff, 0x00000000 },
		{ 0x681c, 0, 0xffffffff, 0x00000000 },
		{ 0x6820, 0, 0x00ff00ff, 0x00000000 },
		{ 0x6824, 0, 0x00ff00ff, 0x00000000 },
		{ 0x6828, 0, 0x00ff00ff, 0x00000000 },
		{ 0x682c, 0, 0x03ff03ff, 0x00000000 },
		{ 0x6830, 0, 0x03ff03ff, 0x00000000 },
		{ 0x6834, 0, 0x03ff03ff, 0x00000000 },
		{ 0x6838, 0, 0x03ff03ff, 0x00000000 },
		{ 0x683c, 0, 0x0000ffff, 0x00000000 },
		{ 0x6840, 0, 0x00000ff0, 0x00000000 },
		{ 0x6844, 0, 0x00ffff00, 0x00000000 },
		{ 0x684c, 0, 0xffffffff, 0x00000000 },
		{ 0x6850, 0, 0x7f7f7f7f, 0x00000000 },
		{ 0x6854, 0, 0x7f7f7f7f, 0x00000000 },
		{ 0x6858, 0, 0x7f7f7f7f, 0x00000000 },
		{ 0x685c, 0, 0x7f7f7f7f, 0x00000000 },
		{ 0x6908, 0, 0x00000000, 0x0001ff0f },
		{ 0x690c, 0, 0x00000000, 0x0ffe00f0 },

		{ 0xffff, 0, 0x00000000, 0x00000000 },
	};

	ret = 0;
	is_5709 = 0;
	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		is_5709 = 1;

	for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
		u32 offset, rw_mask, ro_mask, save_val, val;
		u16 flags = reg_tbl[i].flags;

		if (is_5709 && (flags & BNX2_FL_NOT_5709))
			continue;

		offset = (u32) reg_tbl[i].offset;
		rw_mask = reg_tbl[i].rw_mask;
		ro_mask = reg_tbl[i].ro_mask;

		save_val = readl(bp->regview + offset);

		/* Write 0: writable bits must read back 0, read-only
		 * bits must be unchanged.
		 */
		writel(0, bp->regview + offset);

		val = readl(bp->regview + offset);
		if ((val & rw_mask) != 0) {
			goto reg_test_err;
		}

		if ((val & ro_mask) != (save_val & ro_mask)) {
			goto reg_test_err;
		}

		/* Write all-ones: writable bits must read back 1,
		 * read-only bits must still be unchanged.
		 */
		writel(0xffffffff, bp->regview + offset);

		val = readl(bp->regview + offset);
		if ((val & rw_mask) != rw_mask) {
			goto reg_test_err;
		}

		if ((val & ro_mask) != (save_val & ro_mask)) {
			goto reg_test_err;
		}

		writel(save_val, bp->regview + offset);
		continue;

reg_test_err:
		/* Restore the register even on failure. */
		writel(save_val, bp->regview + offset);
		ret = -ENODEV;
		break;
	}
	return ret;
}
4524
4525 static int
4526 bnx2_do_mem_test(struct bnx2 *bp, u32 start, u32 size)
4527 {
4528         static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0x55555555,
4529                 0xaaaaaaaa , 0xaa55aa55, 0x55aa55aa };
4530         int i;
4531
4532         for (i = 0; i < sizeof(test_pattern) / 4; i++) {
4533                 u32 offset;
4534
4535                 for (offset = 0; offset < size; offset += 4) {
4536
4537                         REG_WR_IND(bp, start + offset, test_pattern[i]);
4538
4539                         if (REG_RD_IND(bp, start + offset) !=
4540                                 test_pattern[i]) {
4541                                 return -ENODEV;
4542                         }
4543                 }
4544         }
4545         return 0;
4546 }
4547
4548 static int
4549 bnx2_test_memory(struct bnx2 *bp)
4550 {
4551         int ret = 0;
4552         int i;
4553         static struct mem_entry {
4554                 u32   offset;
4555                 u32   len;
4556         } mem_tbl_5706[] = {
4557                 { 0x60000,  0x4000 },
4558                 { 0xa0000,  0x3000 },
4559                 { 0xe0000,  0x4000 },
4560                 { 0x120000, 0x4000 },
4561                 { 0x1a0000, 0x4000 },
4562                 { 0x160000, 0x4000 },
4563                 { 0xffffffff, 0    },
4564         },
4565         mem_tbl_5709[] = {
4566                 { 0x60000,  0x4000 },
4567                 { 0xa0000,  0x3000 },
4568                 { 0xe0000,  0x4000 },
4569                 { 0x120000, 0x4000 },
4570                 { 0x1a0000, 0x4000 },
4571                 { 0xffffffff, 0    },
4572         };
4573         struct mem_entry *mem_tbl;
4574
4575         if (CHIP_NUM(bp) == CHIP_NUM_5709)
4576                 mem_tbl = mem_tbl_5709;
4577         else
4578                 mem_tbl = mem_tbl_5706;
4579
4580         for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
4581                 if ((ret = bnx2_do_mem_test(bp, mem_tbl[i].offset,
4582                         mem_tbl[i].len)) != 0) {
4583                         return ret;
4584                 }
4585         }
4586
4587         return ret;
4588 }
4589
4590 #define BNX2_MAC_LOOPBACK       0
4591 #define BNX2_PHY_LOOPBACK       1
4592
/* Send one 1514-byte self-addressed test frame in MAC or PHY loopback
 * mode and verify it is received intact.  Returns 0 on success,
 * -EINVAL for an unknown mode, -ENOMEM if no skb could be allocated,
 * and -ENODEV for any TX/RX mismatch or corruption.
 */
static int
bnx2_run_loopback(struct bnx2 *bp, int loopback_mode)
{
	unsigned int pkt_size, num_pkts, i;
	struct sk_buff *skb, *rx_skb;
	unsigned char *packet;
	u16 rx_start_idx, rx_idx;
	dma_addr_t map;
	struct tx_bd *txbd;
	struct sw_bd *rx_buf;
	struct l2_fhdr *rx_hdr;
	int ret = -ENODEV;

	if (loopback_mode == BNX2_MAC_LOOPBACK) {
		bp->loopback = MAC_LOOPBACK;
		bnx2_set_mac_loopback(bp);
	}
	else if (loopback_mode == BNX2_PHY_LOOPBACK) {
		bp->loopback = PHY_LOOPBACK;
		bnx2_set_phy_loopback(bp);
	}
	else
		return -EINVAL;

	/* Build the frame: our own MAC as destination, zeroed bytes for
	 * the rest of the header, then a counting byte pattern.
	 */
	pkt_size = 1514;
	skb = netdev_alloc_skb(bp->dev, pkt_size);
	if (!skb)
		return -ENOMEM;
	packet = skb_put(skb, pkt_size);
	memcpy(packet, bp->dev->dev_addr, 6);
	memset(packet + 6, 0x0, 8);
	for (i = 14; i < pkt_size; i++)
		packet[i] = (unsigned char) (i & 0xff);

	map = pci_map_single(bp->pdev, skb->data, pkt_size,
		PCI_DMA_TODEVICE);

	/* Force a status block update so rx_start_idx is current. */
	REG_WR(bp, BNX2_HC_COMMAND,
	       bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);

	REG_RD(bp, BNX2_HC_COMMAND);

	udelay(5);
	rx_start_idx = bp->status_blk->status_rx_quick_consumer_index0;

	num_pkts = 0;

	/* Queue the frame as a single BD and ring the TX doorbell. */
	txbd = &bp->tx_desc_ring[TX_RING_IDX(bp->tx_prod)];

	txbd->tx_bd_haddr_hi = (u64) map >> 32;
	txbd->tx_bd_haddr_lo = (u64) map & 0xffffffff;
	txbd->tx_bd_mss_nbytes = pkt_size;
	txbd->tx_bd_vlan_tag_flags = TX_BD_FLAGS_START | TX_BD_FLAGS_END;

	num_pkts++;
	bp->tx_prod = NEXT_TX_BD(bp->tx_prod);
	bp->tx_prod_bseq += pkt_size;

	REG_WR16(bp, bp->tx_bidx_addr, bp->tx_prod);
	REG_WR(bp, bp->tx_bseq_addr, bp->tx_prod_bseq);

	/* Give the frame time to loop back, then force another status
	 * block update before checking results.
	 */
	udelay(100);

	REG_WR(bp, BNX2_HC_COMMAND,
	       bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);

	REG_RD(bp, BNX2_HC_COMMAND);

	udelay(5);

	pci_unmap_single(bp->pdev, map, pkt_size, PCI_DMA_TODEVICE);
	dev_kfree_skb(skb);

	/* The chip must have consumed exactly the BD we queued ... */
	if (bp->status_blk->status_tx_quick_consumer_index0 != bp->tx_prod) {
		goto loopback_test_done;
	}

	/* ... and exactly num_pkts frames must have been received. */
	rx_idx = bp->status_blk->status_rx_quick_consumer_index0;
	if (rx_idx != rx_start_idx + num_pkts) {
		goto loopback_test_done;
	}

	rx_buf = &bp->rx_buf_ring[rx_start_idx];
	rx_skb = rx_buf->skb;

	/* The l2_fhdr written by the chip precedes the frame data. */
	rx_hdr = (struct l2_fhdr *) rx_skb->data;
	skb_reserve(rx_skb, bp->rx_offset);

	pci_dma_sync_single_for_cpu(bp->pdev,
		pci_unmap_addr(rx_buf, mapping),
		bp->rx_buf_size, PCI_DMA_FROMDEVICE);

	if (rx_hdr->l2_fhdr_status &
		(L2_FHDR_ERRORS_BAD_CRC |
		L2_FHDR_ERRORS_PHY_DECODE |
		L2_FHDR_ERRORS_ALIGNMENT |
		L2_FHDR_ERRORS_TOO_SHORT |
		L2_FHDR_ERRORS_GIANT_FRAME)) {

		goto loopback_test_done;
	}

	/* The reported length includes the 4-byte CRC. */
	if ((rx_hdr->l2_fhdr_pkt_len - 4) != pkt_size) {
		goto loopback_test_done;
	}

	/* Verify the payload pattern survived the round trip. */
	for (i = 14; i < pkt_size; i++) {
		if (*(rx_skb->data + i) != (unsigned char) (i & 0xff)) {
			goto loopback_test_done;
		}
	}

	ret = 0;

loopback_test_done:
	bp->loopback = 0;
	return ret;
}
4711
4712 #define BNX2_MAC_LOOPBACK_FAILED        1
4713 #define BNX2_PHY_LOOPBACK_FAILED        2
4714 #define BNX2_LOOPBACK_FAILED            (BNX2_MAC_LOOPBACK_FAILED |     \
4715                                          BNX2_PHY_LOOPBACK_FAILED)
4716
4717 static int
4718 bnx2_test_loopback(struct bnx2 *bp)
4719 {
4720         int rc = 0;
4721
4722         if (!netif_running(bp->dev))
4723                 return BNX2_LOOPBACK_FAILED;
4724
4725         bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET);
4726         spin_lock_bh(&bp->phy_lock);
4727         bnx2_init_phy(bp);
4728         spin_unlock_bh(&bp->phy_lock);
4729         if (bnx2_run_loopback(bp, BNX2_MAC_LOOPBACK))
4730                 rc |= BNX2_MAC_LOOPBACK_FAILED;
4731         if (bnx2_run_loopback(bp, BNX2_PHY_LOOPBACK))
4732                 rc |= BNX2_PHY_LOOPBACK_FAILED;
4733         return rc;
4734 }
4735
4736 #define NVRAM_SIZE 0x200
4737 #define CRC32_RESIDUAL 0xdebb20e3
4738
4739 static int
4740 bnx2_test_nvram(struct bnx2 *bp)
4741 {
4742         u32 buf[NVRAM_SIZE / 4];
4743         u8 *data = (u8 *) buf;
4744         int rc = 0;
4745         u32 magic, csum;
4746
4747         if ((rc = bnx2_nvram_read(bp, 0, data, 4)) != 0)
4748                 goto test_nvram_done;
4749
4750         magic = be32_to_cpu(buf[0]);
4751         if (magic != 0x669955aa) {
4752                 rc = -ENODEV;
4753                 goto test_nvram_done;
4754         }
4755
4756         if ((rc = bnx2_nvram_read(bp, 0x100, data, NVRAM_SIZE)) != 0)
4757                 goto test_nvram_done;
4758
4759         csum = ether_crc_le(0x100, data);
4760         if (csum != CRC32_RESIDUAL) {
4761                 rc = -ENODEV;
4762                 goto test_nvram_done;
4763         }
4764
4765         csum = ether_crc_le(0x100, data + 0x100);
4766         if (csum != CRC32_RESIDUAL) {
4767                 rc = -ENODEV;
4768         }
4769
4770 test_nvram_done:
4771         return rc;
4772 }
4773
4774 static int
4775 bnx2_test_link(struct bnx2 *bp)
4776 {
4777         u32 bmsr;
4778
4779         spin_lock_bh(&bp->phy_lock);
4780         bnx2_enable_bmsr1(bp);
4781         bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
4782         bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
4783         bnx2_disable_bmsr1(bp);
4784         spin_unlock_bh(&bp->phy_lock);
4785
4786         if (bmsr & BMSR_LSTATUS) {
4787                 return 0;
4788         }
4789         return -ENODEV;
4790 }
4791
4792 static int
4793 bnx2_test_intr(struct bnx2 *bp)
4794 {
4795         int i;
4796         u16 status_idx;
4797
4798         if (!netif_running(bp->dev))
4799                 return -ENODEV;
4800
4801         status_idx = REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff;
4802
4803         /* This register is not touched during run-time. */
4804         REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
4805         REG_RD(bp, BNX2_HC_COMMAND);
4806
4807         for (i = 0; i < 10; i++) {
4808                 if ((REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff) !=
4809                         status_idx) {
4810
4811                         break;
4812                 }
4813
4814                 msleep_interruptible(10);
4815         }
4816         if (i < 10)
4817                 return 0;
4818
4819         return -ENODEV;
4820 }
4821
/* Periodic SerDes state machine for the 5706: when autoneg has not
 * brought the link up but a signal is detected without autoneg CONFIG
 * words, force 1Gb full duplex (parallel detect); once CONFIG words
 * reappear on a parallel-detected link, switch back to autoneg.
 */
static void
bnx2_5706_serdes_timer(struct bnx2 *bp)
{
	spin_lock(&bp->phy_lock);
	if (bp->serdes_an_pending)
		bp->serdes_an_pending--;
	else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
		u32 bmcr;

		bp->current_interval = bp->timer_interval;

		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

		if (bmcr & BMCR_ANENABLE) {
			u32 phy1, phy2;

			/* Vendor-specific PHY registers; 0x1c/0x17/0x15
			 * appear to access shadow/expansion registers —
			 * exact semantics per Broadcom docs, TODO confirm.
			 */
			bnx2_write_phy(bp, 0x1c, 0x7c00);
			bnx2_read_phy(bp, 0x1c, &phy1);

			bnx2_write_phy(bp, 0x17, 0x0f01);
			bnx2_read_phy(bp, 0x15, &phy2);
			bnx2_write_phy(bp, 0x17, 0x0f01);
			bnx2_read_phy(bp, 0x15, &phy2);

			if ((phy1 & 0x10) &&	/* SIGNAL DETECT */
				!(phy2 & 0x20)) {	/* no CONFIG */

				/* Signal present but the partner is not
				 * autonegotiating: force 1Gb full duplex.
				 */
				bmcr &= ~BMCR_ANENABLE;
				bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
				bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
				bp->phy_flags |= PHY_PARALLEL_DETECT_FLAG;
			}
		}
	}
	else if ((bp->link_up) && (bp->autoneg & AUTONEG_SPEED) &&
		 (bp->phy_flags & PHY_PARALLEL_DETECT_FLAG)) {
		u32 phy2;

		/* Link came up via parallel detect; if CONFIG words are
		 * now seen, re-enable autonegotiation.
		 */
		bnx2_write_phy(bp, 0x17, 0x0f01);
		bnx2_read_phy(bp, 0x15, &phy2);
		if (phy2 & 0x20) {
			u32 bmcr;

			bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
			bmcr |= BMCR_ANENABLE;
			bnx2_write_phy(bp, bp->mii_bmcr, bmcr);

			bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG;
		}
	} else
		bp->current_interval = bp->timer_interval;

	spin_unlock(&bp->phy_lock);
}
4876
/* Periodic SerDes state machine for the 5708 (2.5Gb-capable PHYs only):
 * while autoneg has not brought the link up, alternate between forced
 * 2.5Gb mode and autonegotiation.  Skipped entirely when the PHY is
 * managed remotely by firmware.
 */
static void
bnx2_5708_serdes_timer(struct bnx2 *bp)
{
	if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
		return;

	if ((bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) == 0) {
		bp->serdes_an_pending = 0;
		return;
	}

	spin_lock(&bp->phy_lock);
	if (bp->serdes_an_pending)
		bp->serdes_an_pending--;
	else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
		u32 bmcr;

		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
		if (bmcr & BMCR_ANENABLE) {
			/* Autoneg did not succeed: try forced 2.5Gb
			 * next, with a shorter re-check interval.
			 */
			bnx2_enable_forced_2g5(bp);
			bp->current_interval = SERDES_FORCED_TIMEOUT;
		} else {
			/* Forced mode did not succeed: go back to
			 * autoneg and give it two timer ticks.
			 */
			bnx2_disable_forced_2g5(bp);
			bp->serdes_an_pending = 2;
			bp->current_interval = bp->timer_interval;
		}

	} else
		bp->current_interval = bp->timer_interval;

	spin_unlock(&bp->phy_lock);
}
4909
/* Periodic driver timer: sends the firmware keep-alive pulse, copies
 * the firmware RX-drop counter into the stats block, applies a 5708
 * stats workaround, and runs the SerDes link state machines.  Always
 * reschedules itself at bp->current_interval while the device is up.
 */
static void
bnx2_timer(unsigned long data)
{
	struct bnx2 *bp = (struct bnx2 *) data;
	u32 msg;

	if (!netif_running(bp->dev))
		return;

	/* Skip the work (but keep the timer alive) while interrupts
	 * are blocked, e.g. during a reset.
	 */
	if (atomic_read(&bp->intr_sem) != 0)
		goto bnx2_restart_timer;

	/* Firmware keep-alive: an incrementing sequence number. */
	msg = (u32) ++bp->fw_drv_pulse_wr_seq;
	REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_PULSE_MB, msg);

	bp->stats_blk->stat_FwRxDrop = REG_RD_IND(bp, BNX2_FW_RX_DROP_COUNT);

	/* workaround occasional corrupted counters */
	if (CHIP_NUM(bp) == CHIP_NUM_5708 && bp->stats_ticks)
		REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd |
					    BNX2_HC_COMMAND_STATS_NOW);

	if (bp->phy_flags & PHY_SERDES_FLAG) {
		if (CHIP_NUM(bp) == CHIP_NUM_5706)
			bnx2_5706_serdes_timer(bp);
		else
			bnx2_5708_serdes_timer(bp);
	}

bnx2_restart_timer:
	mod_timer(&bp->timer, jiffies + bp->current_interval);
}
4942
4943 static int
4944 bnx2_request_irq(struct bnx2 *bp)
4945 {
4946         struct net_device *dev = bp->dev;
4947         int rc = 0;
4948
4949         if (bp->flags & USING_MSI_FLAG) {
4950                 irq_handler_t   fn = bnx2_msi;
4951
4952                 if (bp->flags & ONE_SHOT_MSI_FLAG)
4953                         fn = bnx2_msi_1shot;
4954
4955                 rc = request_irq(bp->pdev->irq, fn, 0, dev->name, dev);
4956         } else
4957                 rc = request_irq(bp->pdev->irq, bnx2_interrupt,
4958                                  IRQF_SHARED, dev->name, dev);
4959         return rc;
4960 }
4961
4962 static void
4963 bnx2_free_irq(struct bnx2 *bp)
4964 {
4965         struct net_device *dev = bp->dev;
4966
4967         if (bp->flags & USING_MSI_FLAG) {
4968                 free_irq(bp->pdev->irq, dev);
4969                 pci_disable_msi(bp->pdev);
4970                 bp->flags &= ~(USING_MSI_FLAG | ONE_SHOT_MSI_FLAG);
4971         } else
4972                 free_irq(bp->pdev->irq, dev);
4973 }
4974
4975 /* Called with rtnl_lock */
/* Bring the interface up: allocate ring/status memory, choose MSI or
 * INTx, initialize the NIC, verify that MSI interrupts actually arrive
 * (falling back to INTx if not), and start the TX queue.  Returns 0 on
 * success; on failure everything acquired so far is released.
 */
static int
bnx2_open(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	int rc;

	netif_carrier_off(dev);

	bnx2_set_power_state(bp, PCI_D0);
	bnx2_disable_int(bp);

	rc = bnx2_alloc_mem(bp);
	if (rc)
		return rc;

	/* Prefer MSI when the device supports it and the user has not
	 * disabled it; the 5709 additionally uses one-shot MSI.
	 */
	if ((bp->flags & MSI_CAP_FLAG) && !disable_msi) {
		if (pci_enable_msi(bp->pdev) == 0) {
			bp->flags |= USING_MSI_FLAG;
			if (CHIP_NUM(bp) == CHIP_NUM_5709)
				bp->flags |= ONE_SHOT_MSI_FLAG;
		}
	}
	rc = bnx2_request_irq(bp);

	if (rc) {
		bnx2_free_mem(bp);
		return rc;
	}

	rc = bnx2_init_nic(bp);

	if (rc) {
		bnx2_free_irq(bp);
		bnx2_free_skbs(bp);
		bnx2_free_mem(bp);
		return rc;
	}

	mod_timer(&bp->timer, jiffies + bp->current_interval);

	atomic_set(&bp->intr_sem, 0);

	bnx2_enable_int(bp);

	if (bp->flags & USING_MSI_FLAG) {
		/* Test MSI to make sure it is working
		 * If MSI test fails, go back to INTx mode
		 */
		if (bnx2_test_intr(bp) != 0) {
			printk(KERN_WARNING PFX "%s: No interrupt was generated"
			       " using MSI, switching to INTx mode. Please"
			       " report this failure to the PCI maintainer"
			       " and include system chipset information.\n",
			       bp->dev->name);

			bnx2_disable_int(bp);
			/* bnx2_free_irq() clears the MSI flags, so the
			 * re-init below comes back up in INTx mode.
			 */
			bnx2_free_irq(bp);

			rc = bnx2_init_nic(bp);

			if (!rc)
				rc = bnx2_request_irq(bp);

			if (rc) {
				bnx2_free_skbs(bp);
				bnx2_free_mem(bp);
				del_timer_sync(&bp->timer);
				return rc;
			}
			bnx2_enable_int(bp);
		}
	}
	if (bp->flags & USING_MSI_FLAG) {
		printk(KERN_INFO PFX "%s: using MSI\n", dev->name);
	}

	netif_start_queue(dev);

	return 0;
}
5056
static void
bnx2_reset_task(struct work_struct *work)
{
	/* Process-context chip reset, scheduled by bnx2_tx_timeout().
	 * Stops traffic, re-initializes the NIC, then restarts traffic.
	 */
	struct bnx2 *bp = container_of(work, struct bnx2, reset_task);

	if (!netif_running(bp->dev))
		return;

	/* bnx2_close() polls in_reset_task to wait this handler out
	 * instead of calling flush_scheduled_work() (see comment there).
	 */
	bp->in_reset_task = 1;
	bnx2_netif_stop(bp);

	bnx2_init_nic(bp);

	/* NOTE(review): intr_sem is set to 1 here; presumably
	 * bnx2_netif_start() drops it and re-enables interrupts -- confirm.
	 */
	atomic_set(&bp->intr_sem, 1);
	bnx2_netif_start(bp);
	bp->in_reset_task = 0;
}
5074
static void
bnx2_tx_timeout(struct net_device *dev)
{
	/* Netdev TX watchdog callback: defer the chip reset to process
	 * context via the reset_task workqueue item (bnx2_reset_task).
	 */
	struct bnx2 *bp = netdev_priv(dev);

	/* This allows the netif to be shutdown gracefully before resetting */
	schedule_work(&bp->reset_task);
}
5083
#ifdef BCM_VLAN
/* Called with rtnl_lock */
static void
bnx2_vlan_rx_register(struct net_device *dev, struct vlan_group *vlgrp)
{
	/* Attach the VLAN group and reprogram the RX filters; traffic is
	 * paused around the update so the filter change is atomic with
	 * respect to the RX path.
	 */
	struct bnx2 *bp = netdev_priv(dev);

	bnx2_netif_stop(bp);

	bp->vlgrp = vlgrp;
	bnx2_set_rx_mode(dev);

	bnx2_netif_start(bp);
}
#endif
5099
/* Called with netif_tx_lock.
 * bnx2_tx_int() runs without netif_tx_lock unless it needs to call
 * netif_wake_queue().
 */
static int
bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	/* Hard-start transmit: map the skb head and page fragments for
	 * DMA, build one TX buffer descriptor per piece with the proper
	 * checksum/VLAN/LSO flags, then kick the hardware by writing the
	 * new producer index and byte count.
	 */
	struct bnx2 *bp = netdev_priv(dev);
	dma_addr_t mapping;
	struct tx_bd *txbd;
	struct sw_bd *tx_buf;
	u32 len, vlan_tag_flags, last_frag, mss;
	u16 prod, ring_prod;
	int i;

	/* Should never trigger: the queue is stopped below before the
	 * ring can get this full.
	 */
	if (unlikely(bnx2_tx_avail(bp) < (skb_shinfo(skb)->nr_frags + 1))) {
		netif_stop_queue(dev);
		printk(KERN_ERR PFX "%s: BUG! Tx ring full when queue awake!\n",
			dev->name);

		return NETDEV_TX_BUSY;
	}
	len = skb_headlen(skb);
	prod = bp->tx_prod;
	ring_prod = TX_RING_IDX(prod);

	vlan_tag_flags = 0;
	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		vlan_tag_flags |= TX_BD_FLAGS_TCP_UDP_CKSUM;
	}

	if (bp->vlgrp != 0 && vlan_tx_tag_present(skb)) {
		/* The VLAN tag rides in the upper 16 bits of the flags word. */
		vlan_tag_flags |=
			(TX_BD_FLAGS_VLAN_TAG | (vlan_tx_tag_get(skb) << 16));
	}
	if ((mss = skb_shinfo(skb)->gso_size)) {
		/* LSO (TSO): encode MSS and header-length information in
		 * the descriptor for hardware segmentation.
		 */
		u32 tcp_opt_len, ip_tcp_len;
		struct iphdr *iph;

		vlan_tag_flags |= TX_BD_FLAGS_SW_LSO;

		tcp_opt_len = tcp_optlen(skb);

		if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) {
			/* TCPv6: the extra offset of the TCP header past the
			 * basic IPv6 header (extension headers) is scattered
			 * across several flag/MSS bit fields.
			 */
			u32 tcp_off = skb_transport_offset(skb) -
				      sizeof(struct ipv6hdr) - ETH_HLEN;

			vlan_tag_flags |= ((tcp_opt_len >> 2) << 8) |
					  TX_BD_FLAGS_SW_FLAGS;
			if (likely(tcp_off == 0))
				vlan_tag_flags &= ~TX_BD_FLAGS_TCP6_OFF0_MSK;
			else {
				tcp_off >>= 3;
				vlan_tag_flags |= ((tcp_off & 0x3) <<
						   TX_BD_FLAGS_TCP6_OFF0_SHL) |
						  ((tcp_off & 0x10) <<
						   TX_BD_FLAGS_TCP6_OFF4_SHL);
				mss |= (tcp_off & 0xc) << TX_BD_TCP6_OFF2_SHL;
			}
		} else {
			/* TCPv4: the IP/TCP headers are rewritten below, so
			 * a cloned header block must be privatized first.
			 */
			if (skb_header_cloned(skb) &&
			    pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
				dev_kfree_skb(skb);
				return NETDEV_TX_OK;
			}

			ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);

			/* Prime tot_len and the TCP pseudo-header checksum
			 * for the hardware to fix up per segment.
			 */
			iph = ip_hdr(skb);
			iph->check = 0;
			iph->tot_len = htons(mss + ip_tcp_len + tcp_opt_len);
			tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
								 iph->daddr, 0,
								 IPPROTO_TCP,
								 0);
			if (tcp_opt_len || (iph->ihl > 5)) {
				/* Option lengths (in 32-bit words) go in
				 * bits 8+ of the flags word.
				 */
				vlan_tag_flags |= ((iph->ihl - 5) +
						   (tcp_opt_len >> 2)) << 8;
			}
		}
	} else
		mss = 0;

	/* Map the linear part of the skb and fill the first descriptor. */
	mapping = pci_map_single(bp->pdev, skb->data, len, PCI_DMA_TODEVICE);

	tx_buf = &bp->tx_buf_ring[ring_prod];
	tx_buf->skb = skb;
	pci_unmap_addr_set(tx_buf, mapping, mapping);

	txbd = &bp->tx_desc_ring[ring_prod];

	txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
	txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
	txbd->tx_bd_mss_nbytes = len | (mss << 16);
	txbd->tx_bd_vlan_tag_flags = vlan_tag_flags | TX_BD_FLAGS_START;

	last_frag = skb_shinfo(skb)->nr_frags;

	/* One descriptor per page fragment. */
	for (i = 0; i < last_frag; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		prod = NEXT_TX_BD(prod);
		ring_prod = TX_RING_IDX(prod);
		txbd = &bp->tx_desc_ring[ring_prod];

		len = frag->size;
		mapping = pci_map_page(bp->pdev, frag->page, frag->page_offset,
			len, PCI_DMA_TODEVICE);
		pci_unmap_addr_set(&bp->tx_buf_ring[ring_prod],
				mapping, mapping);

		txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
		txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
		txbd->tx_bd_mss_nbytes = len | (mss << 16);
		txbd->tx_bd_vlan_tag_flags = vlan_tag_flags;

	}
	txbd->tx_bd_vlan_tag_flags |= TX_BD_FLAGS_END;

	prod = NEXT_TX_BD(prod);
	bp->tx_prod_bseq += skb->len;

	/* Kick the chip: write the new producer index and byte sequence. */
	REG_WR16(bp, bp->tx_bidx_addr, prod);
	REG_WR(bp, bp->tx_bseq_addr, bp->tx_prod_bseq);

	/* Order the MMIO doorbell writes before netif_tx_lock is released. */
	mmiowb();

	bp->tx_prod = prod;
	dev->trans_start = jiffies;

	/* Stop the queue when nearly full; re-check afterwards because
	 * bnx2_tx_int() may have freed descriptors in the meantime.
	 */
	if (unlikely(bnx2_tx_avail(bp) <= MAX_SKB_FRAGS)) {
		netif_stop_queue(dev);
		if (bnx2_tx_avail(bp) > bp->tx_wake_thresh)
			netif_wake_queue(dev);
	}

	return NETDEV_TX_OK;
}
5238
/* Called with rtnl_lock */
static int
bnx2_close(struct net_device *dev)
{
	/* Bring the interface down: stop traffic and the timer, tell the
	 * firmware why we are resetting (so it can arm Wake-on-LAN if
	 * requested), free resources and drop the device to low power.
	 */
	struct bnx2 *bp = netdev_priv(dev);
	u32 reset_code;

	/* Calling flush_scheduled_work() may deadlock because
	 * linkwatch_event() may be on the workqueue and it will try to get
	 * the rtnl_lock which we are holding.
	 */
	while (bp->in_reset_task)
		msleep(1);

	bnx2_netif_stop(bp);
	del_timer_sync(&bp->timer);
	/* The reset code tells the firmware which power/WoL state to use. */
	if (bp->flags & NO_WOL_FLAG)
		reset_code = BNX2_DRV_MSG_CODE_UNLOAD_LNK_DN;
	else if (bp->wol)
		reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
	else
		reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
	bnx2_reset_chip(bp, reset_code);
	bnx2_free_irq(bp);
	bnx2_free_skbs(bp);
	bnx2_free_mem(bp);
	bp->link_up = 0;
	netif_carrier_off(bp->dev);
	bnx2_set_power_state(bp, PCI_D3hot);
	return 0;
}
5270
/* Read a 64-bit hardware counter laid out as <name>_hi / <name>_lo
 * 32-bit halves.  On 32-bit hosts only the low half fits in an
 * unsigned long, so the 32-bit variant is used there.
 *
 * Each expansion is wrapped in an outer set of parentheses so the
 * macros compose safely inside larger expressions (the original
 * GET_NET_STATS64 expansion was an unparenthesized addition, so e.g.
 * "2 * GET_NET_STATS64(x)" would only multiply the high half).
 */
#define GET_NET_STATS64(ctr)					\
	(((unsigned long) ((unsigned long) (ctr##_hi) << 32) +	\
	  (unsigned long) (ctr##_lo)))

#define GET_NET_STATS32(ctr)		\
	(ctr##_lo)

#if (BITS_PER_LONG == 64)
#define GET_NET_STATS	GET_NET_STATS64
#else
#define GET_NET_STATS	GET_NET_STATS32
#endif
5283
static struct net_device_stats *
bnx2_get_stats(struct net_device *dev)
{
	/* Netdev get_stats: translate the hardware statistics block into
	 * struct net_device_stats.  Until the block has been allocated
	 * (device not yet opened), the previous counters are returned
	 * unchanged.
	 */
	struct bnx2 *bp = netdev_priv(dev);
	struct statistics_block *stats_blk = bp->stats_blk;
	struct net_device_stats *net_stats = &bp->net_stats;

	if (bp->stats_blk == NULL) {
		return net_stats;
	}
	/* GET_NET_STATS reads a _hi/_lo counter pair; on 32-bit hosts it
	 * truncates to the low 32 bits.
	 */
	net_stats->rx_packets =
		GET_NET_STATS(stats_blk->stat_IfHCInUcastPkts) +
		GET_NET_STATS(stats_blk->stat_IfHCInMulticastPkts) +
		GET_NET_STATS(stats_blk->stat_IfHCInBroadcastPkts);

	net_stats->tx_packets =
		GET_NET_STATS(stats_blk->stat_IfHCOutUcastPkts) +
		GET_NET_STATS(stats_blk->stat_IfHCOutMulticastPkts) +
		GET_NET_STATS(stats_blk->stat_IfHCOutBroadcastPkts);

	net_stats->rx_bytes =
		GET_NET_STATS(stats_blk->stat_IfHCInOctets);

	net_stats->tx_bytes =
		GET_NET_STATS(stats_blk->stat_IfHCOutOctets);

	net_stats->multicast =
		GET_NET_STATS(stats_blk->stat_IfHCOutMulticastPkts);

	net_stats->collisions =
		(unsigned long) stats_blk->stat_EtherStatsCollisions;

	net_stats->rx_length_errors =
		(unsigned long) (stats_blk->stat_EtherStatsUndersizePkts +
		stats_blk->stat_EtherStatsOverrsizePkts);

	net_stats->rx_over_errors =
		(unsigned long) stats_blk->stat_IfInMBUFDiscards;

	net_stats->rx_frame_errors =
		(unsigned long) stats_blk->stat_Dot3StatsAlignmentErrors;

	net_stats->rx_crc_errors =
		(unsigned long) stats_blk->stat_Dot3StatsFCSErrors;

	/* rx_errors is the sum of the categories computed above. */
	net_stats->rx_errors = net_stats->rx_length_errors +
		net_stats->rx_over_errors + net_stats->rx_frame_errors +
		net_stats->rx_crc_errors;

	net_stats->tx_aborted_errors =
		(unsigned long) (stats_blk->stat_Dot3StatsExcessiveCollisions +
		stats_blk->stat_Dot3StatsLateCollisions);

	/* NOTE(review): carrier sense errors are reported as 0 on 5706 and
	 * 5708 A0 -- presumably the counter is unreliable on those chips;
	 * confirm against the errata.
	 */
	if ((CHIP_NUM(bp) == CHIP_NUM_5706) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_A0))
		net_stats->tx_carrier_errors = 0;
	else {
		net_stats->tx_carrier_errors =
			(unsigned long)
			stats_blk->stat_Dot3StatsCarrierSenseErrors;
	}

	net_stats->tx_errors =
		(unsigned long)
		stats_blk->stat_emac_tx_stat_dot3statsinternalmactransmiterrors
		+
		net_stats->tx_aborted_errors +
		net_stats->tx_carrier_errors;

	net_stats->rx_missed_errors =
		(unsigned long) (stats_blk->stat_IfInMBUFDiscards +
		stats_blk->stat_FwRxDrop);

	return net_stats;
}
5359
5360 /* All ethtool functions called with rtnl_lock */
5361
5362 static int
5363 bnx2_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
5364 {
5365         struct bnx2 *bp = netdev_priv(dev);
5366
5367         cmd->supported = SUPPORTED_Autoneg;
5368         if (bp->phy_flags & PHY_SERDES_FLAG) {
5369                 cmd->supported |= SUPPORTED_1000baseT_Full |
5370                         SUPPORTED_FIBRE;
5371                 if (bp->phy_flags & PHY_2_5G_CAPABLE_FLAG)
5372                         cmd->supported |= SUPPORTED_2500baseX_Full;
5373
5374                 cmd->port = PORT_FIBRE;
5375         }
5376         else {
5377                 cmd->supported |= SUPPORTED_10baseT_Half |
5378                         SUPPORTED_10baseT_Full |
5379                         SUPPORTED_100baseT_Half |
5380                         SUPPORTED_100baseT_Full |
5381                         SUPPORTED_1000baseT_Full |
5382                         SUPPORTED_TP;
5383
5384                 cmd->port = PORT_TP;
5385         }
5386
5387         cmd->advertising = bp->advertising;
5388
5389         if (bp->autoneg & AUTONEG_SPEED) {
5390                 cmd->autoneg = AUTONEG_ENABLE;
5391         }
5392         else {
5393                 cmd->autoneg = AUTONEG_DISABLE;
5394         }
5395
5396         if (netif_carrier_ok(dev)) {
5397                 cmd->speed = bp->line_speed;
5398                 cmd->duplex = bp->duplex;
5399         }
5400         else {
5401                 cmd->speed = -1;
5402                 cmd->duplex = -1;
5403         }
5404
5405         cmd->transceiver = XCVR_INTERNAL;
5406         cmd->phy_address = bp->phy_addr;
5407
5408         return 0;
5409 }
5410
static int
bnx2_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	/* ethtool set_settings: validate the requested autoneg/speed/duplex
	 * combination against the PHY type (copper vs. SerDes), then commit
	 * the new settings and reprogram the PHY.
	 *
	 * The current settings are copied to locals first so nothing is
	 * modified until all validation has passed.
	 */
	struct bnx2 *bp = netdev_priv(dev);
	u8 autoneg = bp->autoneg;
	u8 req_duplex = bp->req_duplex;
	u16 req_line_speed = bp->req_line_speed;
	u32 advertising = bp->advertising;

	if (cmd->autoneg == AUTONEG_ENABLE) {
		autoneg |= AUTONEG_SPEED;

		cmd->advertising &= ETHTOOL_ALL_COPPER_SPEED;

		/* allow advertising 1 speed */
		if ((cmd->advertising == ADVERTISED_10baseT_Half) ||
			(cmd->advertising == ADVERTISED_10baseT_Full) ||
			(cmd->advertising == ADVERTISED_100baseT_Half) ||
			(cmd->advertising == ADVERTISED_100baseT_Full)) {

			/* 10/100 speeds are copper-only. */
			if (bp->phy_flags & PHY_SERDES_FLAG)
				return -EINVAL;

			advertising = cmd->advertising;

		} else if (cmd->advertising == ADVERTISED_2500baseX_Full) {
			/* NOTE(review): this branch validates 2.5G support
			 * but does not copy cmd->advertising into
			 * 'advertising' -- verify that is intentional.
			 */
			if (!(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
				return -EINVAL;
		} else if (cmd->advertising == ADVERTISED_1000baseT_Full) {
			advertising = cmd->advertising;
		}
		else if (cmd->advertising == ADVERTISED_1000baseT_Half) {
			/* 1000 half duplex is not supported. */
			return -EINVAL;
		}
		else {
			/* No single speed requested: advertise everything
			 * the PHY type supports.
			 */
			if (bp->phy_flags & PHY_SERDES_FLAG) {
				advertising = ETHTOOL_ALL_FIBRE_SPEED;
			}
			else {
				advertising = ETHTOOL_ALL_COPPER_SPEED;
			}
		}
		advertising |= ADVERTISED_Autoneg;
	}
	else {
		/* Forced speed/duplex. */
		if (bp->phy_flags & PHY_SERDES_FLAG) {
			/* SerDes only supports 1000/2500 full duplex. */
			if ((cmd->speed != SPEED_1000 &&
			     cmd->speed != SPEED_2500) ||
			    (cmd->duplex != DUPLEX_FULL))
				return -EINVAL;

			if (cmd->speed == SPEED_2500 &&
			    !(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
				return -EINVAL;
		}
		else if (cmd->speed == SPEED_1000) {
			/* Forced 1000 on copper is not supported. */
			return -EINVAL;
		}
		autoneg &= ~AUTONEG_SPEED;
		req_line_speed = cmd->speed;
		req_duplex = cmd->duplex;
		advertising = 0;
	}

	/* Validation passed: commit and reprogram the PHY. */
	bp->autoneg = autoneg;
	bp->advertising = advertising;
	bp->req_line_speed = req_line_speed;
	bp->req_duplex = req_duplex;

	spin_lock_bh(&bp->phy_lock);

	bnx2_setup_phy(bp, bp->phy_port);

	spin_unlock_bh(&bp->phy_lock);

	return 0;
}
5488
5489 static void
5490 bnx2_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
5491 {
5492         struct bnx2 *bp = netdev_priv(dev);
5493
5494         strcpy(info->driver, DRV_MODULE_NAME);
5495         strcpy(info->version, DRV_MODULE_VERSION);
5496         strcpy(info->bus_info, pci_name(bp->pdev));
5497         info->fw_version[0] = ((bp->fw_ver & 0xff000000) >> 24) + '0';
5498         info->fw_version[2] = ((bp->fw_ver & 0xff0000) >> 16) + '0';
5499         info->fw_version[4] = ((bp->fw_ver & 0xff00) >> 8) + '0';
5500         info->fw_version[1] = info->fw_version[3] = '.';
5501         info->fw_version[5] = 0;
5502 }
5503
/* Size in bytes of the ethtool register dump buffer. */
#define BNX2_REGDUMP_LEN                (32 * 1024)

/* ethtool: report the register dump size used by bnx2_get_regs(). */
static int
bnx2_get_regs_len(struct net_device *dev)
{
	return BNX2_REGDUMP_LEN;
}
5511
5512 static void
5513 bnx2_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *_p)
5514 {
5515         u32 *p = _p, i, offset;
5516         u8 *orig_p = _p;
5517         struct bnx2 *bp = netdev_priv(dev);
5518         u32 reg_boundaries[] = { 0x0000, 0x0098, 0x0400, 0x045c,
5519                                  0x0800, 0x0880, 0x0c00, 0x0c10,
5520                                  0x0c30, 0x0d08, 0x1000, 0x101c,
5521                                  0x1040, 0x1048, 0x1080, 0x10a4,
5522                                  0x1400, 0x1490, 0x1498, 0x14f0,
5523                                  0x1500, 0x155c, 0x1580, 0x15dc,
5524                                  0x1600, 0x1658, 0x1680, 0x16d8,
5525                                  0x1800, 0x1820, 0x1840, 0x1854,
5526                                  0x1880, 0x1894, 0x1900, 0x1984,
5527                                  0x1c00, 0x1c0c, 0x1c40, 0x1c54,
5528                                  0x1c80, 0x1c94, 0x1d00, 0x1d84,
5529                                  0x2000, 0x2030, 0x23c0, 0x2400,
5530                                  0x2800, 0x2820, 0x2830, 0x2850,
5531                                  0x2b40, 0x2c10, 0x2fc0, 0x3058,
5532                                  0x3c00, 0x3c94, 0x4000, 0x4010,
5533                                  0x4080, 0x4090, 0x43c0, 0x4458,
5534                                  0x4c00, 0x4c18, 0x4c40, 0x4c54,
5535                                  0x4fc0, 0x5010, 0x53c0, 0x5444,
5536                                  0x5c00, 0x5c18, 0x5c80, 0x5c90,
5537                                  0x5fc0, 0x6000, 0x6400, 0x6428,
5538                                  0x6800, 0x6848, 0x684c, 0x6860,
5539                                  0x6888, 0x6910, 0x8000 };
5540
5541         regs->version = 0;
5542
5543         memset(p, 0, BNX2_REGDUMP_LEN);
5544
5545         if (!netif_running(bp->dev))
5546                 return;
5547
5548         i = 0;
5549         offset = reg_boundaries[0];
5550         p += offset;
5551         while (offset < BNX2_REGDUMP_LEN) {
5552                 *p++ = REG_RD(bp, offset);
5553                 offset += 4;
5554                 if (offset == reg_boundaries[i + 1]) {
5555                         offset = reg_boundaries[i + 2];
5556                         p = (u32 *) (orig_p + offset);
5557                         i += 2;
5558                 }
5559         }
5560 }
5561
5562 static void
5563 bnx2_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
5564 {
5565         struct bnx2 *bp = netdev_priv(dev);
5566
5567         if (bp->flags & NO_WOL_FLAG) {
5568                 wol->supported = 0;
5569                 wol->wolopts = 0;
5570         }
5571         else {
5572                 wol->supported = WAKE_MAGIC;
5573                 if (bp->wol)
5574                         wol->wolopts = WAKE_MAGIC;
5575                 else
5576                         wol->wolopts = 0;
5577         }
5578         memset(&wol->sopass, 0, sizeof(wol->sopass));
5579 }
5580
5581 static int
5582 bnx2_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
5583 {
5584         struct bnx2 *bp = netdev_priv(dev);
5585
5586         if (wol->wolopts & ~WAKE_MAGIC)
5587                 return -EINVAL;
5588
5589         if (wol->wolopts & WAKE_MAGIC) {
5590                 if (bp->flags & NO_WOL_FLAG)
5591                         return -EINVAL;
5592
5593                 bp->wol = 1;
5594         }
5595         else {
5596                 bp->wol = 0;
5597         }
5598         return 0;
5599 }
5600
static int
bnx2_nway_reset(struct net_device *dev)
{
	/* ethtool nway_reset: restart autonegotiation.  Only valid when
	 * speed autoneg is enabled.
	 */
	struct bnx2 *bp = netdev_priv(dev);
	u32 bmcr;

	if (!(bp->autoneg & AUTONEG_SPEED)) {
		return -EINVAL;
	}

	spin_lock_bh(&bp->phy_lock);

	/* Force a link down visible on the other side */
	if (bp->phy_flags & PHY_SERDES_FLAG) {
		bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
		/* Drop the lock around the sleep; msleep() cannot be
		 * called with a BH-disabled spinlock held.
		 */
		spin_unlock_bh(&bp->phy_lock);

		msleep(20);

		spin_lock_bh(&bp->phy_lock);

		/* Re-arm the SerDes autoneg timeout handled by bp->timer. */
		bp->current_interval = SERDES_AN_TIMEOUT;
		bp->serdes_an_pending = 1;
		mod_timer(&bp->timer, jiffies + bp->current_interval);
	}

	/* Clear loopback and restart autonegotiation. */
	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
	bmcr &= ~BMCR_LOOPBACK;
	bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART | BMCR_ANENABLE);

	spin_unlock_bh(&bp->phy_lock);

	return 0;
}
5635
5636 static int
5637 bnx2_get_eeprom_len(struct net_device *dev)
5638 {
5639         struct bnx2 *bp = netdev_priv(dev);
5640
5641         if (bp->flash_info == NULL)
5642                 return 0;
5643
5644         return (int) bp->flash_size;
5645 }
5646
5647 static int
5648 bnx2_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
5649                 u8 *eebuf)
5650 {
5651         struct bnx2 *bp = netdev_priv(dev);
5652         int rc;
5653
5654         /* parameters already validated in ethtool_get_eeprom */
5655
5656         rc = bnx2_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
5657
5658         return rc;
5659 }
5660
5661 static int
5662 bnx2_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
5663                 u8 *eebuf)
5664 {
5665         struct bnx2 *bp = netdev_priv(dev);
5666         int rc;
5667
5668         /* parameters already validated in ethtool_set_eeprom */
5669
5670         rc = bnx2_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
5671
5672         return rc;
5673 }
5674
5675 static int
5676 bnx2_get_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
5677 {
5678         struct bnx2 *bp = netdev_priv(dev);
5679
5680         memset(coal, 0, sizeof(struct ethtool_coalesce));
5681
5682         coal->rx_coalesce_usecs = bp->rx_ticks;
5683         coal->rx_max_coalesced_frames = bp->rx_quick_cons_trip;
5684         coal->rx_coalesce_usecs_irq = bp->rx_ticks_int;
5685         coal->rx_max_coalesced_frames_irq = bp->rx_quick_cons_trip_int;
5686
5687         coal->tx_coalesce_usecs = bp->tx_ticks;
5688         coal->tx_max_coalesced_frames = bp->tx_quick_cons_trip;
5689         coal->tx_coalesce_usecs_irq = bp->tx_ticks_int;
5690         coal->tx_max_coalesced_frames_irq = bp->tx_quick_cons_trip_int;
5691
5692         coal->stats_block_coalesce_usecs = bp->stats_ticks;
5693
5694         return 0;
5695 }
5696
5697 static int
5698 bnx2_set_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
5699 {
5700         struct bnx2 *bp = netdev_priv(dev);
5701
5702         bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
5703         if (bp->rx_ticks > 0x3ff) bp->rx_ticks = 0x3ff;
5704
5705         bp->rx_quick_cons_trip = (u16) coal->rx_max_coalesced_frames;
5706         if (bp->rx_quick_cons_trip > 0xff) bp->rx_quick_cons_trip = 0xff;
5707
5708         bp->rx_ticks_int = (u16) coal->rx_coalesce_usecs_irq;
5709         if (bp->rx_ticks_int > 0x3ff) bp->rx_ticks_int = 0x3ff;
5710
5711         bp->rx_quick_cons_trip_int = (u16) coal->rx_max_coalesced_frames_irq;
5712         if (bp->rx_quick_cons_trip_int > 0xff)
5713                 bp->rx_quick_cons_trip_int = 0xff;
5714
5715         bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
5716         if (bp->tx_ticks > 0x3ff) bp->tx_ticks = 0x3ff;
5717
5718         bp->tx_quick_cons_trip = (u16) coal->tx_max_coalesced_frames;
5719         if (bp->tx_quick_cons_trip > 0xff) bp->tx_quick_cons_trip = 0xff;
5720
5721         bp->tx_ticks_int = (u16) coal->tx_coalesce_usecs_irq;
5722         if (bp->tx_ticks_int > 0x3ff) bp->tx_ticks_int = 0x3ff;
5723
5724         bp->tx_quick_cons_trip_int = (u16) coal->tx_max_coalesced_frames_irq;
5725         if (bp->tx_quick_cons_trip_int > 0xff) bp->tx_quick_cons_trip_int =
5726                 0xff;
5727
5728         bp->stats_ticks = coal->stats_block_coalesce_usecs;
5729         if (CHIP_NUM(bp) == CHIP_NUM_5708) {
5730                 if (bp->stats_ticks != 0 && bp->stats_ticks != USEC_PER_SEC)
5731                         bp->stats_ticks = USEC_PER_SEC;
5732         }
5733         if (bp->stats_ticks > 0xffff00) bp->stats_ticks = 0xffff00;
5734         bp->stats_ticks &= 0xffff00;
5735
5736         if (netif_running(bp->dev)) {
5737                 bnx2_netif_stop(bp);
5738                 bnx2_init_nic(bp);
5739                 bnx2_netif_start(bp);
5740         }
5741
5742         return 0;
5743 }
5744
5745 static void
5746 bnx2_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
5747 {
5748         struct bnx2 *bp = netdev_priv(dev);
5749
5750         ering->rx_max_pending = MAX_TOTAL_RX_DESC_CNT;
5751         ering->rx_mini_max_pending = 0;
5752         ering->rx_jumbo_max_pending = 0;
5753
5754         ering->rx_pending = bp->rx_ring_size;
5755         ering->rx_mini_pending = 0;
5756         ering->rx_jumbo_pending = 0;
5757
5758         ering->tx_max_pending = MAX_TX_DESC_CNT;
5759         ering->tx_pending = bp->tx_ring_size;
5760 }
5761
static int
bnx2_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
{
	/* ethtool: resize the RX/TX rings.  The TX ring must keep more
	 * than MAX_SKB_FRAGS entries so a maximally-fragmented skb fits.
	 */
	struct bnx2 *bp = netdev_priv(dev);

	if ((ering->rx_pending > MAX_TOTAL_RX_DESC_CNT) ||
		(ering->tx_pending > MAX_TX_DESC_CNT) ||
		(ering->tx_pending <= MAX_SKB_FRAGS)) {

		return -EINVAL;
	}
	/* Tear the rings down before resizing. */
	if (netif_running(bp->dev)) {
		bnx2_netif_stop(bp);
		bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
		bnx2_free_skbs(bp);
		bnx2_free_mem(bp);
	}

	bnx2_set_rx_ring_size(bp, ering->rx_pending);
	bp->tx_ring_size = ering->tx_pending;

	if (netif_running(bp->dev)) {
		int rc;

		rc = bnx2_alloc_mem(bp);
		/* NOTE(review): on allocation failure the device is left
		 * stopped with its memory freed while netif_running() is
		 * still true -- verify the recovery path for this case.
		 */
		if (rc)
			return rc;
		bnx2_init_nic(bp);
		bnx2_netif_start(bp);
	}

	return 0;
}
5795
5796 static void
5797 bnx2_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
5798 {
5799         struct bnx2 *bp = netdev_priv(dev);
5800
5801         epause->autoneg = ((bp->autoneg & AUTONEG_FLOW_CTRL) != 0);
5802         epause->rx_pause = ((bp->flow_ctrl & FLOW_CTRL_RX) != 0);
5803         epause->tx_pause = ((bp->flow_ctrl & FLOW_CTRL_TX) != 0);
5804 }
5805
5806 static int
5807 bnx2_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
5808 {
5809         struct bnx2 *bp = netdev_priv(dev);
5810
5811         bp->req_flow_ctrl = 0;
5812         if (epause->rx_pause)
5813                 bp->req_flow_ctrl |= FLOW_CTRL_RX;
5814         if (epause->tx_pause)
5815                 bp->req_flow_ctrl |= FLOW_CTRL_TX;
5816
5817         if (epause->autoneg) {
5818                 bp->autoneg |= AUTONEG_FLOW_CTRL;
5819         }
5820         else {
5821                 bp->autoneg &= ~AUTONEG_FLOW_CTRL;
5822         }
5823
5824         spin_lock_bh(&bp->phy_lock);
5825
5826         bnx2_setup_phy(bp, bp->phy_port);
5827
5828         spin_unlock_bh(&bp->phy_lock);
5829
5830         return 0;
5831 }
5832
static u32
bnx2_get_rx_csum(struct net_device *dev)
{
	/* ethtool: report whether RX checksum offload is enabled. */
	struct bnx2 *bp = netdev_priv(dev);

	return bp->rx_csum;
}
5840
static int
bnx2_set_rx_csum(struct net_device *dev, u32 data)
{
	/* ethtool: enable/disable RX checksum offload.  Only the flag is
	 * recorded here; presumably the RX path consults bp->rx_csum --
	 * confirm against the receive handler.
	 */
	struct bnx2 *bp = netdev_priv(dev);

	bp->rx_csum = data;
	return 0;
}
5849
5850 static int
5851 bnx2_set_tso(struct net_device *dev, u32 data)
5852 {
5853         struct bnx2 *bp = netdev_priv(dev);
5854
5855         if (data) {
5856                 dev->features |= NETIF_F_TSO | NETIF_F_TSO_ECN;
5857                 if (CHIP_NUM(bp) == CHIP_NUM_5709)
5858                         dev->features |= NETIF_F_TSO6;
5859         } else
5860                 dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6 |
5861                                    NETIF_F_TSO_ECN);
5862         return 0;
5863 }
5864
#define BNX2_NUM_STATS 46

/* ethtool statistics names.  The entry order must match
 * bnx2_stats_offset_arr below, which maps each name to its counter in
 * struct statistics_block.
 */
static struct {
	char string[ETH_GSTRING_LEN];
} bnx2_stats_str_arr[BNX2_NUM_STATS] = {
	{ "rx_bytes" },
	{ "rx_error_bytes" },
	{ "tx_bytes" },
	{ "tx_error_bytes" },
	{ "rx_ucast_packets" },
	{ "rx_mcast_packets" },
	{ "rx_bcast_packets" },
	{ "tx_ucast_packets" },
	{ "tx_mcast_packets" },
	{ "tx_bcast_packets" },
	{ "tx_mac_errors" },
	{ "tx_carrier_errors" },
	{ "rx_crc_errors" },
	{ "rx_align_errors" },
	{ "tx_single_collisions" },
	{ "tx_multi_collisions" },
	{ "tx_deferred" },
	{ "tx_excess_collisions" },
	{ "tx_late_collisions" },
	{ "tx_total_collisions" },
	{ "rx_fragments" },
	{ "rx_jabbers" },
	{ "rx_undersize_packets" },
	{ "rx_oversize_packets" },
	{ "rx_64_byte_packets" },
	{ "rx_65_to_127_byte_packets" },
	{ "rx_128_to_255_byte_packets" },
	{ "rx_256_to_511_byte_packets" },
	{ "rx_512_to_1023_byte_packets" },
	{ "rx_1024_to_1522_byte_packets" },
	{ "rx_1523_to_9022_byte_packets" },
	{ "tx_64_byte_packets" },
	{ "tx_65_to_127_byte_packets" },
	{ "tx_128_to_255_byte_packets" },
	{ "tx_256_to_511_byte_packets" },
	{ "tx_512_to_1023_byte_packets" },
	{ "tx_1024_to_1522_byte_packets" },
	{ "tx_1523_to_9022_byte_packets" },
	{ "rx_xon_frames" },
	{ "rx_xoff_frames" },
	{ "tx_xon_frames" },
	{ "tx_xoff_frames" },
	{ "rx_mac_ctrl_frames" },
	{ "rx_filtered_packets" },
	{ "rx_discards" },
	{ "rx_fw_discards" },
};
5917
/* Word (u32) index of a named counter within struct statistics_block. */
#define STATS_OFFSET32(offset_name) (offsetof(struct statistics_block, offset_name) / 4)
5919
5920 static const unsigned long bnx2_stats_offset_arr[BNX2_NUM_STATS] = {
5921     STATS_OFFSET32(stat_IfHCInOctets_hi),
5922     STATS_OFFSET32(stat_IfHCInBadOctets_hi),
5923     STATS_OFFSET32(stat_IfHCOutOctets_hi),
5924     STATS_OFFSET32(stat_IfHCOutBadOctets_hi),
5925     STATS_OFFSET32(stat_IfHCInUcastPkts_hi),
5926     STATS_OFFSET32(stat_IfHCInMulticastPkts_hi),
5927     STATS_OFFSET32(stat_IfHCInBroadcastPkts_hi),
5928     STATS_OFFSET32(stat_IfHCOutUcastPkts_hi),
5929     STATS_OFFSET32(stat_IfHCOutMulticastPkts_hi),
5930     STATS_OFFSET32(stat_IfHCOutBroadcastPkts_hi),
5931     STATS_OFFSET32(stat_emac_tx_stat_dot3statsinternalmactransmiterrors),
5932     STATS_OFFSET32(stat_Dot3StatsCarrierSenseErrors),
5933     STATS_OFFSET32(stat_Dot3StatsFCSErrors),
5934     STATS_OFFSET32(stat_Dot3StatsAlignmentErrors),
5935     STATS_OFFSET32(stat_Dot3StatsSingleCollisionFrames),
5936     STATS_OFFSET32(stat_Dot3StatsMultipleCollisionFrames),
5937     STATS_OFFSET32(stat_Dot3StatsDeferredTransmissions),
5938     STATS_OFFSET32(stat_Dot3StatsExcessiveCollisions),
5939     STATS_OFFSET32(stat_Dot3StatsLateCollisions),
5940     STATS_OFFSET32(stat_EtherStatsCollisions),
5941     STATS_OFFSET32(stat_EtherStatsFragments),
5942     STATS_OFFSET32(stat_EtherStatsJabbers),
5943     STATS_OFFSET32(stat_EtherStatsUndersizePkts),
5944     STATS_OFFSET32(stat_EtherStatsOverrsizePkts),
5945     STATS_OFFSET32(stat_EtherStatsPktsRx64Octets),
5946     STATS_OFFSET32(stat_EtherStatsPktsRx65Octetsto127Octets),
5947     STATS_OFFSET32(stat_EtherStatsPktsRx128Octetsto255Octets),
5948     STATS_OFFSET32(stat_EtherStatsPktsRx256Octetsto511Octets),
5949     STATS_OFFSET32(stat_EtherStatsPktsRx512Octetsto1023Octets),
5950     STATS_OFFSET32(stat_EtherStatsPktsRx1024Octetsto1522Octets),
5951     STATS_OFFSET32(stat_EtherStatsPktsRx1523Octetsto9022Octets),
5952     STATS_OFFSET32(stat_EtherStatsPktsTx64Octets),
5953     STATS_OFFSET32(stat_EtherStatsPktsTx65Octetsto127Octets),
5954     STATS_OFFSET32(stat_EtherStatsPktsTx128Octetsto255Octets),
5955     STATS_OFFSET32(stat_EtherStatsPktsTx256Octetsto511Octets),
5956     STATS_OFFSET32(stat_EtherStatsPktsTx512Octetsto1023Octets),
5957     STATS_OFFSET32(stat_EtherStatsPktsTx1024Octetsto1522Octets),
5958     STATS_OFFSET32(stat_EtherStatsPktsTx1523Octetsto9022Octets),
5959     STATS_OFFSET32(stat_XonPauseFramesReceived),
5960     STATS_OFFSET32(stat_XoffPauseFramesReceived),
5961     STATS_OFFSET32(stat_OutXonSent),
5962     STATS_OFFSET32(stat_OutXoffSent),
5963     STATS_OFFSET32(stat_MacControlFramesReceived),
5964     STATS_OFFSET32(stat_IfInFramesL2FilterDiscards),
5965     STATS_OFFSET32(stat_IfInMBUFDiscards),
5966     STATS_OFFSET32(stat_FwRxDrop),
5967 };
5968
/* stat_IfHCInBadOctets and stat_Dot3StatsCarrierSenseErrors are
 * skipped because of errata.
 */
/* Per-counter width in bytes (8 = 64-bit, 4 = 32-bit, 0 = skipped and
 * reported as zero) for 5706 A0-A2 and 5708 A0 chips.  Indexed in the
 * same order as bnx2_stats_offset_arr.
 */
static u8 bnx2_5706_stats_len_arr[BNX2_NUM_STATS] = {
	8,0,8,8,8,8,8,8,8,8,
	4,0,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,
};
5979
/* Per-counter width in bytes for all other chip revisions; only
 * stat_IfHCInBadOctets (index 1) is skipped here.  Indexed in the
 * same order as bnx2_stats_offset_arr.
 */
static u8 bnx2_5708_stats_len_arr[BNX2_NUM_STATS] = {
	8,0,8,8,8,8,8,8,8,8,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,
};
5987
#define BNX2_NUM_TESTS 6

/* Names reported to ethtool for each self-test, in the order the
 * results are written into the result buffer by bnx2_self_test().
 */
static struct {
	char string[ETH_GSTRING_LEN];
} bnx2_tests_str_arr[BNX2_NUM_TESTS] = {
	{ "register_test (offline)" },
	{ "memory_test (offline)" },
	{ "loopback_test (offline)" },
	{ "nvram_test (online)" },
	{ "interrupt_test (online)" },
	{ "link_test (online)" },
};
6000
/* ethtool self_test_count hook: number of results bnx2_self_test()
 * will fill in.
 */
static int
bnx2_self_test_count(struct net_device *dev)
{
	return BNX2_NUM_TESTS;
}
6006
/* ethtool self-test handler.  The offline tests (register, memory,
 * loopback) require quiescing the interface and resetting the chip in
 * diagnostic mode; the online tests (nvram, interrupt, link) run on
 * the live device.  Each buf[] slot is non-zero on failure, in the
 * order of bnx2_tests_str_arr.
 */
static void
bnx2_self_test(struct net_device *dev, struct ethtool_test *etest, u64 *buf)
{
	struct bnx2 *bp = netdev_priv(dev);

	memset(buf, 0, sizeof(u64) * BNX2_NUM_TESTS);
	if (etest->flags & ETH_TEST_FL_OFFLINE) {
		int i;

		/* Stop traffic and put the chip in diagnostic mode before
		 * running the destructive offline tests.
		 */
		bnx2_netif_stop(bp);
		bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_DIAG);
		bnx2_free_skbs(bp);

		if (bnx2_test_registers(bp) != 0) {
			buf[0] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}
		if (bnx2_test_memory(bp) != 0) {
			buf[1] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}
		/* Loopback result is stored directly; non-zero means failure */
		if ((buf[2] = bnx2_test_loopback(bp)) != 0)
			etest->flags |= ETH_TEST_FL_FAILED;

		/* Restore normal operation after the diagnostic reset */
		if (!netif_running(bp->dev)) {
			bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
		}
		else {
			bnx2_init_nic(bp);
			bnx2_netif_start(bp);
		}

		/* wait for link up (poll once a second, up to 7 seconds) */
		for (i = 0; i < 7; i++) {
			if (bp->link_up)
				break;
			msleep_interruptible(1000);
		}
	}

	if (bnx2_test_nvram(bp) != 0) {
		buf[3] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}
	if (bnx2_test_intr(bp) != 0) {
		buf[4] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}

	if (bnx2_test_link(bp) != 0) {
		buf[5] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;

	}
}
6062
6063 static void
6064 bnx2_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
6065 {
6066         switch (stringset) {
6067         case ETH_SS_STATS:
6068                 memcpy(buf, bnx2_stats_str_arr,
6069                         sizeof(bnx2_stats_str_arr));
6070                 break;
6071         case ETH_SS_TEST:
6072                 memcpy(buf, bnx2_tests_str_arr,
6073                         sizeof(bnx2_tests_str_arr));
6074                 break;
6075         }
6076 }
6077
/* ethtool get_stats_count hook: number of statistics entries filled
 * in by bnx2_get_ethtool_stats().
 */
static int
bnx2_get_stats_count(struct net_device *dev)
{
	return BNX2_NUM_STATS;
}
6083
6084 static void
6085 bnx2_get_ethtool_stats(struct net_device *dev,
6086                 struct ethtool_stats *stats, u64 *buf)
6087 {
6088         struct bnx2 *bp = netdev_priv(dev);
6089         int i;
6090         u32 *hw_stats = (u32 *) bp->stats_blk;
6091         u8 *stats_len_arr = NULL;
6092
6093         if (hw_stats == NULL) {
6094                 memset(buf, 0, sizeof(u64) * BNX2_NUM_STATS);
6095                 return;
6096         }
6097
6098         if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
6099             (CHIP_ID(bp) == CHIP_ID_5706_A1) ||
6100             (CHIP_ID(bp) == CHIP_ID_5706_A2) ||
6101             (CHIP_ID(bp) == CHIP_ID_5708_A0))
6102                 stats_len_arr = bnx2_5706_stats_len_arr;
6103         else
6104                 stats_len_arr = bnx2_5708_stats_len_arr;
6105
6106         for (i = 0; i < BNX2_NUM_STATS; i++) {
6107                 if (stats_len_arr[i] == 0) {
6108                         /* skip this counter */
6109                         buf[i] = 0;
6110                         continue;
6111                 }
6112                 if (stats_len_arr[i] == 4) {
6113                         /* 4-byte counter */
6114                         buf[i] = (u64)
6115                                 *(hw_stats + bnx2_stats_offset_arr[i]);
6116                         continue;
6117                 }
6118                 /* 8-byte counter */
6119                 buf[i] = (((u64) *(hw_stats +
6120                                         bnx2_stats_offset_arr[i])) << 32) +
6121                                 *(hw_stats + bnx2_stats_offset_arr[i] + 1);
6122         }
6123 }
6124
6125 static int
6126 bnx2_phys_id(struct net_device *dev, u32 data)
6127 {
6128         struct bnx2 *bp = netdev_priv(dev);
6129         int i;
6130         u32 save;
6131
6132         if (data == 0)
6133                 data = 2;
6134
6135         save = REG_RD(bp, BNX2_MISC_CFG);
6136         REG_WR(bp, BNX2_MISC_CFG, BNX2_MISC_CFG_LEDMODE_MAC);
6137
6138         for (i = 0; i < (data * 2); i++) {
6139                 if ((i % 2) == 0) {
6140                         REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE);
6141                 }
6142                 else {
6143                         REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE |
6144                                 BNX2_EMAC_LED_1000MB_OVERRIDE |
6145                                 BNX2_EMAC_LED_100MB_OVERRIDE |
6146                                 BNX2_EMAC_LED_10MB_OVERRIDE |
6147                                 BNX2_EMAC_LED_TRAFFIC_OVERRIDE |
6148                                 BNX2_EMAC_LED_TRAFFIC);
6149                 }
6150                 msleep_interruptible(500);
6151                 if (signal_pending(current))
6152                         break;
6153         }
6154         REG_WR(bp, BNX2_EMAC_LED, 0);
6155         REG_WR(bp, BNX2_MISC_CFG, save);
6156         return 0;
6157 }
6158
6159 static int
6160 bnx2_set_tx_csum(struct net_device *dev, u32 data)
6161 {
6162         struct bnx2 *bp = netdev_priv(dev);
6163
6164         if (CHIP_NUM(bp) == CHIP_NUM_5709)
6165                 return (ethtool_op_set_tx_hw_csum(dev, data));
6166         else
6167                 return (ethtool_op_set_tx_csum(dev, data));
6168 }
6169
/* ethtool operations table; hooks not implemented by this driver fall
 * back to the generic ethtool_op_* helpers.
 */
static const struct ethtool_ops bnx2_ethtool_ops = {
	.get_settings		= bnx2_get_settings,
	.set_settings		= bnx2_set_settings,
	.get_drvinfo		= bnx2_get_drvinfo,
	.get_regs_len		= bnx2_get_regs_len,
	.get_regs		= bnx2_get_regs,
	.get_wol		= bnx2_get_wol,
	.set_wol		= bnx2_set_wol,
	.nway_reset		= bnx2_nway_reset,
	.get_link		= ethtool_op_get_link,
	.get_eeprom_len		= bnx2_get_eeprom_len,
	.get_eeprom		= bnx2_get_eeprom,
	.set_eeprom		= bnx2_set_eeprom,
	.get_coalesce		= bnx2_get_coalesce,
	.set_coalesce		= bnx2_set_coalesce,
	.get_ringparam		= bnx2_get_ringparam,
	.set_ringparam		= bnx2_set_ringparam,
	.get_pauseparam		= bnx2_get_pauseparam,
	.set_pauseparam		= bnx2_set_pauseparam,
	.get_rx_csum		= bnx2_get_rx_csum,
	.set_rx_csum		= bnx2_set_rx_csum,
	.get_tx_csum		= ethtool_op_get_tx_csum,
	.set_tx_csum		= bnx2_set_tx_csum,
	.get_sg			= ethtool_op_get_sg,
	.set_sg			= ethtool_op_set_sg,
	.get_tso		= ethtool_op_get_tso,
	.set_tso		= bnx2_set_tso,
	.self_test_count	= bnx2_self_test_count,
	.self_test		= bnx2_self_test,
	.get_strings		= bnx2_get_strings,
	.phys_id		= bnx2_phys_id,
	.get_stats_count	= bnx2_get_stats_count,
	.get_ethtool_stats	= bnx2_get_ethtool_stats,
	.get_perm_addr		= ethtool_op_get_perm_addr,
};
6205
/* Called with rtnl_lock */
/* MII ioctl handler: report the PHY address (SIOCGMIIPHY) and allow
 * raw PHY register reads/writes (SIOCGMIIREG/SIOCSMIIREG).  PHY access
 * requires the interface to be up and is serialized by phy_lock.
 */
static int
bnx2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct mii_ioctl_data *data = if_mii(ifr);
	struct bnx2 *bp = netdev_priv(dev);
	int err;

	switch(cmd) {
	case SIOCGMIIPHY:
		data->phy_id = bp->phy_addr;

		/* fallthru */
	case SIOCGMIIREG: {
		u32 mii_regval;

		if (!netif_running(dev))
			return -EAGAIN;

		spin_lock_bh(&bp->phy_lock);
		err = bnx2_read_phy(bp, data->reg_num & 0x1f, &mii_regval);
		spin_unlock_bh(&bp->phy_lock);

		data->val_out = mii_regval;

		return err;
	}

	case SIOCSMIIREG:
		/* Writing PHY registers is privileged */
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;

		if (!netif_running(dev))
			return -EAGAIN;

		spin_lock_bh(&bp->phy_lock);
		err = bnx2_write_phy(bp, data->reg_num & 0x1f, data->val_in);
		spin_unlock_bh(&bp->phy_lock);

		return err;

	default:
		/* do nothing */
		break;
	}
	return -EOPNOTSUPP;
}
6253
6254 /* Called with rtnl_lock */
6255 static int
6256 bnx2_change_mac_addr(struct net_device *dev, void *p)
6257 {
6258         struct sockaddr *addr = p;
6259         struct bnx2 *bp = netdev_priv(dev);
6260
6261         if (!is_valid_ether_addr(addr->sa_data))
6262                 return -EINVAL;
6263
6264         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
6265         if (netif_running(dev))
6266                 bnx2_set_mac_addr(bp);
6267
6268         return 0;
6269 }
6270
6271 /* Called with rtnl_lock */
6272 static int
6273 bnx2_change_mtu(struct net_device *dev, int new_mtu)
6274 {
6275         struct bnx2 *bp = netdev_priv(dev);
6276
6277         if (((new_mtu + ETH_HLEN) > MAX_ETHERNET_JUMBO_PACKET_SIZE) ||
6278                 ((new_mtu + ETH_HLEN) < MIN_ETHERNET_PACKET_SIZE))
6279                 return -EINVAL;
6280
6281         dev->mtu = new_mtu;
6282         if (netif_running(dev)) {
6283                 bnx2_netif_stop(bp);
6284
6285                 bnx2_init_nic(bp);
6286
6287                 bnx2_netif_start(bp);
6288         }
6289         return 0;
6290 }
6291
#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
/* netpoll controller: run the interrupt handler with the device IRQ
 * masked, for netconsole and similar polling users.
 */
static void
poll_bnx2(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	unsigned int irq = bp->pdev->irq;

	disable_irq(irq);
	bnx2_interrupt(irq, dev);
	enable_irq(irq);
}
#endif
6303
/* Determine whether a 5709 port is wired for SERDES (fiber) or copper
 * from the bond id and strap bits in MISC_DUAL_MEDIA_CTRL, setting
 * PHY_SERDES_FLAG for SERDES ports.
 */
static void __devinit
bnx2_get_5709_media(struct bnx2 *bp)
{
	u32 val = REG_RD(bp, BNX2_MISC_DUAL_MEDIA_CTRL);
	u32 bond_id = val & BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID;
	u32 strap;

	/* Single-media bond ids decide the answer directly */
	if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_C)
		return;
	else if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_S) {
		bp->phy_flags |= PHY_SERDES_FLAG;
		return;
	}

	/* Dual-media part: read the strap, preferring a software override */
	if (val & BNX2_MISC_DUAL_MEDIA_CTRL_STRAP_OVERRIDE)
		strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL) >> 21;
	else
		strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL_STRAP) >> 8;

	/* The strap-value-to-media mapping differs per PCI function */
	if (PCI_FUNC(bp->pdev->devfn) == 0) {
		switch (strap) {
		case 0x4:
		case 0x5:
		case 0x6:
			bp->phy_flags |= PHY_SERDES_FLAG;
			return;
		}
	} else {
		switch (strap) {
		case 0x1:
		case 0x2:
		case 0x4:
			bp->phy_flags |= PHY_SERDES_FLAG;
			return;
		}
	}
}
6341
/* Probe-time detection of the PCI/PCI-X bus mode, clock speed and
 * width from PCICFG status registers; results are recorded in
 * bp->flags (PCIX_FLAG, PCI_32BIT_FLAG) and bp->bus_speed_mhz.
 */
static void __devinit
bnx2_get_pci_speed(struct bnx2 *bp)
{
	u32 reg;

	reg = REG_RD(bp, BNX2_PCICFG_MISC_STATUS);
	if (reg & BNX2_PCICFG_MISC_STATUS_PCIX_DET) {
		u32 clkreg;

		bp->flags |= PCIX_FLAG;

		/* PCI-X: decode the detected clock speed range */
		clkreg = REG_RD(bp, BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS);

		clkreg &= BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET;
		switch (clkreg) {
		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_133MHZ:
			bp->bus_speed_mhz = 133;
			break;

		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_95MHZ:
			bp->bus_speed_mhz = 100;
			break;

		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_66MHZ:
		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_80MHZ:
			bp->bus_speed_mhz = 66;
			break;

		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_48MHZ:
		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_55MHZ:
			bp->bus_speed_mhz = 50;
			break;

		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_LOW:
		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_32MHZ:
		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_38MHZ:
			bp->bus_speed_mhz = 33;
			break;
		}
	}
	else {
		/* Conventional PCI: only 33 vs 66 MHz (M66EN pin) */
		if (reg & BNX2_PCICFG_MISC_STATUS_M66EN)
			bp->bus_speed_mhz = 66;
		else
			bp->bus_speed_mhz = 33;
	}

	if (reg & BNX2_PCICFG_MISC_STATUS_32BIT_DET)
		bp->flags |= PCI_32BIT_FLAG;

}
6393
6394 static int __devinit
6395 bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
6396 {
6397         struct bnx2 *bp;
6398         unsigned long mem_len;
6399         int rc;
6400         u32 reg;
6401         u64 dma_mask, persist_dma_mask;
6402
6403         SET_MODULE_OWNER(dev);
6404         SET_NETDEV_DEV(dev, &pdev->dev);
6405         bp = netdev_priv(dev);
6406
6407         bp->flags = 0;
6408         bp->phy_flags = 0;
6409
6410         /* enable device (incl. PCI PM wakeup), and bus-mastering */
6411         rc = pci_enable_device(pdev);
6412         if (rc) {
6413                 dev_err(&pdev->dev, "Cannot enable PCI device, aborting.");
6414                 goto err_out;
6415         }
6416
6417         if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
6418                 dev_err(&pdev->dev,
6419                         "Cannot find PCI device base address, aborting.\n");
6420                 rc = -ENODEV;
6421                 goto err_out_disable;
6422         }
6423
6424         rc = pci_request_regions(pdev, DRV_MODULE_NAME);
6425         if (rc) {
6426                 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting.\n");
6427                 goto err_out_disable;
6428         }
6429
6430         pci_set_master(pdev);
6431
6432         bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
6433         if (bp->pm_cap == 0) {
6434                 dev_err(&pdev->dev,
6435                         "Cannot find power management capability, aborting.\n");
6436                 rc = -EIO;
6437                 goto err_out_release;
6438         }
6439
6440         bp->dev = dev;
6441         bp->pdev = pdev;
6442
6443         spin_lock_init(&bp->phy_lock);
6444         spin_lock_init(&bp->indirect_lock);
6445         INIT_WORK(&bp->reset_task, bnx2_reset_task);
6446
6447         dev->base_addr = dev->mem_start = pci_resource_start(pdev, 0);
6448         mem_len = MB_GET_CID_ADDR(TX_TSS_CID + 1);
6449         dev->mem_end = dev->mem_start + mem_len;
6450         dev->irq = pdev->irq;
6451
6452         bp->regview = ioremap_nocache(dev->base_addr, mem_len);
6453
6454         if (!bp->regview) {
6455                 dev_err(&pdev->dev, "Cannot map register space, aborting.\n");
6456                 rc = -ENOMEM;
6457                 goto err_out_release;
6458         }
6459
6460         /* Configure byte swap and enable write to the reg_window registers.
6461          * Rely on CPU to do target byte swapping on big endian systems
6462          * The chip's target access swapping will not swap all accesses
6463          */
6464         pci_write_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG,
6465                                BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
6466                                BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP);
6467
6468         bnx2_set_power_state(bp, PCI_D0);
6469
6470         bp->chip_id = REG_RD(bp, BNX2_MISC_ID);
6471
6472         if (CHIP_NUM(bp) == CHIP_NUM_5709) {
6473                 if (pci_find_capability(pdev, PCI_CAP_ID_EXP) == 0) {
6474                         dev_err(&pdev->dev,
6475                                 "Cannot find PCIE capability, aborting.\n");
6476                         rc = -EIO;
6477                         goto err_out_unmap;
6478                 }
6479                 bp->flags |= PCIE_FLAG;
6480         } else {
6481                 bp->pcix_cap = pci_find_capability(pdev, PCI_CAP_ID_PCIX);
6482                 if (bp->pcix_cap == 0) {
6483                         dev_err(&pdev->dev,
6484                                 "Cannot find PCIX capability, aborting.\n");
6485                         rc = -EIO;
6486                         goto err_out_unmap;
6487                 }
6488         }
6489
6490         if (CHIP_ID(bp) != CHIP_ID_5706_A0 && CHIP_ID(bp) != CHIP_ID_5706_A1) {
6491                 if (pci_find_capability(pdev, PCI_CAP_ID_MSI))
6492                         bp->flags |= MSI_CAP_FLAG;
6493         }
6494
6495         /* 5708 cannot support DMA addresses > 40-bit.  */
6496         if (CHIP_NUM(bp) == CHIP_NUM_5708)
6497                 persist_dma_mask = dma_mask = DMA_40BIT_MASK;
6498         else
6499                 persist_dma_mask = dma_mask = DMA_64BIT_MASK;
6500
6501         /* Configure DMA attributes. */
6502         if (pci_set_dma_mask(pdev, dma_mask) == 0) {
6503                 dev->features |= NETIF_F_HIGHDMA;
6504                 rc = pci_set_consistent_dma_mask(pdev, persist_dma_mask);
6505                 if (rc) {
6506                         dev_err(&pdev->dev,
6507                                 "pci_set_consistent_dma_mask failed, aborting.\n");
6508                         goto err_out_unmap;
6509                 }
6510         } else if ((rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK)) != 0) {
6511                 dev_err(&pdev->dev, "System does not support DMA, aborting.\n");
6512                 goto err_out_unmap;
6513         }
6514
6515         if (!(bp->flags & PCIE_FLAG))
6516                 bnx2_get_pci_speed(bp);
6517
6518         /* 5706A0 may falsely detect SERR and PERR. */
6519         if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
6520                 reg = REG_RD(bp, PCI_COMMAND);
6521                 reg &= ~(PCI_COMMAND_SERR | PCI_COMMAND_PARITY);
6522                 REG_WR(bp, PCI_COMMAND, reg);
6523         }
6524         else if ((CHIP_ID(bp) == CHIP_ID_5706_A1) &&
6525                 !(bp->flags & PCIX_FLAG)) {
6526
6527                 dev_err(&pdev->dev,
6528                         "5706 A1 can only be used in a PCIX bus, aborting.\n");
6529                 goto err_out_unmap;
6530         }
6531
6532         bnx2_init_nvram(bp);
6533
6534         reg = REG_RD_IND(bp, BNX2_SHM_HDR_SIGNATURE);
6535
6536         if ((reg & BNX2_SHM_HDR_SIGNATURE_SIG_MASK) ==
6537             BNX2_SHM_HDR_SIGNATURE_SIG) {
6538                 u32 off = PCI_FUNC(pdev->devfn) << 2;
6539
6540                 bp->shmem_base = REG_RD_IND(bp, BNX2_SHM_HDR_ADDR_0 + off);
6541         } else
6542                 bp->shmem_base = HOST_VIEW_SHMEM_BASE;
6543
6544         /* Get the permanent MAC address.  First we need to make sure the
6545          * firmware is actually running.
6546          */
6547         reg = REG_RD_IND(bp, bp->shmem_base + BNX2_DEV_INFO_SIGNATURE);
6548
6549         if ((reg & BNX2_DEV_INFO_SIGNATURE_MAGIC_MASK) !=
6550             BNX2_DEV_INFO_SIGNATURE_MAGIC) {
6551                 dev_err(&pdev->dev, "Firmware not running, aborting.\n");
6552                 rc = -ENODEV;
6553                 goto err_out_unmap;
6554         }
6555
6556         bp->fw_ver = REG_RD_IND(bp, bp->shmem_base + BNX2_DEV_INFO_BC_REV);
6557
6558         reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_MAC_UPPER);
6559         bp->mac_addr[0] = (u8) (reg >> 8);
6560         bp->mac_addr[1] = (u8) reg;
6561
6562         reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_MAC_LOWER);
6563         bp->mac_addr[2] = (u8) (reg >> 24);
6564         bp->mac_addr[3] = (u8) (reg >> 16);
6565         bp->mac_addr[4] = (u8) (reg >> 8);
6566         bp->mac_addr[5] = (u8) reg;
6567
6568         bp->tx_ring_size = MAX_TX_DESC_CNT;
6569         bnx2_set_rx_ring_size(bp, 255);
6570
6571         bp->rx_csum = 1;
6572
6573         bp->rx_offset = sizeof(struct l2_fhdr) + 2;
6574
6575         bp->tx_quick_cons_trip_int = 20;
6576         bp->tx_quick_cons_trip = 20;
6577         bp->tx_ticks_int = 80;
6578         bp->tx_ticks = 80;
6579
6580         bp->rx_quick_cons_trip_int = 6;
6581         bp->rx_quick_cons_trip = 6;
6582         bp->rx_ticks_int = 18;
6583         bp->rx_ticks = 18;
6584
6585         bp->stats_ticks = 1000000 & 0xffff00;
6586
6587         bp->timer_interval =  HZ;
6588         bp->current_interval =  HZ;
6589
6590         bp->phy_addr = 1;
6591
6592         /* Disable WOL support if we are running on a SERDES chip. */
6593         if (CHIP_NUM(bp) == CHIP_NUM_5709)
6594                 bnx2_get_5709_media(bp);
6595         else if (CHIP_BOND_ID(bp) & CHIP_BOND_ID_SERDES_BIT)
6596                 bp->phy_flags |= PHY_SERDES_FLAG;
6597
6598         bp->phy_port = PORT_TP;
6599         if (bp->phy_flags & PHY_SERDES_FLAG) {
6600                 bp->phy_port = PORT_FIBRE;
6601                 bp->flags |= NO_WOL_FLAG;
6602                 if (CHIP_NUM(bp) != CHIP_NUM_5706) {
6603                         bp->phy_addr = 2;
6604                         reg = REG_RD_IND(bp, bp->shmem_base +
6605                                          BNX2_SHARED_HW_CFG_CONFIG);
6606                         if (reg & BNX2_SHARED_HW_CFG_PHY_2_5G)
6607                                 bp->phy_flags |= PHY_2_5G_CAPABLE_FLAG;
6608                 }
6609                 bnx2_init_remote_phy(bp);
6610
6611         } else if (CHIP_NUM(bp) == CHIP_NUM_5706 ||
6612                    CHIP_NUM(bp) == CHIP_NUM_5708)
6613                 bp->phy_flags |= PHY_CRC_FIX_FLAG;
6614         else if (CHIP_ID(bp) == CHIP_ID_5709_A0)
6615                 bp->phy_flags |= PHY_DIS_EARLY_DAC_FLAG;
6616
6617         if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
6618             (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
6619             (CHIP_ID(bp) == CHIP_ID_5708_B1))
6620                 bp->flags |= NO_WOL_FLAG;
6621
6622         if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
6623                 bp->tx_quick_cons_trip_int =
6624                         bp->tx_quick_cons_trip;
6625                 bp->tx_ticks_int = bp->tx_ticks;
6626                 bp->rx_quick_cons_trip_int =
6627                         bp->rx_quick_cons_trip;
6628                 bp->rx_ticks_int = bp->rx_ticks;
6629                 bp->comp_prod_trip_int = bp->comp_prod_trip;
6630                 bp->com_ticks_int = bp->com_ticks;
6631                 bp->cmd_ticks_int = bp->cmd_ticks;
6632         }
6633
6634         /* Disable MSI on 5706 if AMD 8132 bridge is found.
6635          *
6636          * MSI is defined to be 32-bit write.  The 5706 does 64-bit MSI writes
6637          * with byte enables disabled on the unused 32-bit word.  This is legal
6638          * but causes problems on the AMD 8132 which will eventually stop
6639          * responding after a while.
6640          *
6641          * AMD believes this incompatibility is unique to the 5706, and
6642          * prefers to locally disable MSI rather than globally disabling it.
6643          */
6644         if (CHIP_NUM(bp) == CHIP_NUM_5706 && disable_msi == 0) {
6645                 struct pci_dev *amd_8132 = NULL;
6646
6647                 while ((amd_8132 = pci_get_device(PCI_VENDOR_ID_AMD,
6648                                                   PCI_DEVICE_ID_AMD_8132_BRIDGE,
6649                                                   amd_8132))) {
6650                         u8 rev;
6651
6652                         pci_read_config_byte(amd_8132, PCI_REVISION_ID, &rev);
6653                         if (rev >= 0x10 && rev <= 0x13) {
6654                                 disable_msi = 1;
6655                                 pci_dev_put(amd_8132);
6656                                 break;
6657                         }
6658                 }
6659         }
6660
6661         bnx2_set_default_link(bp);
6662         bp->req_flow_ctrl = FLOW_CTRL_RX | FLOW_CTRL_TX;
6663
6664         init_timer(&bp->timer);
6665         bp->timer.expires = RUN_AT(bp->timer_interval);
6666         bp->timer.data = (unsigned long) bp;
6667         bp->timer.function = bnx2_timer;
6668
6669         return 0;
6670
6671 err_out_unmap:
6672         if (bp->regview) {
6673                 iounmap(bp->regview);
6674                 bp->regview = NULL;
6675         }
6676
6677 err_out_release:
6678         pci_release_regions(pdev);
6679
6680 err_out_disable:
6681         pci_disable_device(pdev);
6682         pci_set_drvdata(pdev, NULL);
6683
6684 err_out:
6685         return rc;
6686 }
6687
6688 static char * __devinit
6689 bnx2_bus_string(struct bnx2 *bp, char *str)
6690 {
6691         char *s = str;
6692
6693         if (bp->flags & PCIE_FLAG) {
6694                 s += sprintf(s, "PCI Express");
6695         } else {
6696                 s += sprintf(s, "PCI");
6697                 if (bp->flags & PCIX_FLAG)
6698                         s += sprintf(s, "-X");
6699                 if (bp->flags & PCI_32BIT_FLAG)
6700                         s += sprintf(s, " 32-bit");
6701                 else
6702                         s += sprintf(s, " 64-bit");
6703                 s += sprintf(s, " %dMHz", bp->bus_speed_mhz);
6704         }
6705         return str;
6706 }
6707
/* PCI probe entry point: allocate the net_device, run the board-level
 * initialization, wire up the netdev operations and feature flags,
 * register the interface and log a summary line.  Returns 0 on success
 * or a negative errno with everything released.
 */
static int __devinit
bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	static int version_printed = 0;
	struct net_device *dev = NULL;
	struct bnx2 *bp;
	int rc, i;
	char str[40];

	/* Print the driver banner only for the first probed device */
	if (version_printed++ == 0)
		printk(KERN_INFO "%s", version);

	/* dev zeroed in init_etherdev */
	dev = alloc_etherdev(sizeof(*bp));

	if (!dev)
		return -ENOMEM;

	rc = bnx2_init_board(pdev, dev);
	if (rc < 0) {
		free_netdev(dev);
		return rc;
	}

	dev->open = bnx2_open;
	dev->hard_start_xmit = bnx2_start_xmit;
	dev->stop = bnx2_close;
	dev->get_stats = bnx2_get_stats;
	dev->set_multicast_list = bnx2_set_rx_mode;
	dev->do_ioctl = bnx2_ioctl;
	dev->set_mac_address = bnx2_change_mac_addr;
	dev->change_mtu = bnx2_change_mtu;
	dev->tx_timeout = bnx2_tx_timeout;
	dev->watchdog_timeo = TX_TIMEOUT;
#ifdef BCM_VLAN
	dev->vlan_rx_register = bnx2_vlan_rx_register;
#endif
	dev->poll = bnx2_poll;
	dev->ethtool_ops = &bnx2_ethtool_ops;
	dev->weight = 64;

	bp = netdev_priv(dev);

#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
	dev->poll_controller = poll_bnx2;
#endif

	pci_set_drvdata(pdev, dev);

	/* MAC address was read from shared memory by bnx2_init_board() */
	memcpy(dev->dev_addr, bp->mac_addr, 6);
	memcpy(dev->perm_addr, bp->mac_addr, 6);
	bp->name = board_info[ent->driver_data].name;

	/* Checksum/TSO offload features; the 5709 also handles IPv6 */
	dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		dev->features |= NETIF_F_IPV6_CSUM;

#ifdef BCM_VLAN
	dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
#endif
	dev->features |= NETIF_F_TSO | NETIF_F_TSO_ECN;
	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		dev->features |= NETIF_F_TSO6;

	if ((rc = register_netdev(dev))) {
		dev_err(&pdev->dev, "Cannot register net device\n");
		if (bp->regview)
			iounmap(bp->regview);
		pci_release_regions(pdev);
		pci_disable_device(pdev);
		pci_set_drvdata(pdev, NULL);
		free_netdev(dev);
		return rc;
	}

	printk(KERN_INFO "%s: %s (%c%d) %s found at mem %lx, "
		"IRQ %d, ",
		dev->name,
		bp->name,
		((CHIP_ID(bp) & 0xf000) >> 12) + 'A',
		((CHIP_ID(bp) & 0x0ff0) >> 4),
		bnx2_bus_string(bp, str),
		dev->base_addr,
		bp->pdev->irq);

	printk("node addr ");
	for (i = 0; i < 6; i++)
		printk("%2.2x", dev->dev_addr[i]);
	printk("\n");

	return 0;
}
6800
/* Undo bnx2_init_one(): detach the interface from the stack and
 * release all per-device resources.
 */
static void __devexit
bnx2_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);

	/* Let any queued deferred work (e.g. reset task) finish first.
	 * NOTE(review): work queued after this flush but before
	 * unregister_netdev() would run against a soon-to-be-freed
	 * netdev -- confirm nothing can schedule work at this point.
	 */
	flush_scheduled_work();

	unregister_netdev(dev);

	if (bp->regview)
		iounmap(bp->regview);

	free_netdev(dev);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
}
6819
6820 static int
6821 bnx2_suspend(struct pci_dev *pdev, pm_message_t state)
6822 {
6823         struct net_device *dev = pci_get_drvdata(pdev);
6824         struct bnx2 *bp = netdev_priv(dev);
6825         u32 reset_code;
6826
6827         if (!netif_running(dev))
6828                 return 0;
6829
6830         flush_scheduled_work();
6831         bnx2_netif_stop(bp);
6832         netif_device_detach(dev);
6833         del_timer_sync(&bp->timer);
6834         if (bp->flags & NO_WOL_FLAG)
6835                 reset_code = BNX2_DRV_MSG_CODE_UNLOAD_LNK_DN;
6836         else if (bp->wol)
6837                 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
6838         else
6839                 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
6840         bnx2_reset_chip(bp, reset_code);
6841         bnx2_free_skbs(bp);
6842         pci_save_state(pdev);
6843         bnx2_set_power_state(bp, pci_choose_state(pdev, state));
6844         return 0;
6845 }
6846
/* Power-management resume: restore PCI state, bring the device back to
 * D0, and re-initialize the NIC if the interface was running when it
 * was suspended.  Always returns 0.
 */
static int
bnx2_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);

	/* Nothing was torn down at suspend for a down interface. */
	if (!netif_running(dev))
		return 0;

	pci_restore_state(pdev);
	bnx2_set_power_state(bp, PCI_D0);
	netif_device_attach(dev);
	/* Reprogram the NIC from scratch before restarting traffic. */
	bnx2_init_nic(bp);
	bnx2_netif_start(bp);
	return 0;
}
6863
/* PCI driver glue: probe/remove plus power-management entry points. */
static struct pci_driver bnx2_pci_driver = {
	.name		= DRV_MODULE_NAME,
	.id_table	= bnx2_pci_tbl,
	.probe		= bnx2_init_one,
	.remove		= __devexit_p(bnx2_remove_one),
	.suspend	= bnx2_suspend,
	.resume		= bnx2_resume,
};
6872
/* Module entry point: register the PCI driver with the PCI core. */
static int __init bnx2_init(void)
{
	return pci_register_driver(&bnx2_pci_driver);
}
6877
/* Module exit point: unregister the PCI driver. */
static void __exit bnx2_cleanup(void)
{
	pci_unregister_driver(&bnx2_pci_driver);
}
6882
/* Hook driver registration to module load/unload. */
module_init(bnx2_init);
module_exit(bnx2_cleanup);
6885
6886
6887