[BNX2]: Add function to fetch hardware tx index.
[sfrench/cifs-2.6.git] / drivers / net / bnx2.c
1 /* bnx2.c: Broadcom NX2 network driver.
2  *
3  * Copyright (c) 2004-2007 Broadcom Corporation
4  *
5  * This program is free software; you can redistribute it and/or modify
6  * it under the terms of the GNU General Public License as published by
7  * the Free Software Foundation.
8  *
9  * Written by: Michael Chan  (mchan@broadcom.com)
10  */
11
12
13 #include <linux/module.h>
14 #include <linux/moduleparam.h>
15
16 #include <linux/kernel.h>
17 #include <linux/timer.h>
18 #include <linux/errno.h>
19 #include <linux/ioport.h>
20 #include <linux/slab.h>
21 #include <linux/vmalloc.h>
22 #include <linux/interrupt.h>
23 #include <linux/pci.h>
24 #include <linux/init.h>
25 #include <linux/netdevice.h>
26 #include <linux/etherdevice.h>
27 #include <linux/skbuff.h>
28 #include <linux/dma-mapping.h>
29 #include <linux/bitops.h>
30 #include <asm/io.h>
31 #include <asm/irq.h>
32 #include <linux/delay.h>
33 #include <asm/byteorder.h>
34 #include <asm/page.h>
35 #include <linux/time.h>
36 #include <linux/ethtool.h>
37 #include <linux/mii.h>
38 #ifdef NETIF_F_HW_VLAN_TX
39 #include <linux/if_vlan.h>
40 #define BCM_VLAN 1
41 #endif
42 #include <net/ip.h>
43 #include <net/tcp.h>
44 #include <net/checksum.h>
45 #include <linux/workqueue.h>
46 #include <linux/crc32.h>
47 #include <linux/prefetch.h>
48 #include <linux/cache.h>
49 #include <linux/zlib.h>
50
51 #include "bnx2.h"
52 #include "bnx2_fw.h"
53 #include "bnx2_fw2.h"
54
/* Size of the buffer used to hold decompressed firmware images. */
#define FW_BUF_SIZE             0x10000

#define DRV_MODULE_NAME         "bnx2"
#define PFX DRV_MODULE_NAME     ": "
#define DRV_MODULE_VERSION      "1.7.0"
#define DRV_MODULE_RELDATE      "December 11, 2007"

#define RUN_AT(x) (jiffies + (x))

/* Time in jiffies before concluding the transmitter is hung. */
#define TX_TIMEOUT  (5*HZ)

static const char version[] __devinitdata =
        "Broadcom NetXtreme II Gigabit Ethernet Driver " DRV_MODULE_NAME " v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Michael Chan <mchan@broadcom.com>");
MODULE_DESCRIPTION("Broadcom NetXtreme II BCM5706/5708 Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);

/* Module parameter: set non-zero to force legacy INTx interrupts. */
static int disable_msi = 0;

module_param(disable_msi, int, 0);
MODULE_PARM_DESC(disable_msi, "Disable Message Signaled Interrupt (MSI)");
79
/* Board types.  The enum values double as indices into board_info[]
 * below and as the driver_data field in bnx2_pci_tbl, so the order
 * of the entries must not change.
 */
typedef enum {
        BCM5706 = 0,
        NC370T,
        NC370I,
        BCM5706S,
        NC370F,
        BCM5708,
        BCM5708S,
        BCM5709,
        BCM5709S,
} board_t;
91
92 /* indexed by board_t, above */
/* Human-readable board names, indexed by board_t above — keep the
 * entry order in sync with that enum.
 */
static const struct {
        char *name;
} board_info[] __devinitdata = {
        { "Broadcom NetXtreme II BCM5706 1000Base-T" },
        { "HP NC370T Multifunction Gigabit Server Adapter" },
        { "HP NC370i Multifunction Gigabit Server Adapter" },
        { "Broadcom NetXtreme II BCM5706 1000Base-SX" },
        { "HP NC370F Multifunction Gigabit Server Adapter" },
        { "Broadcom NetXtreme II BCM5708 1000Base-T" },
        { "Broadcom NetXtreme II BCM5708 1000Base-SX" },
        { "Broadcom NetXtreme II BCM5709 1000Base-T" },
        { "Broadcom NetXtreme II BCM5709 1000Base-SX" },
        };
106
/* PCI IDs claimed by this driver.  The HP OEM boards are listed first
 * so their specific subsystem IDs match before the PCI_ANY_ID catch-all
 * entries for the generic parts.  driver_data is a board_t index into
 * board_info[].
 */
static struct pci_device_id bnx2_pci_tbl[] = {
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
          PCI_VENDOR_ID_HP, 0x3101, 0, 0, NC370T },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
          PCI_VENDOR_ID_HP, 0x3106, 0, 0, NC370I },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706 },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708 },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
          PCI_VENDOR_ID_HP, 0x3102, 0, 0, NC370F },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706S },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708S,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708S },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709 },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709S,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709S },
        { 0, }
};
128
/* Supported NVRAM (EEPROM/flash) device configurations.  The first
 * five words of each entry are raw controller configuration values;
 * NOTE(review): entries appear to be selected by matching the device's
 * NVRAM strapping — confirm against the nvram init code elsewhere in
 * this file.  Do not reorder or alter the numeric values.
 */
static struct flash_spec flash_table[] =
{
#define BUFFERED_FLAGS          (BNX2_NV_BUFFERED | BNX2_NV_TRANSLATE)
#define NONBUFFERED_FLAGS       (BNX2_NV_WREN)
        /* Slow EEPROM */
        {0x00000000, 0x40830380, 0x009f0081, 0xa184a053, 0xaf000400,
         BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
         SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
         "EEPROM - slow"},
        /* Expansion entry 0001 */
        {0x08000002, 0x4b808201, 0x00050081, 0x03840253, 0xaf020406,
         NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
         SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
         "Entry 0001"},
        /* Saifun SA25F010 (non-buffered flash) */
        /* strap, cfg1, & write1 need updates */
        {0x04000001, 0x47808201, 0x00050081, 0x03840253, 0xaf020406,
         NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
         SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*2,
         "Non-buffered flash (128kB)"},
        /* Saifun SA25F020 (non-buffered flash) */
        /* strap, cfg1, & write1 need updates */
        {0x0c000003, 0x4f808201, 0x00050081, 0x03840253, 0xaf020406,
         NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
         SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*4,
         "Non-buffered flash (256kB)"},
        /* Expansion entry 0100 */
        {0x11000000, 0x53808201, 0x00050081, 0x03840253, 0xaf020406,
         NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
         SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
         "Entry 0100"},
        /* Entry 0101: ST M45PE10 (non-buffered flash, TetonII B0) */
        {0x19000002, 0x5b808201, 0x000500db, 0x03840253, 0xaf020406,
         NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
         ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*2,
         "Entry 0101: ST M45PE10 (128kB non-bufferred)"},
        /* Entry 0110: ST M45PE20 (non-buffered flash)*/
        {0x15000001, 0x57808201, 0x000500db, 0x03840253, 0xaf020406,
         NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
         ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*4,
         "Entry 0110: ST M45PE20 (256kB non-bufferred)"},
        /* Saifun SA25F005 (non-buffered flash) */
        /* strap, cfg1, & write1 need updates */
        {0x1d000003, 0x5f808201, 0x00050081, 0x03840253, 0xaf020406,
         NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
         SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE,
         "Non-buffered flash (64kB)"},
        /* Fast EEPROM */
        {0x22000000, 0x62808380, 0x009f0081, 0xa184a053, 0xaf000400,
         BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
         SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
         "EEPROM - fast"},
        /* Expansion entry 1001 */
        {0x2a000002, 0x6b808201, 0x00050081, 0x03840253, 0xaf020406,
         NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
         SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
         "Entry 1001"},
        /* Expansion entry 1010 */
        {0x26000001, 0x67808201, 0x00050081, 0x03840253, 0xaf020406,
         NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
         SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
         "Entry 1010"},
        /* ATMEL AT45DB011B (buffered flash) */
        {0x2e000003, 0x6e808273, 0x00570081, 0x68848353, 0xaf000400,
         BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
         BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE,
         "Buffered flash (128kB)"},
        /* Expansion entry 1100 */
        {0x33000000, 0x73808201, 0x00050081, 0x03840253, 0xaf020406,
         NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
         SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
         "Entry 1100"},
        /* Expansion entry 1101 */
        {0x3b000002, 0x7b808201, 0x00050081, 0x03840253, 0xaf020406,
         NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
         SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
         "Entry 1101"},
        /* Atmel Expansion entry 1110 */
        {0x37000001, 0x76808273, 0x00570081, 0x68848353, 0xaf000400,
         BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
         BUFFERED_FLASH_BYTE_ADDR_MASK, 0,
         "Entry 1110 (Atmel)"},
        /* ATMEL AT45DB021B (buffered flash) */
        {0x3f000003, 0x7e808273, 0x00570081, 0x68848353, 0xaf000400,
         BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
         BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE*2,
         "Buffered flash (256kB)"},
};
217
/* The 5709 uses a single fixed flash configuration rather than the
 * strap-selected flash_table[] above.
 */
static struct flash_spec flash_5709 = {
        .flags          = BNX2_NV_BUFFERED,
        .page_bits      = BCM5709_FLASH_PAGE_BITS,
        .page_size      = BCM5709_FLASH_PAGE_SIZE,
        .addr_mask      = BCM5709_FLASH_BYTE_ADDR_MASK,
        .total_size     = BUFFERED_FLASH_TOTAL_SIZE*2,
        .name           = "5709 Buffered flash (256kB)",
};
226
/* Export the PCI ID table so userspace can autoload this module. */
MODULE_DEVICE_TABLE(pci, bnx2_pci_tbl);
228
229 static inline u32 bnx2_tx_avail(struct bnx2 *bp)
230 {
231         u32 diff;
232
233         smp_mb();
234
235         /* The ring uses 256 indices for 255 entries, one of them
236          * needs to be skipped.
237          */
238         diff = bp->tx_prod - bp->tx_cons;
239         if (unlikely(diff >= TX_DESC_CNT)) {
240                 diff &= 0xffff;
241                 if (diff == TX_DESC_CNT)
242                         diff = MAX_TX_DESC_CNT;
243         }
244         return (bp->tx_ring_size - diff);
245 }
246
247 static u32
248 bnx2_reg_rd_ind(struct bnx2 *bp, u32 offset)
249 {
250         u32 val;
251
252         spin_lock_bh(&bp->indirect_lock);
253         REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
254         val = REG_RD(bp, BNX2_PCICFG_REG_WINDOW);
255         spin_unlock_bh(&bp->indirect_lock);
256         return val;
257 }
258
/* Indirectly write a device register through the PCICFG window,
 * holding indirect_lock so the address/data pair stays atomic with
 * respect to other indirect accesses.
 */
static void
bnx2_reg_wr_ind(struct bnx2 *bp, u32 offset, u32 val)
{
        spin_lock_bh(&bp->indirect_lock);
        REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
        REG_WR(bp, BNX2_PCICFG_REG_WINDOW, val);
        spin_unlock_bh(&bp->indirect_lock);
}
267
268 static void
269 bnx2_ctx_wr(struct bnx2 *bp, u32 cid_addr, u32 offset, u32 val)
270 {
271         offset += cid_addr;
272         spin_lock_bh(&bp->indirect_lock);
273         if (CHIP_NUM(bp) == CHIP_NUM_5709) {
274                 int i;
275
276                 REG_WR(bp, BNX2_CTX_CTX_DATA, val);
277                 REG_WR(bp, BNX2_CTX_CTX_CTRL,
278                        offset | BNX2_CTX_CTX_CTRL_WRITE_REQ);
279                 for (i = 0; i < 5; i++) {
280                         u32 val;
281                         val = REG_RD(bp, BNX2_CTX_CTX_CTRL);
282                         if ((val & BNX2_CTX_CTX_CTRL_WRITE_REQ) == 0)
283                                 break;
284                         udelay(5);
285                 }
286         } else {
287                 REG_WR(bp, BNX2_CTX_DATA_ADR, offset);
288                 REG_WR(bp, BNX2_CTX_DATA, val);
289         }
290         spin_unlock_bh(&bp->indirect_lock);
291 }
292
/* Read PHY register 'reg' into '*val' via the EMAC MDIO interface.
 * Returns 0 on success, or -EBUSY (with *val set to 0) if the access
 * never completes.
 */
static int
bnx2_read_phy(struct bnx2 *bp, u32 reg, u32 *val)
{
        u32 val1;
        int i, ret;

        /* Hardware auto-polling competes for the MDIO bus; disable it
         * around a manual access.  The read-back flushes the write.
         */
        if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
                val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
                val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;

                REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
                REG_RD(bp, BNX2_EMAC_MDIO_MODE);

                udelay(40);
        }

        /* Kick off the read: PHY address, register, READ command. */
        val1 = (bp->phy_addr << 21) | (reg << 16) |
                BNX2_EMAC_MDIO_COMM_COMMAND_READ | BNX2_EMAC_MDIO_COMM_DISEXT |
                BNX2_EMAC_MDIO_COMM_START_BUSY;
        REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);

        /* Poll up to 50 x 10us for START_BUSY to clear, then fetch
         * the data bits from the same register.
         */
        for (i = 0; i < 50; i++) {
                udelay(10);

                val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
                if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
                        udelay(5);

                        val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
                        val1 &= BNX2_EMAC_MDIO_COMM_DATA;

                        break;
                }
        }

        if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY) {
                *val = 0x0;
                ret = -EBUSY;
        }
        else {
                *val = val1;
                ret = 0;
        }

        /* Restore auto-polling if we disabled it above. */
        if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
                val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
                val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;

                REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
                REG_RD(bp, BNX2_EMAC_MDIO_MODE);

                udelay(40);
        }

        return ret;
}
349
/* Write 'val' to PHY register 'reg' via the EMAC MDIO interface.
 * Returns 0 on success or -EBUSY if the access never completes.
 * Mirrors bnx2_read_phy: auto-polling is suspended around the access.
 */
static int
bnx2_write_phy(struct bnx2 *bp, u32 reg, u32 val)
{
        u32 val1;
        int i, ret;

        /* Take the MDIO bus away from the hardware auto-poller. */
        if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
                val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
                val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;

                REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
                REG_RD(bp, BNX2_EMAC_MDIO_MODE);

                udelay(40);
        }

        /* Issue the write: PHY address, register, data, WRITE command. */
        val1 = (bp->phy_addr << 21) | (reg << 16) | val |
                BNX2_EMAC_MDIO_COMM_COMMAND_WRITE |
                BNX2_EMAC_MDIO_COMM_START_BUSY | BNX2_EMAC_MDIO_COMM_DISEXT;
        REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);

        /* Poll up to 50 x 10us for the command to finish. */
        for (i = 0; i < 50; i++) {
                udelay(10);

                val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
                if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
                        udelay(5);
                        break;
                }
        }

        if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)
                ret = -EBUSY;
        else
                ret = 0;

        /* Give the bus back to the auto-poller. */
        if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
                val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
                val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;

                REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
                REG_RD(bp, BNX2_EMAC_MDIO_MODE);

                udelay(40);
        }

        return ret;
}
398
/* Mask device interrupts.  The read-back flushes the posted write so
 * the mask takes effect before we return.
 */
static void
bnx2_disable_int(struct bnx2 *bp)
{
        REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
               BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
        REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD);
}
406
/* Unmask device interrupts.  The first write acks last_status_idx
 * while still masked; the second write acks it with the mask cleared.
 * Finally COAL_NOW asks the host coalescing block to fire an interrupt
 * right away so no already-pending event is missed.
 */
static void
bnx2_enable_int(struct bnx2 *bp)
{
        REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
               BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
               BNX2_PCICFG_INT_ACK_CMD_MASK_INT | bp->last_status_idx);

        REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
               BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID | bp->last_status_idx);

        REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
}
419
/* Mask interrupts and wait for any in-flight handler to finish.
 * intr_sem is bumped first so the ISR sees the disable in progress;
 * bnx2_netif_start() drops it again.
 */
static void
bnx2_disable_int_sync(struct bnx2 *bp)
{
        atomic_inc(&bp->intr_sem);
        bnx2_disable_int(bp);
        synchronize_irq(bp->pdev->irq);
}
427
/* Quiesce the interface: mask interrupts synchronously, then stop
 * NAPI polling and the tx queue.
 */
static void
bnx2_netif_stop(struct bnx2 *bp)
{
        bnx2_disable_int_sync(bp);
        if (netif_running(bp->dev)) {
                napi_disable(&bp->napi);
                netif_tx_disable(bp->dev);
                bp->dev->trans_start = jiffies; /* prevent tx timeout */
        }
}
438
439 static void
440 bnx2_netif_start(struct bnx2 *bp)
441 {
442         if (atomic_dec_and_test(&bp->intr_sem)) {
443                 if (netif_running(bp->dev)) {
444                         netif_wake_queue(bp->dev);
445                         napi_enable(&bp->napi);
446                         bnx2_enable_int(bp);
447                 }
448         }
449 }
450
451 static void
452 bnx2_free_mem(struct bnx2 *bp)
453 {
454         int i;
455
456         for (i = 0; i < bp->ctx_pages; i++) {
457                 if (bp->ctx_blk[i]) {
458                         pci_free_consistent(bp->pdev, BCM_PAGE_SIZE,
459                                             bp->ctx_blk[i],
460                                             bp->ctx_blk_mapping[i]);
461                         bp->ctx_blk[i] = NULL;
462                 }
463         }
464         if (bp->status_blk) {
465                 pci_free_consistent(bp->pdev, bp->status_stats_size,
466                                     bp->status_blk, bp->status_blk_mapping);
467                 bp->status_blk = NULL;
468                 bp->stats_blk = NULL;
469         }
470         if (bp->tx_desc_ring) {
471                 pci_free_consistent(bp->pdev, TXBD_RING_SIZE,
472                                     bp->tx_desc_ring, bp->tx_desc_mapping);
473                 bp->tx_desc_ring = NULL;
474         }
475         kfree(bp->tx_buf_ring);
476         bp->tx_buf_ring = NULL;
477         for (i = 0; i < bp->rx_max_ring; i++) {
478                 if (bp->rx_desc_ring[i])
479                         pci_free_consistent(bp->pdev, RXBD_RING_SIZE,
480                                             bp->rx_desc_ring[i],
481                                             bp->rx_desc_mapping[i]);
482                 bp->rx_desc_ring[i] = NULL;
483         }
484         vfree(bp->rx_buf_ring);
485         bp->rx_buf_ring = NULL;
486         for (i = 0; i < bp->rx_max_pg_ring; i++) {
487                 if (bp->rx_pg_desc_ring[i])
488                         pci_free_consistent(bp->pdev, RXBD_RING_SIZE,
489                                             bp->rx_pg_desc_ring[i],
490                                             bp->rx_pg_desc_mapping[i]);
491                 bp->rx_pg_desc_ring[i] = NULL;
492         }
493         if (bp->rx_pg_ring)
494                 vfree(bp->rx_pg_ring);
495         bp->rx_pg_ring = NULL;
496 }
497
/* Allocate all tx/rx rings, the combined status+statistics block and
 * (5709 only) context memory pages.  Returns 0 on success or -ENOMEM;
 * on failure everything already allocated is released via
 * bnx2_free_mem() before returning.
 */
static int
bnx2_alloc_mem(struct bnx2 *bp)
{
        int i, status_blk_size;

        /* Software tx ring (skb bookkeeping) - plain kernel memory. */
        bp->tx_buf_ring = kzalloc(SW_TXBD_RING_SIZE, GFP_KERNEL);
        if (bp->tx_buf_ring == NULL)
                return -ENOMEM;

        /* Hardware tx descriptor ring - DMA coherent. */
        bp->tx_desc_ring = pci_alloc_consistent(bp->pdev, TXBD_RING_SIZE,
                                                &bp->tx_desc_mapping);
        if (bp->tx_desc_ring == NULL)
                goto alloc_mem_err;

        bp->rx_buf_ring = vmalloc(SW_RXBD_RING_SIZE * bp->rx_max_ring);
        if (bp->rx_buf_ring == NULL)
                goto alloc_mem_err;

        memset(bp->rx_buf_ring, 0, SW_RXBD_RING_SIZE * bp->rx_max_ring);

        for (i = 0; i < bp->rx_max_ring; i++) {
                bp->rx_desc_ring[i] =
                        pci_alloc_consistent(bp->pdev, RXBD_RING_SIZE,
                                             &bp->rx_desc_mapping[i]);
                if (bp->rx_desc_ring[i] == NULL)
                        goto alloc_mem_err;

        }

        /* Optional rx page ring (only when configured). */
        if (bp->rx_pg_ring_size) {
                bp->rx_pg_ring = vmalloc(SW_RXPG_RING_SIZE *
                                         bp->rx_max_pg_ring);
                if (bp->rx_pg_ring == NULL)
                        goto alloc_mem_err;

                memset(bp->rx_pg_ring, 0, SW_RXPG_RING_SIZE *
                       bp->rx_max_pg_ring);
        }

        for (i = 0; i < bp->rx_max_pg_ring; i++) {
                bp->rx_pg_desc_ring[i] =
                        pci_alloc_consistent(bp->pdev, RXBD_RING_SIZE,
                                             &bp->rx_pg_desc_mapping[i]);
                if (bp->rx_pg_desc_ring[i] == NULL)
                        goto alloc_mem_err;

        }

        /* Combine status and statistics blocks into one allocation. */
        status_blk_size = L1_CACHE_ALIGN(sizeof(struct status_block));
        bp->status_stats_size = status_blk_size +
                                sizeof(struct statistics_block);

        bp->status_blk = pci_alloc_consistent(bp->pdev, bp->status_stats_size,
                                              &bp->status_blk_mapping);
        if (bp->status_blk == NULL)
                goto alloc_mem_err;

        memset(bp->status_blk, 0, bp->status_stats_size);

        /* stats_blk lives just past the cache-aligned status block. */
        bp->stats_blk = (void *) ((unsigned long) bp->status_blk +
                                  status_blk_size);

        bp->stats_blk_mapping = bp->status_blk_mapping + status_blk_size;

        /* 5709 needs on-host context memory pages. */
        if (CHIP_NUM(bp) == CHIP_NUM_5709) {
                bp->ctx_pages = 0x2000 / BCM_PAGE_SIZE;
                if (bp->ctx_pages == 0)
                        bp->ctx_pages = 1;
                for (i = 0; i < bp->ctx_pages; i++) {
                        bp->ctx_blk[i] = pci_alloc_consistent(bp->pdev,
                                                BCM_PAGE_SIZE,
                                                &bp->ctx_blk_mapping[i]);
                        if (bp->ctx_blk[i] == NULL)
                                goto alloc_mem_err;
                }
        }
        return 0;

alloc_mem_err:
        bnx2_free_mem(bp);
        return -ENOMEM;
}
581
/* Encode the current link state (speed/duplex/autoneg) into a
 * BNX2_LINK_STATUS word and post it to the bootcode via shared memory.
 */
static void
bnx2_report_fw_link(struct bnx2 *bp)
{
        u32 fw_link_status = 0;

        /* A remote PHY is managed by the firmware itself. */
        if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
                return;

        if (bp->link_up) {
                u32 bmsr;

                switch (bp->line_speed) {
                case SPEED_10:
                        if (bp->duplex == DUPLEX_HALF)
                                fw_link_status = BNX2_LINK_STATUS_10HALF;
                        else
                                fw_link_status = BNX2_LINK_STATUS_10FULL;
                        break;
                case SPEED_100:
                        if (bp->duplex == DUPLEX_HALF)
                                fw_link_status = BNX2_LINK_STATUS_100HALF;
                        else
                                fw_link_status = BNX2_LINK_STATUS_100FULL;
                        break;
                case SPEED_1000:
                        if (bp->duplex == DUPLEX_HALF)
                                fw_link_status = BNX2_LINK_STATUS_1000HALF;
                        else
                                fw_link_status = BNX2_LINK_STATUS_1000FULL;
                        break;
                case SPEED_2500:
                        if (bp->duplex == DUPLEX_HALF)
                                fw_link_status = BNX2_LINK_STATUS_2500HALF;
                        else
                                fw_link_status = BNX2_LINK_STATUS_2500FULL;
                        break;
                }

                fw_link_status |= BNX2_LINK_STATUS_LINK_UP;

                if (bp->autoneg) {
                        fw_link_status |= BNX2_LINK_STATUS_AN_ENABLED;

                        /* Read BMSR twice: its status bits are latched,
                         * so the second read returns current state.
                         */
                        bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
                        bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);

                        if (!(bmsr & BMSR_ANEGCOMPLETE) ||
                            bp->phy_flags & PHY_PARALLEL_DETECT_FLAG)
                                fw_link_status |= BNX2_LINK_STATUS_PARALLEL_DET;
                        else
                                fw_link_status |= BNX2_LINK_STATUS_AN_COMPLETE;
                }
        }
        else
                fw_link_status = BNX2_LINK_STATUS_LINK_DOWN;

        REG_WR_IND(bp, bp->shmem_base + BNX2_LINK_STATUS, fw_link_status);
}
640
641 static char *
642 bnx2_xceiver_str(struct bnx2 *bp)
643 {
644         return ((bp->phy_port == PORT_FIBRE) ? "SerDes" :
645                 ((bp->phy_flags & PHY_SERDES_FLAG) ? "Remote Copper" :
646                  "Copper"));
647 }
648
/* Log the link state, update the carrier flag, and forward the state
 * to the bootcode.  The bare printk() calls (no KERN_ level) continue
 * the first line, so the full message prints as a single line.
 */
static void
bnx2_report_link(struct bnx2 *bp)
{
        if (bp->link_up) {
                netif_carrier_on(bp->dev);
                printk(KERN_INFO PFX "%s NIC %s Link is Up, ", bp->dev->name,
                       bnx2_xceiver_str(bp));

                printk("%d Mbps ", bp->line_speed);

                if (bp->duplex == DUPLEX_FULL)
                        printk("full duplex");
                else
                        printk("half duplex");

                if (bp->flow_ctrl) {
                        if (bp->flow_ctrl & FLOW_CTRL_RX) {
                                printk(", receive ");
                                if (bp->flow_ctrl & FLOW_CTRL_TX)
                                        printk("& transmit ");
                        }
                        else {
                                printk(", transmit ");
                        }
                        printk("flow control ON");
                }
                printk("\n");
        }
        else {
                netif_carrier_off(bp->dev);
                printk(KERN_ERR PFX "%s NIC %s Link is Down\n", bp->dev->name,
                       bnx2_xceiver_str(bp));
        }

        bnx2_report_fw_link(bp);
}
685
/* Resolve the pause (flow control) configuration into bp->flow_ctrl.
 * If autoneg of both speed and flow control is not active, the user's
 * requested setting is applied directly (full duplex only).  Otherwise
 * the result follows the IEEE 802.3 pause resolution rules from the
 * advertised/partner ability bits.
 */
static void
bnx2_resolve_flow_ctrl(struct bnx2 *bp)
{
        u32 local_adv, remote_adv;

        bp->flow_ctrl = 0;
        if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
                (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {

                if (bp->duplex == DUPLEX_FULL) {
                        bp->flow_ctrl = bp->req_flow_ctrl;
                }
                return;
        }

        /* Pause is only defined for full duplex links. */
        if (bp->duplex != DUPLEX_FULL) {
                return;
        }

        /* The 5708 SerDes reports resolved pause directly in a status
         * register, so no advertisement arithmetic is needed.
         */
        if ((bp->phy_flags & PHY_SERDES_FLAG) &&
            (CHIP_NUM(bp) == CHIP_NUM_5708)) {
                u32 val;

                bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
                if (val & BCM5708S_1000X_STAT1_TX_PAUSE)
                        bp->flow_ctrl |= FLOW_CTRL_TX;
                if (val & BCM5708S_1000X_STAT1_RX_PAUSE)
                        bp->flow_ctrl |= FLOW_CTRL_RX;
                return;
        }

        bnx2_read_phy(bp, bp->mii_adv, &local_adv);
        bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);

        /* SerDes uses 1000Base-X pause bits; map them onto the copper
         * (clause 28) bit positions so one resolution path serves both.
         */
        if (bp->phy_flags & PHY_SERDES_FLAG) {
                u32 new_local_adv = 0;
                u32 new_remote_adv = 0;

                if (local_adv & ADVERTISE_1000XPAUSE)
                        new_local_adv |= ADVERTISE_PAUSE_CAP;
                if (local_adv & ADVERTISE_1000XPSE_ASYM)
                        new_local_adv |= ADVERTISE_PAUSE_ASYM;
                if (remote_adv & ADVERTISE_1000XPAUSE)
                        new_remote_adv |= ADVERTISE_PAUSE_CAP;
                if (remote_adv & ADVERTISE_1000XPSE_ASYM)
                        new_remote_adv |= ADVERTISE_PAUSE_ASYM;

                local_adv = new_local_adv;
                remote_adv = new_remote_adv;
        }

        /* See Table 28B-3 of 802.3ab-1999 spec. */
        if (local_adv & ADVERTISE_PAUSE_CAP) {
                if(local_adv & ADVERTISE_PAUSE_ASYM) {
                        if (remote_adv & ADVERTISE_PAUSE_CAP) {
                                bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
                        }
                        else if (remote_adv & ADVERTISE_PAUSE_ASYM) {
                                bp->flow_ctrl = FLOW_CTRL_RX;
                        }
                }
                else {
                        if (remote_adv & ADVERTISE_PAUSE_CAP) {
                                bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
                        }
                }
        }
        else if (local_adv & ADVERTISE_PAUSE_ASYM) {
                if ((remote_adv & ADVERTISE_PAUSE_CAP) &&
                        (remote_adv & ADVERTISE_PAUSE_ASYM)) {

                        bp->flow_ctrl = FLOW_CTRL_TX;
                }
        }
}
761
762 static int
763 bnx2_5709s_linkup(struct bnx2 *bp)
764 {
765         u32 val, speed;
766
767         bp->link_up = 1;
768
769         bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_GP_STATUS);
770         bnx2_read_phy(bp, MII_BNX2_GP_TOP_AN_STATUS1, &val);
771         bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
772
773         if ((bp->autoneg & AUTONEG_SPEED) == 0) {
774                 bp->line_speed = bp->req_line_speed;
775                 bp->duplex = bp->req_duplex;
776                 return 0;
777         }
778         speed = val & MII_BNX2_GP_TOP_AN_SPEED_MSK;
779         switch (speed) {
780                 case MII_BNX2_GP_TOP_AN_SPEED_10:
781                         bp->line_speed = SPEED_10;
782                         break;
783                 case MII_BNX2_GP_TOP_AN_SPEED_100:
784                         bp->line_speed = SPEED_100;
785                         break;
786                 case MII_BNX2_GP_TOP_AN_SPEED_1G:
787                 case MII_BNX2_GP_TOP_AN_SPEED_1GKV:
788                         bp->line_speed = SPEED_1000;
789                         break;
790                 case MII_BNX2_GP_TOP_AN_SPEED_2_5G:
791                         bp->line_speed = SPEED_2500;
792                         break;
793         }
794         if (val & MII_BNX2_GP_TOP_AN_FD)
795                 bp->duplex = DUPLEX_FULL;
796         else
797                 bp->duplex = DUPLEX_HALF;
798         return 0;
799 }
800
801 static int
802 bnx2_5708s_linkup(struct bnx2 *bp)
803 {
804         u32 val;
805
806         bp->link_up = 1;
807         bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
808         switch (val & BCM5708S_1000X_STAT1_SPEED_MASK) {
809                 case BCM5708S_1000X_STAT1_SPEED_10:
810                         bp->line_speed = SPEED_10;
811                         break;
812                 case BCM5708S_1000X_STAT1_SPEED_100:
813                         bp->line_speed = SPEED_100;
814                         break;
815                 case BCM5708S_1000X_STAT1_SPEED_1G:
816                         bp->line_speed = SPEED_1000;
817                         break;
818                 case BCM5708S_1000X_STAT1_SPEED_2G5:
819                         bp->line_speed = SPEED_2500;
820                         break;
821         }
822         if (val & BCM5708S_1000X_STAT1_FD)
823                 bp->duplex = DUPLEX_FULL;
824         else
825                 bp->duplex = DUPLEX_HALF;
826
827         return 0;
828 }
829
830 static int
831 bnx2_5706s_linkup(struct bnx2 *bp)
832 {
833         u32 bmcr, local_adv, remote_adv, common;
834
835         bp->link_up = 1;
836         bp->line_speed = SPEED_1000;
837
838         bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
839         if (bmcr & BMCR_FULLDPLX) {
840                 bp->duplex = DUPLEX_FULL;
841         }
842         else {
843                 bp->duplex = DUPLEX_HALF;
844         }
845
846         if (!(bmcr & BMCR_ANENABLE)) {
847                 return 0;
848         }
849
850         bnx2_read_phy(bp, bp->mii_adv, &local_adv);
851         bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);
852
853         common = local_adv & remote_adv;
854         if (common & (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL)) {
855
856                 if (common & ADVERTISE_1000XFULL) {
857                         bp->duplex = DUPLEX_FULL;
858                 }
859                 else {
860                         bp->duplex = DUPLEX_HALF;
861                 }
862         }
863
864         return 0;
865 }
866
867 static int
868 bnx2_copper_linkup(struct bnx2 *bp)
869 {
870         u32 bmcr;
871
872         bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
873         if (bmcr & BMCR_ANENABLE) {
874                 u32 local_adv, remote_adv, common;
875
876                 bnx2_read_phy(bp, MII_CTRL1000, &local_adv);
877                 bnx2_read_phy(bp, MII_STAT1000, &remote_adv);
878
879                 common = local_adv & (remote_adv >> 2);
880                 if (common & ADVERTISE_1000FULL) {
881                         bp->line_speed = SPEED_1000;
882                         bp->duplex = DUPLEX_FULL;
883                 }
884                 else if (common & ADVERTISE_1000HALF) {
885                         bp->line_speed = SPEED_1000;
886                         bp->duplex = DUPLEX_HALF;
887                 }
888                 else {
889                         bnx2_read_phy(bp, bp->mii_adv, &local_adv);
890                         bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);
891
892                         common = local_adv & remote_adv;
893                         if (common & ADVERTISE_100FULL) {
894                                 bp->line_speed = SPEED_100;
895                                 bp->duplex = DUPLEX_FULL;
896                         }
897                         else if (common & ADVERTISE_100HALF) {
898                                 bp->line_speed = SPEED_100;
899                                 bp->duplex = DUPLEX_HALF;
900                         }
901                         else if (common & ADVERTISE_10FULL) {
902                                 bp->line_speed = SPEED_10;
903                                 bp->duplex = DUPLEX_FULL;
904                         }
905                         else if (common & ADVERTISE_10HALF) {
906                                 bp->line_speed = SPEED_10;
907                                 bp->duplex = DUPLEX_HALF;
908                         }
909                         else {
910                                 bp->line_speed = 0;
911                                 bp->link_up = 0;
912                         }
913                 }
914         }
915         else {
916                 if (bmcr & BMCR_SPEED100) {
917                         bp->line_speed = SPEED_100;
918                 }
919                 else {
920                         bp->line_speed = SPEED_10;
921                 }
922                 if (bmcr & BMCR_FULLDPLX) {
923                         bp->duplex = DUPLEX_FULL;
924                 }
925                 else {
926                         bp->duplex = DUPLEX_HALF;
927                 }
928         }
929
930         return 0;
931 }
932
/* Program the EMAC to match the current link state: TX length/IPG,
 * port mode (MII / GMII / 2.5G), duplex, and RX/TX pause enables.
 * Finishes by acknowledging the EMAC link-change interrupt.
 * Always returns 0.
 */
static int
bnx2_set_mac_link(struct bnx2 *bp)
{
	u32 val;

	/* NOTE(review): 0x2620 is the default TX lengths value and
	 * 0x26ff the half-duplex-gigabit one (presumably a larger
	 * slot time) — field encoding taken on trust from the
	 * original code; verify against the EMAC register spec.
	 */
	REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x2620);
	if (bp->link_up && (bp->line_speed == SPEED_1000) &&
		(bp->duplex == DUPLEX_HALF)) {
		REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x26ff);
	}

	/* Configure the EMAC mode register. */
	val = REG_RD(bp, BNX2_EMAC_MODE);

	/* Clear all speed/duplex/loopback related bits before setting
	 * the ones for the current link.
	 */
	val &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
		BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
		BNX2_EMAC_MODE_25G_MODE);

	if (bp->link_up) {
		switch (bp->line_speed) {
			case SPEED_10:
				/* Only newer chips have a 10M MII mode;
				 * the 5706 uses plain MII for 10 and 100.
				 */
				if (CHIP_NUM(bp) != CHIP_NUM_5706) {
					val |= BNX2_EMAC_MODE_PORT_MII_10M;
					break;
				}
				/* fall through */
			case SPEED_100:
				val |= BNX2_EMAC_MODE_PORT_MII;
				break;
			case SPEED_2500:
				/* 2.5G is GMII mode plus the 25G flag. */
				val |= BNX2_EMAC_MODE_25G_MODE;
				/* fall through */
			case SPEED_1000:
				val |= BNX2_EMAC_MODE_PORT_GMII;
				break;
		}
	}
	else {
		/* Link down: leave the port in GMII mode. */
		val |= BNX2_EMAC_MODE_PORT_GMII;
	}

	/* Set the MAC to operate in the appropriate duplex mode. */
	if (bp->duplex == DUPLEX_HALF)
		val |= BNX2_EMAC_MODE_HALF_DUPLEX;
	REG_WR(bp, BNX2_EMAC_MODE, val);

	/* Enable/disable rx PAUSE. */
	bp->rx_mode &= ~BNX2_EMAC_RX_MODE_FLOW_EN;

	if (bp->flow_ctrl & FLOW_CTRL_RX)
		bp->rx_mode |= BNX2_EMAC_RX_MODE_FLOW_EN;
	REG_WR(bp, BNX2_EMAC_RX_MODE, bp->rx_mode);

	/* Enable/disable tx PAUSE. */
	val = REG_RD(bp, BNX2_EMAC_TX_MODE);
	val &= ~BNX2_EMAC_TX_MODE_FLOW_EN;

	if (bp->flow_ctrl & FLOW_CTRL_TX)
		val |= BNX2_EMAC_TX_MODE_FLOW_EN;
	REG_WR(bp, BNX2_EMAC_TX_MODE, val);

	/* Acknowledge the interrupt. */
	REG_WR(bp, BNX2_EMAC_STATUS, BNX2_EMAC_STATUS_LINK_CHANGE);

	return 0;
}
999
1000 static void
1001 bnx2_enable_bmsr1(struct bnx2 *bp)
1002 {
1003         if ((bp->phy_flags & PHY_SERDES_FLAG) &&
1004             (CHIP_NUM(bp) == CHIP_NUM_5709))
1005                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1006                                MII_BNX2_BLK_ADDR_GP_STATUS);
1007 }
1008
1009 static void
1010 bnx2_disable_bmsr1(struct bnx2 *bp)
1011 {
1012         if ((bp->phy_flags & PHY_SERDES_FLAG) &&
1013             (CHIP_NUM(bp) == CHIP_NUM_5709))
1014                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1015                                MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1016 }
1017
1018 static int
1019 bnx2_test_and_enable_2g5(struct bnx2 *bp)
1020 {
1021         u32 up1;
1022         int ret = 1;
1023
1024         if (!(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
1025                 return 0;
1026
1027         if (bp->autoneg & AUTONEG_SPEED)
1028                 bp->advertising |= ADVERTISED_2500baseX_Full;
1029
1030         if (CHIP_NUM(bp) == CHIP_NUM_5709)
1031                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
1032
1033         bnx2_read_phy(bp, bp->mii_up1, &up1);
1034         if (!(up1 & BCM5708S_UP1_2G5)) {
1035                 up1 |= BCM5708S_UP1_2G5;
1036                 bnx2_write_phy(bp, bp->mii_up1, up1);
1037                 ret = 0;
1038         }
1039
1040         if (CHIP_NUM(bp) == CHIP_NUM_5709)
1041                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1042                                MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1043
1044         return ret;
1045 }
1046
1047 static int
1048 bnx2_test_and_disable_2g5(struct bnx2 *bp)
1049 {
1050         u32 up1;
1051         int ret = 0;
1052
1053         if (!(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
1054                 return 0;
1055
1056         if (CHIP_NUM(bp) == CHIP_NUM_5709)
1057                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
1058
1059         bnx2_read_phy(bp, bp->mii_up1, &up1);
1060         if (up1 & BCM5708S_UP1_2G5) {
1061                 up1 &= ~BCM5708S_UP1_2G5;
1062                 bnx2_write_phy(bp, bp->mii_up1, up1);
1063                 ret = 1;
1064         }
1065
1066         if (CHIP_NUM(bp) == CHIP_NUM_5709)
1067                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1068                                MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1069
1070         return ret;
1071 }
1072
1073 static void
1074 bnx2_enable_forced_2g5(struct bnx2 *bp)
1075 {
1076         u32 bmcr;
1077
1078         if (!(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
1079                 return;
1080
1081         if (CHIP_NUM(bp) == CHIP_NUM_5709) {
1082                 u32 val;
1083
1084                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1085                                MII_BNX2_BLK_ADDR_SERDES_DIG);
1086                 bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val);
1087                 val &= ~MII_BNX2_SD_MISC1_FORCE_MSK;
1088                 val |= MII_BNX2_SD_MISC1_FORCE | MII_BNX2_SD_MISC1_FORCE_2_5G;
1089                 bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);
1090
1091                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1092                                MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1093                 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1094
1095         } else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
1096                 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1097                 bmcr |= BCM5708S_BMCR_FORCE_2500;
1098         }
1099
1100         if (bp->autoneg & AUTONEG_SPEED) {
1101                 bmcr &= ~BMCR_ANENABLE;
1102                 if (bp->req_duplex == DUPLEX_FULL)
1103                         bmcr |= BMCR_FULLDPLX;
1104         }
1105         bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
1106 }
1107
1108 static void
1109 bnx2_disable_forced_2g5(struct bnx2 *bp)
1110 {
1111         u32 bmcr;
1112
1113         if (!(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
1114                 return;
1115
1116         if (CHIP_NUM(bp) == CHIP_NUM_5709) {
1117                 u32 val;
1118
1119                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1120                                MII_BNX2_BLK_ADDR_SERDES_DIG);
1121                 bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val);
1122                 val &= ~MII_BNX2_SD_MISC1_FORCE;
1123                 bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);
1124
1125                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1126                                MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1127                 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1128
1129         } else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
1130                 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1131                 bmcr &= ~BCM5708S_BMCR_FORCE_2500;
1132         }
1133
1134         if (bp->autoneg & AUTONEG_SPEED)
1135                 bmcr |= BMCR_SPEED1000 | BMCR_ANENABLE | BMCR_ANRESTART;
1136         bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
1137 }
1138
/* Re-evaluate the PHY link state and reprogram the MAC accordingly.
 * Reports a link change via bnx2_report_link() when the up/down state
 * flips.  Always returns 0.  Caller is expected to hold phy_lock
 * (all PHY accesses here assume serialized MDIO use).
 */
static int
bnx2_set_link(struct bnx2 *bp)
{
	u32 bmsr;
	u8 link_up;

	/* In loopback modes the link is considered up by definition. */
	if (bp->loopback == MAC_LOOPBACK || bp->loopback == PHY_LOOPBACK) {
		bp->link_up = 1;
		return 0;
	}

	/* Remote (firmware-managed) PHYs are handled via firmware
	 * events, not by direct polling here.
	 */
	if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
		return 0;

	link_up = bp->link_up;

	/* Read BMSR twice: the link-status bit is latched-low, so the
	 * first read clears a stale link-down indication and the
	 * second returns the current state.
	 */
	bnx2_enable_bmsr1(bp);
	bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
	bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
	bnx2_disable_bmsr1(bp);

	/* On the 5706 SerDes, trust the EMAC's link indication over
	 * the PHY status bit.
	 */
	if ((bp->phy_flags & PHY_SERDES_FLAG) &&
	    (CHIP_NUM(bp) == CHIP_NUM_5706)) {
		u32 val;

		val = REG_RD(bp, BNX2_EMAC_STATUS);
		if (val & BNX2_EMAC_STATUS_LINK)
			bmsr |= BMSR_LSTATUS;
		else
			bmsr &= ~BMSR_LSTATUS;
	}

	if (bmsr & BMSR_LSTATUS) {
		bp->link_up = 1;

		/* Decode speed/duplex with the chip-specific helper. */
		if (bp->phy_flags & PHY_SERDES_FLAG) {
			if (CHIP_NUM(bp) == CHIP_NUM_5706)
				bnx2_5706s_linkup(bp);
			else if (CHIP_NUM(bp) == CHIP_NUM_5708)
				bnx2_5708s_linkup(bp);
			else if (CHIP_NUM(bp) == CHIP_NUM_5709)
				bnx2_5709s_linkup(bp);
		}
		else {
			bnx2_copper_linkup(bp);
		}
		bnx2_resolve_flow_ctrl(bp);
	}
	else {
		/* Link lost: drop any forced 2.5G setting so autoneg
		 * can run again when the link returns.
		 */
		if ((bp->phy_flags & PHY_SERDES_FLAG) &&
		    (bp->autoneg & AUTONEG_SPEED))
			bnx2_disable_forced_2g5(bp);

		bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG;
		bp->link_up = 0;
	}

	if (bp->link_up != link_up) {
		bnx2_report_link(bp);
	}

	bnx2_set_mac_link(bp);

	return 0;
}
1204
1205 static int
1206 bnx2_reset_phy(struct bnx2 *bp)
1207 {
1208         int i;
1209         u32 reg;
1210
1211         bnx2_write_phy(bp, bp->mii_bmcr, BMCR_RESET);
1212
1213 #define PHY_RESET_MAX_WAIT 100
1214         for (i = 0; i < PHY_RESET_MAX_WAIT; i++) {
1215                 udelay(10);
1216
1217                 bnx2_read_phy(bp, bp->mii_bmcr, &reg);
1218                 if (!(reg & BMCR_RESET)) {
1219                         udelay(20);
1220                         break;
1221                 }
1222         }
1223         if (i == PHY_RESET_MAX_WAIT) {
1224                 return -EBUSY;
1225         }
1226         return 0;
1227 }
1228
1229 static u32
1230 bnx2_phy_get_pause_adv(struct bnx2 *bp)
1231 {
1232         u32 adv = 0;
1233
1234         if ((bp->req_flow_ctrl & (FLOW_CTRL_RX | FLOW_CTRL_TX)) ==
1235                 (FLOW_CTRL_RX | FLOW_CTRL_TX)) {
1236
1237                 if (bp->phy_flags & PHY_SERDES_FLAG) {
1238                         adv = ADVERTISE_1000XPAUSE;
1239                 }
1240                 else {
1241                         adv = ADVERTISE_PAUSE_CAP;
1242                 }
1243         }
1244         else if (bp->req_flow_ctrl & FLOW_CTRL_TX) {
1245                 if (bp->phy_flags & PHY_SERDES_FLAG) {
1246                         adv = ADVERTISE_1000XPSE_ASYM;
1247                 }
1248                 else {
1249                         adv = ADVERTISE_PAUSE_ASYM;
1250                 }
1251         }
1252         else if (bp->req_flow_ctrl & FLOW_CTRL_RX) {
1253                 if (bp->phy_flags & PHY_SERDES_FLAG) {
1254                         adv = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
1255                 }
1256                 else {
1257                         adv = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
1258                 }
1259         }
1260         return adv;
1261 }
1262
1263 static int bnx2_fw_sync(struct bnx2 *, u32, int);
1264
1265 static int
1266 bnx2_setup_remote_phy(struct bnx2 *bp, u8 port)
1267 {
1268         u32 speed_arg = 0, pause_adv;
1269
1270         pause_adv = bnx2_phy_get_pause_adv(bp);
1271
1272         if (bp->autoneg & AUTONEG_SPEED) {
1273                 speed_arg |= BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG;
1274                 if (bp->advertising & ADVERTISED_10baseT_Half)
1275                         speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_10HALF;
1276                 if (bp->advertising & ADVERTISED_10baseT_Full)
1277                         speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_10FULL;
1278                 if (bp->advertising & ADVERTISED_100baseT_Half)
1279                         speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_100HALF;
1280                 if (bp->advertising & ADVERTISED_100baseT_Full)
1281                         speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_100FULL;
1282                 if (bp->advertising & ADVERTISED_1000baseT_Full)
1283                         speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_1GFULL;
1284                 if (bp->advertising & ADVERTISED_2500baseX_Full)
1285                         speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_2G5FULL;
1286         } else {
1287                 if (bp->req_line_speed == SPEED_2500)
1288                         speed_arg = BNX2_NETLINK_SET_LINK_SPEED_2G5FULL;
1289                 else if (bp->req_line_speed == SPEED_1000)
1290                         speed_arg = BNX2_NETLINK_SET_LINK_SPEED_1GFULL;
1291                 else if (bp->req_line_speed == SPEED_100) {
1292                         if (bp->req_duplex == DUPLEX_FULL)
1293                                 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_100FULL;
1294                         else
1295                                 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_100HALF;
1296                 } else if (bp->req_line_speed == SPEED_10) {
1297                         if (bp->req_duplex == DUPLEX_FULL)
1298                                 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_10FULL;
1299                         else
1300                                 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_10HALF;
1301                 }
1302         }
1303
1304         if (pause_adv & (ADVERTISE_1000XPAUSE | ADVERTISE_PAUSE_CAP))
1305                 speed_arg |= BNX2_NETLINK_SET_LINK_FC_SYM_PAUSE;
1306         if (pause_adv & (ADVERTISE_1000XPSE_ASYM | ADVERTISE_1000XPSE_ASYM))
1307                 speed_arg |= BNX2_NETLINK_SET_LINK_FC_ASYM_PAUSE;
1308
1309         if (port == PORT_TP)
1310                 speed_arg |= BNX2_NETLINK_SET_LINK_PHY_APP_REMOTE |
1311                              BNX2_NETLINK_SET_LINK_ETH_AT_WIRESPEED;
1312
1313         REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_MB_ARG0, speed_arg);
1314
1315         spin_unlock_bh(&bp->phy_lock);
1316         bnx2_fw_sync(bp, BNX2_DRV_MSG_CODE_CMD_SET_LINK, 0);
1317         spin_lock_bh(&bp->phy_lock);
1318
1319         return 0;
1320 }
1321
/* Configure a SerDes PHY according to the requested settings.  Remote
 * PHYs are delegated to the firmware.  In forced mode the BMCR and
 * advertisement registers are programmed directly (bouncing the link
 * if anything changed); in autoneg mode the advertisement is updated
 * and autoneg restarted, arming a timer to handle non-negotiating
 * partners.  Caller holds phy_lock; it is dropped briefly around the
 * msleep() below.  Always returns 0.
 */
static int
bnx2_setup_serdes_phy(struct bnx2 *bp, u8 port)
{
	u32 adv, bmcr;
	u32 new_adv = 0;

	if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
		return (bnx2_setup_remote_phy(bp, port));

	if (!(bp->autoneg & AUTONEG_SPEED)) {
		u32 new_bmcr;
		int force_link_down = 0;

		/* If the 2.5G advertisement had to change, the link
		 * must be bounced for the partner to notice.
		 */
		if (bp->req_line_speed == SPEED_2500) {
			if (!bnx2_test_and_enable_2g5(bp))
				force_link_down = 1;
		} else if (bp->req_line_speed == SPEED_1000) {
			if (bnx2_test_and_disable_2g5(bp))
				force_link_down = 1;
		}
		bnx2_read_phy(bp, bp->mii_adv, &adv);
		adv &= ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF);

		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
		new_bmcr = bmcr & ~BMCR_ANENABLE;
		new_bmcr |= BMCR_SPEED1000;

		if (CHIP_NUM(bp) == CHIP_NUM_5709) {
			if (bp->req_line_speed == SPEED_2500)
				bnx2_enable_forced_2g5(bp);
			else if (bp->req_line_speed == SPEED_1000) {
				bnx2_disable_forced_2g5(bp);
				/* NOTE(review): 0x2000 is the
				 * BMCR_SPEED100 bit — presumably the
				 * speed-select LSB must be cleared
				 * when forcing 1G on the 5709; confirm
				 * against the PHY datasheet.
				 */
				new_bmcr &= ~0x2000;
			}

		} else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
			if (bp->req_line_speed == SPEED_2500)
				new_bmcr |= BCM5708S_BMCR_FORCE_2500;
			else
				new_bmcr = bmcr & ~BCM5708S_BMCR_FORCE_2500;
		}

		if (bp->req_duplex == DUPLEX_FULL) {
			adv |= ADVERTISE_1000XFULL;
			new_bmcr |= BMCR_FULLDPLX;
		}
		else {
			adv |= ADVERTISE_1000XHALF;
			new_bmcr &= ~BMCR_FULLDPLX;
		}
		if ((new_bmcr != bmcr) || (force_link_down)) {
			/* Force a link down visible on the other side */
			if (bp->link_up) {
				/* Briefly advertise nothing with
				 * autoneg restarted so the partner
				 * drops the link before the forced
				 * settings are applied.
				 */
				bnx2_write_phy(bp, bp->mii_adv, adv &
					       ~(ADVERTISE_1000XFULL |
						 ADVERTISE_1000XHALF));
				bnx2_write_phy(bp, bp->mii_bmcr, bmcr |
					BMCR_ANRESTART | BMCR_ANENABLE);

				bp->link_up = 0;
				netif_carrier_off(bp->dev);
				bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
				bnx2_report_link(bp);
			}
			bnx2_write_phy(bp, bp->mii_adv, adv);
			bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
		} else {
			/* Nothing changed: just refresh MAC state. */
			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);
		}
		return 0;
	}

	/* Autoneg path. */
	bnx2_test_and_enable_2g5(bp);

	if (bp->advertising & ADVERTISED_1000baseT_Full)
		new_adv |= ADVERTISE_1000XFULL;

	new_adv |= bnx2_phy_get_pause_adv(bp);

	bnx2_read_phy(bp, bp->mii_adv, &adv);
	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

	bp->serdes_an_pending = 0;
	if ((adv != new_adv) || ((bmcr & BMCR_ANENABLE) == 0)) {
		/* Force a link down visible on the other side */
		if (bp->link_up) {
			bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
			/* msleep() sleeps; phy_lock must be dropped. */
			spin_unlock_bh(&bp->phy_lock);
			msleep(20);
			spin_lock_bh(&bp->phy_lock);
		}

		bnx2_write_phy(bp, bp->mii_adv, new_adv);
		bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART |
			BMCR_ANENABLE);
		/* Speed up link-up time when the link partner
		 * does not autonegotiate which is very common
		 * in blade servers. Some blade servers use
		 * IPMI for keyboard input and it's important
		 * to minimize link disruptions. Autoneg. involves
		 * exchanging base pages plus 3 next pages and
		 * normally completes in about 120 msec.
		 */
		bp->current_interval = SERDES_AN_TIMEOUT;
		bp->serdes_an_pending = 1;
		mod_timer(&bp->timer, jiffies + bp->current_interval);
	} else {
		bnx2_resolve_flow_ctrl(bp);
		bnx2_set_mac_link(bp);
	}

	return 0;
}
1436
1437 #define ETHTOOL_ALL_FIBRE_SPEED                                         \
1438         (bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) ?                       \
1439                 (ADVERTISED_2500baseX_Full | ADVERTISED_1000baseT_Full) :\
1440                 (ADVERTISED_1000baseT_Full)
1441
1442 #define ETHTOOL_ALL_COPPER_SPEED                                        \
1443         (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |            \
1444         ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |           \
1445         ADVERTISED_1000baseT_Full)
1446
1447 #define PHY_ALL_10_100_SPEED (ADVERTISE_10HALF | ADVERTISE_10FULL | \
1448         ADVERTISE_100HALF | ADVERTISE_100FULL | ADVERTISE_CSMA)
1449
1450 #define PHY_ALL_1000_SPEED (ADVERTISE_1000HALF | ADVERTISE_1000FULL)
1451
1452 static void
1453 bnx2_set_default_remote_link(struct bnx2 *bp)
1454 {
1455         u32 link;
1456
1457         if (bp->phy_port == PORT_TP)
1458                 link = REG_RD_IND(bp, bp->shmem_base + BNX2_RPHY_COPPER_LINK);
1459         else
1460                 link = REG_RD_IND(bp, bp->shmem_base + BNX2_RPHY_SERDES_LINK);
1461
1462         if (link & BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG) {
1463                 bp->req_line_speed = 0;
1464                 bp->autoneg |= AUTONEG_SPEED;
1465                 bp->advertising = ADVERTISED_Autoneg;
1466                 if (link & BNX2_NETLINK_SET_LINK_SPEED_10HALF)
1467                         bp->advertising |= ADVERTISED_10baseT_Half;
1468                 if (link & BNX2_NETLINK_SET_LINK_SPEED_10FULL)
1469                         bp->advertising |= ADVERTISED_10baseT_Full;
1470                 if (link & BNX2_NETLINK_SET_LINK_SPEED_100HALF)
1471                         bp->advertising |= ADVERTISED_100baseT_Half;
1472                 if (link & BNX2_NETLINK_SET_LINK_SPEED_100FULL)
1473                         bp->advertising |= ADVERTISED_100baseT_Full;
1474                 if (link & BNX2_NETLINK_SET_LINK_SPEED_1GFULL)
1475                         bp->advertising |= ADVERTISED_1000baseT_Full;
1476                 if (link & BNX2_NETLINK_SET_LINK_SPEED_2G5FULL)
1477                         bp->advertising |= ADVERTISED_2500baseX_Full;
1478         } else {
1479                 bp->autoneg = 0;
1480                 bp->advertising = 0;
1481                 bp->req_duplex = DUPLEX_FULL;
1482                 if (link & BNX2_NETLINK_SET_LINK_SPEED_10) {
1483                         bp->req_line_speed = SPEED_10;
1484                         if (link & BNX2_NETLINK_SET_LINK_SPEED_10HALF)
1485                                 bp->req_duplex = DUPLEX_HALF;
1486                 }
1487                 if (link & BNX2_NETLINK_SET_LINK_SPEED_100) {
1488                         bp->req_line_speed = SPEED_100;
1489                         if (link & BNX2_NETLINK_SET_LINK_SPEED_100HALF)
1490                                 bp->req_duplex = DUPLEX_HALF;
1491                 }
1492                 if (link & BNX2_NETLINK_SET_LINK_SPEED_1GFULL)
1493                         bp->req_line_speed = SPEED_1000;
1494                 if (link & BNX2_NETLINK_SET_LINK_SPEED_2G5FULL)
1495                         bp->req_line_speed = SPEED_2500;
1496         }
1497 }
1498
1499 static void
1500 bnx2_set_default_link(struct bnx2 *bp)
1501 {
1502         if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
1503                 return bnx2_set_default_remote_link(bp);
1504
1505         bp->autoneg = AUTONEG_SPEED | AUTONEG_FLOW_CTRL;
1506         bp->req_line_speed = 0;
1507         if (bp->phy_flags & PHY_SERDES_FLAG) {
1508                 u32 reg;
1509
1510                 bp->advertising = ETHTOOL_ALL_FIBRE_SPEED | ADVERTISED_Autoneg;
1511
1512                 reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_CONFIG);
1513                 reg &= BNX2_PORT_HW_CFG_CFG_DFLT_LINK_MASK;
1514                 if (reg == BNX2_PORT_HW_CFG_CFG_DFLT_LINK_1G) {
1515                         bp->autoneg = 0;
1516                         bp->req_line_speed = bp->line_speed = SPEED_1000;
1517                         bp->req_duplex = DUPLEX_FULL;
1518                 }
1519         } else
1520                 bp->advertising = ETHTOOL_ALL_COPPER_SPEED | ADVERTISED_Autoneg;
1521 }
1522
/* Advance the driver pulse sequence number and write it to the
 * shared-memory pulse mailbox so the bootcode knows the driver is
 * alive.  The register-window writes are paired, so indirect_lock
 * serializes them against other indirect accesses.
 */
static void
bnx2_send_heart_beat(struct bnx2 *bp)
{
	u32 msg;
	u32 addr;

	spin_lock(&bp->indirect_lock);
	/* Sequence number wraps within BNX2_DRV_PULSE_SEQ_MASK. */
	msg = (u32) (++bp->fw_drv_pulse_wr_seq & BNX2_DRV_PULSE_SEQ_MASK);
	addr = bp->shmem_base + BNX2_DRV_PULSE_MB;
	REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, addr);
	REG_WR(bp, BNX2_PCICFG_REG_WINDOW, msg);
	spin_unlock(&bp->indirect_lock);
}
1536
/* Handle a link event from the firmware-managed (remote) PHY: decode
 * the BNX2_LINK_STATUS shared-memory word into link state, speed,
 * duplex, flow control and PHY port, then reprogram the MAC and
 * report any up/down transition.
 */
static void
bnx2_remote_phy_event(struct bnx2 *bp)
{
	u32 msg;
	u8 link_up = bp->link_up;	/* previous state, to detect change */
	u8 old_port;

	msg = REG_RD_IND(bp, bp->shmem_base + BNX2_LINK_STATUS);

	/* Firmware sets this flag when it expects a driver heartbeat. */
	if (msg & BNX2_LINK_STATUS_HEART_BEAT_EXPIRED)
		bnx2_send_heart_beat(bp);

	msg &= ~BNX2_LINK_STATUS_HEART_BEAT_EXPIRED;

	if ((msg & BNX2_LINK_STATUS_LINK_UP) == BNX2_LINK_STATUS_LINK_DOWN)
		bp->link_up = 0;
	else {
		u32 speed;

		bp->link_up = 1;
		speed = msg & BNX2_LINK_STATUS_SPEED_MASK;
		bp->duplex = DUPLEX_FULL;
		/* Each *HALF case overrides the full-duplex default
		 * and then falls through to its speed assignment.
		 */
		switch (speed) {
			case BNX2_LINK_STATUS_10HALF:
				bp->duplex = DUPLEX_HALF;
				/* fall through */
			case BNX2_LINK_STATUS_10FULL:
				bp->line_speed = SPEED_10;
				break;
			case BNX2_LINK_STATUS_100HALF:
				bp->duplex = DUPLEX_HALF;
				/* fall through */
			case BNX2_LINK_STATUS_100BASE_T4:
			case BNX2_LINK_STATUS_100FULL:
				bp->line_speed = SPEED_100;
				break;
			case BNX2_LINK_STATUS_1000HALF:
				bp->duplex = DUPLEX_HALF;
				/* fall through */
			case BNX2_LINK_STATUS_1000FULL:
				bp->line_speed = SPEED_1000;
				break;
			case BNX2_LINK_STATUS_2500HALF:
				bp->duplex = DUPLEX_HALF;
				/* fall through */
			case BNX2_LINK_STATUS_2500FULL:
				bp->line_speed = SPEED_2500;
				break;
			default:
				bp->line_speed = 0;
				break;
		}

		spin_lock(&bp->phy_lock);
		bp->flow_ctrl = 0;
		/* Without full autoneg, the requested flow control
		 * applies (full duplex only); otherwise take the
		 * negotiated result reported by the firmware.
		 */
		if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
		    (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {
			if (bp->duplex == DUPLEX_FULL)
				bp->flow_ctrl = bp->req_flow_ctrl;
		} else {
			if (msg & BNX2_LINK_STATUS_TX_FC_ENABLED)
				bp->flow_ctrl |= FLOW_CTRL_TX;
			if (msg & BNX2_LINK_STATUS_RX_FC_ENABLED)
				bp->flow_ctrl |= FLOW_CTRL_RX;
		}

		/* If the firmware switched media types, reload the
		 * default link settings for the new port.
		 */
		old_port = bp->phy_port;
		if (msg & BNX2_LINK_STATUS_SERDES_LINK)
			bp->phy_port = PORT_FIBRE;
		else
			bp->phy_port = PORT_TP;

		if (old_port != bp->phy_port)
			bnx2_set_default_link(bp);

		spin_unlock(&bp->phy_lock);
	}
	if (bp->link_up != link_up)
		bnx2_report_link(bp);

	bnx2_set_mac_link(bp);
}
1615
1616 static int
1617 bnx2_set_remote_link(struct bnx2 *bp)
1618 {
1619         u32 evt_code;
1620
1621         evt_code = REG_RD_IND(bp, bp->shmem_base + BNX2_FW_EVT_CODE_MB);
1622         switch (evt_code) {
1623                 case BNX2_FW_EVT_CODE_LINK_EVENT:
1624                         bnx2_remote_phy_event(bp);
1625                         break;
1626                 case BNX2_FW_EVT_CODE_SW_TIMER_EXPIRATION_EVENT:
1627                 default:
1628                         bnx2_send_heart_beat(bp);
1629                         break;
1630         }
1631         return 0;
1632 }
1633
/* Program the copper PHY from bp->autoneg and the requested speed and
 * duplex (bp->req_line_speed / bp->req_duplex).
 * Called with bp->phy_lock held; the lock is dropped briefly around
 * the msleep() used to force the link down.  Always returns 0.
 */
static int
bnx2_setup_copper_phy(struct bnx2 *bp)
{
	u32 bmcr;
	u32 new_bmcr;

	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

	if (bp->autoneg & AUTONEG_SPEED) {
		u32 adv_reg, adv1000_reg;
		u32 new_adv_reg = 0;
		u32 new_adv1000_reg = 0;

		/* Current 10/100 advertisement, keeping only speed and
		 * pause bits for comparison below.
		 */
		bnx2_read_phy(bp, bp->mii_adv, &adv_reg);
		adv_reg &= (PHY_ALL_10_100_SPEED | ADVERTISE_PAUSE_CAP |
			ADVERTISE_PAUSE_ASYM);

		bnx2_read_phy(bp, MII_CTRL1000, &adv1000_reg);
		adv1000_reg &= PHY_ALL_1000_SPEED;

		/* Build the desired advertisement from bp->advertising. */
		if (bp->advertising & ADVERTISED_10baseT_Half)
			new_adv_reg |= ADVERTISE_10HALF;
		if (bp->advertising & ADVERTISED_10baseT_Full)
			new_adv_reg |= ADVERTISE_10FULL;
		if (bp->advertising & ADVERTISED_100baseT_Half)
			new_adv_reg |= ADVERTISE_100HALF;
		if (bp->advertising & ADVERTISED_100baseT_Full)
			new_adv_reg |= ADVERTISE_100FULL;
		if (bp->advertising & ADVERTISED_1000baseT_Full)
			new_adv1000_reg |= ADVERTISE_1000FULL;

		new_adv_reg |= ADVERTISE_CSMA;

		new_adv_reg |= bnx2_phy_get_pause_adv(bp);

		/* Only rewrite the advertisement and restart autoneg if
		 * something actually changed or autoneg was disabled.
		 */
		if ((adv1000_reg != new_adv1000_reg) ||
			(adv_reg != new_adv_reg) ||
			((bmcr & BMCR_ANENABLE) == 0)) {

			bnx2_write_phy(bp, bp->mii_adv, new_adv_reg);
			bnx2_write_phy(bp, MII_CTRL1000, new_adv1000_reg);
			bnx2_write_phy(bp, bp->mii_bmcr, BMCR_ANRESTART |
				BMCR_ANENABLE);
		}
		else if (bp->link_up) {
			/* Flow ctrl may have changed from auto to forced */
			/* or vice-versa. */

			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);
		}
		return 0;
	}

	/* Forced speed/duplex path. */
	new_bmcr = 0;
	if (bp->req_line_speed == SPEED_100) {
		new_bmcr |= BMCR_SPEED100;
	}
	if (bp->req_duplex == DUPLEX_FULL) {
		new_bmcr |= BMCR_FULLDPLX;
	}
	if (new_bmcr != bmcr) {
		u32 bmsr;

		/* BMSR is latched; read twice to get the current state. */
		bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
		bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);

		if (bmsr & BMSR_LSTATUS) {
			/* Force link down */
			bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
			spin_unlock_bh(&bp->phy_lock);
			msleep(50);
			spin_lock_bh(&bp->phy_lock);

			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
		}

		bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);

		/* Normally, the new speed is setup after the link has
		 * gone down and up again. In some cases, link will not go
		 * down so we need to set up the new speed here.
		 */
		if (bmsr & BMSR_LSTATUS) {
			bp->line_speed = bp->req_line_speed;
			bp->duplex = bp->req_duplex;
			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);
		}
	} else {
		bnx2_resolve_flow_ctrl(bp);
		bnx2_set_mac_link(bp);
	}
	return 0;
}
1730
1731 static int
1732 bnx2_setup_phy(struct bnx2 *bp, u8 port)
1733 {
1734         if (bp->loopback == MAC_LOOPBACK)
1735                 return 0;
1736
1737         if (bp->phy_flags & PHY_SERDES_FLAG) {
1738                 return (bnx2_setup_serdes_phy(bp, port));
1739         }
1740         else {
1741                 return (bnx2_setup_copper_phy(bp));
1742         }
1743 }
1744
/* One-time init of the 5709 SerDes PHY.  This PHY exposes its IEEE
 * registers at a +0x10 offset inside the AN MMD, so the driver's
 * mii_* shadow offsets are redirected first; the remaining writes
 * select register blocks via MII_BNX2_BLK_ADDR and program fiber
 * mode, optional 2.5G capability, and BAM/CL73 autoneg options.
 * Returns 0.
 */
static int
bnx2_init_5709s_phy(struct bnx2 *bp)
{
	u32 val;

	/* IEEE registers live at offset +0x10 on this PHY. */
	bp->mii_bmcr = MII_BMCR + 0x10;
	bp->mii_bmsr = MII_BMSR + 0x10;
	bp->mii_bmsr1 = MII_BNX2_GP_TOP_AN_STATUS1;
	bp->mii_adv = MII_ADVERTISE + 0x10;
	bp->mii_lpa = MII_LPA + 0x10;
	bp->mii_up1 = MII_BNX2_OVER1G_UP1;

	/* Map the AN MMD through the AER block. */
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_AER);
	bnx2_write_phy(bp, MII_BNX2_AER_AER, MII_BNX2_AER_AER_AN_MMD);

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
	bnx2_reset_phy(bp);

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_SERDES_DIG);

	/* Force fiber mode, disable media auto-detect. */
	bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, &val);
	val &= ~MII_BNX2_SD_1000XCTL1_AUTODET;
	val |= MII_BNX2_SD_1000XCTL1_FIBER;
	bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, val);

	/* Advertise 2.5G only when the PHY is capable. */
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
	bnx2_read_phy(bp, MII_BNX2_OVER1G_UP1, &val);
	if (bp->phy_flags & PHY_2_5G_CAPABLE_FLAG)
		val |= BCM5708S_UP1_2G5;
	else
		val &= ~BCM5708S_UP1_2G5;
	bnx2_write_phy(bp, MII_BNX2_OVER1G_UP1, val);

	/* Enable BAM next-page exchange. */
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_BAM_NXTPG);
	bnx2_read_phy(bp, MII_BNX2_BAM_NXTPG_CTL, &val);
	val |= MII_BNX2_NXTPG_CTL_T2 | MII_BNX2_NXTPG_CTL_BAM;
	bnx2_write_phy(bp, MII_BNX2_BAM_NXTPG_CTL, val);

	/* Enable CL73 BAM autoneg options. */
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_CL73_USERB0);

	val = MII_BNX2_CL73_BAM_EN | MII_BNX2_CL73_BAM_STA_MGR_EN |
	      MII_BNX2_CL73_BAM_NP_AFT_BP_EN;
	bnx2_write_phy(bp, MII_BNX2_CL73_BAM_CTL1, val);

	/* Leave the block address pointing at the combo IEEE block. */
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);

	return 0;
}
1793
/* One-time init of the 5708 SerDes PHY: reset, select fiber mode with
 * auto-detect, enable PLL early link detect, advertise 2.5G when
 * capable, and apply chip-rev and backplane-specific tx tuning from
 * NVRAM config.  Returns 0.
 */
static int
bnx2_init_5708s_phy(struct bnx2 *bp)
{
	u32 val;

	bnx2_reset_phy(bp);

	bp->mii_up1 = BCM5708S_UP1;

	/* DIG3 block: use the IEEE register set. */
	bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG3);
	bnx2_write_phy(bp, BCM5708S_DIG_3_0, BCM5708S_DIG_3_0_USE_IEEE);
	bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);

	/* Fiber mode with media auto-detect enabled. */
	bnx2_read_phy(bp, BCM5708S_1000X_CTL1, &val);
	val |= BCM5708S_1000X_CTL1_FIBER_MODE | BCM5708S_1000X_CTL1_AUTODET_EN;
	bnx2_write_phy(bp, BCM5708S_1000X_CTL1, val);

	/* Enable parallel (PLL early) link detection. */
	bnx2_read_phy(bp, BCM5708S_1000X_CTL2, &val);
	val |= BCM5708S_1000X_CTL2_PLLEL_DET_EN;
	bnx2_write_phy(bp, BCM5708S_1000X_CTL2, val);

	/* Advertise 2.5G only on capable parts. */
	if (bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) {
		bnx2_read_phy(bp, BCM5708S_UP1, &val);
		val |= BCM5708S_UP1_2G5;
		bnx2_write_phy(bp, BCM5708S_UP1, val);
	}

	if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_B1)) {
		/* increase tx signal amplitude */
		bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
			       BCM5708S_BLK_ADDR_TX_MISC);
		bnx2_read_phy(bp, BCM5708S_TX_ACTL1, &val);
		val &= ~BCM5708S_TX_ACTL1_DRIVER_VCM;
		bnx2_write_phy(bp, BCM5708S_TX_ACTL1, val);
		bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);
	}

	/* NVRAM may carry a per-port TXCTL3 tuning value; apply it only
	 * on backplane designs.
	 */
	val = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_CONFIG) &
	      BNX2_PORT_HW_CFG_CFG_TXCTL3_MASK;

	if (val) {
		u32 is_backplane;

		is_backplane = REG_RD_IND(bp, bp->shmem_base +
					  BNX2_SHARED_HW_CFG_CONFIG);
		if (is_backplane & BNX2_SHARED_HW_CFG_PHY_BACKPLANE) {
			bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
				       BCM5708S_BLK_ADDR_TX_MISC);
			bnx2_write_phy(bp, BCM5708S_TX_ACTL3, val);
			bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
				       BCM5708S_BLK_ADDR_DIG);
		}
	}
	return 0;
}
1851
/* One-time init of the 5706 SerDes PHY.  The 0x18/0x1c writes are
 * vendor shadow-register pokes, presumably adjusting extended packet
 * length handling for jumbo vs. standard MTU -- exact bit meanings
 * are undocumented here; keep the magic values as-is.  Returns 0.
 */
static int
bnx2_init_5706s_phy(struct bnx2 *bp)
{
	bnx2_reset_phy(bp);

	bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG;

	if (CHIP_NUM(bp) == CHIP_NUM_5706)
		REG_WR(bp, BNX2_MISC_GP_HW_CTL0, 0x300);

	if (bp->dev->mtu > 1500) {
		u32 val;

		/* Set extended packet length bit */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, (val & 0xfff8) | 0x4000);

		bnx2_write_phy(bp, 0x1c, 0x6c00);
		bnx2_read_phy(bp, 0x1c, &val);
		bnx2_write_phy(bp, 0x1c, (val & 0x3ff) | 0xec02);
	}
	else {
		u32 val;

		/* Clear the extended packet length bits for normal MTU. */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, val & ~0x4007);

		bnx2_write_phy(bp, 0x1c, 0x6c00);
		bnx2_read_phy(bp, 0x1c, &val);
		bnx2_write_phy(bp, 0x1c, (val & 0x3fd) | 0xec00);
	}

	return 0;
}
1888
/* One-time init of the copper PHY: reset, apply optional CRC and
 * early-DAC workarounds, set extended packet length for jumbo MTU,
 * and enable ethernet@wirespeed.  The 0x15/0x17/0x18 writes are
 * vendor shadow-register sequences; bit meanings are undocumented
 * here, keep the magic values as-is.  Returns 0.
 */
static int
bnx2_init_copper_phy(struct bnx2 *bp)
{
	u32 val;

	bnx2_reset_phy(bp);

	/* Workaround sequence for parts flagged with the CRC fix. */
	if (bp->phy_flags & PHY_CRC_FIX_FLAG) {
		bnx2_write_phy(bp, 0x18, 0x0c00);
		bnx2_write_phy(bp, 0x17, 0x000a);
		bnx2_write_phy(bp, 0x15, 0x310b);
		bnx2_write_phy(bp, 0x17, 0x201f);
		bnx2_write_phy(bp, 0x15, 0x9506);
		bnx2_write_phy(bp, 0x17, 0x401f);
		bnx2_write_phy(bp, 0x15, 0x14e2);
		bnx2_write_phy(bp, 0x18, 0x0400);
	}

	/* Clear bit 8 of DSP expansion register 8 to disable early DAC. */
	if (bp->phy_flags & PHY_DIS_EARLY_DAC_FLAG) {
		bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS,
			       MII_BNX2_DSP_EXPAND_REG | 0x8);
		bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &val);
		val &= ~(1 << 8);
		bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val);
	}

	if (bp->dev->mtu > 1500) {
		/* Set extended packet length bit */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, val | 0x4000);

		bnx2_read_phy(bp, 0x10, &val);
		bnx2_write_phy(bp, 0x10, val | 0x1);
	}
	else {
		/* Clear the extended packet length bits for normal MTU. */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, val & ~0x4007);

		bnx2_read_phy(bp, 0x10, &val);
		bnx2_write_phy(bp, 0x10, val & ~0x1);
	}

	/* ethernet@wirespeed */
	bnx2_write_phy(bp, 0x18, 0x7007);
	bnx2_read_phy(bp, 0x18, &val);
	bnx2_write_phy(bp, 0x18, val | (1 << 15) | (1 << 4));
	return 0;
}
1939
1940
1941 static int
1942 bnx2_init_phy(struct bnx2 *bp)
1943 {
1944         u32 val;
1945         int rc = 0;
1946
1947         bp->phy_flags &= ~PHY_INT_MODE_MASK_FLAG;
1948         bp->phy_flags |= PHY_INT_MODE_LINK_READY_FLAG;
1949
1950         bp->mii_bmcr = MII_BMCR;
1951         bp->mii_bmsr = MII_BMSR;
1952         bp->mii_bmsr1 = MII_BMSR;
1953         bp->mii_adv = MII_ADVERTISE;
1954         bp->mii_lpa = MII_LPA;
1955
1956         REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);
1957
1958         if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
1959                 goto setup_phy;
1960
1961         bnx2_read_phy(bp, MII_PHYSID1, &val);
1962         bp->phy_id = val << 16;
1963         bnx2_read_phy(bp, MII_PHYSID2, &val);
1964         bp->phy_id |= val & 0xffff;
1965
1966         if (bp->phy_flags & PHY_SERDES_FLAG) {
1967                 if (CHIP_NUM(bp) == CHIP_NUM_5706)
1968                         rc = bnx2_init_5706s_phy(bp);
1969                 else if (CHIP_NUM(bp) == CHIP_NUM_5708)
1970                         rc = bnx2_init_5708s_phy(bp);
1971                 else if (CHIP_NUM(bp) == CHIP_NUM_5709)
1972                         rc = bnx2_init_5709s_phy(bp);
1973         }
1974         else {
1975                 rc = bnx2_init_copper_phy(bp);
1976         }
1977
1978 setup_phy:
1979         if (!rc)
1980                 rc = bnx2_setup_phy(bp, bp->phy_port);
1981
1982         return rc;
1983 }
1984
1985 static int
1986 bnx2_set_mac_loopback(struct bnx2 *bp)
1987 {
1988         u32 mac_mode;
1989
1990         mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
1991         mac_mode &= ~BNX2_EMAC_MODE_PORT;
1992         mac_mode |= BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK;
1993         REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
1994         bp->link_up = 1;
1995         return 0;
1996 }
1997
1998 static int bnx2_test_link(struct bnx2 *);
1999
2000 static int
2001 bnx2_set_phy_loopback(struct bnx2 *bp)
2002 {
2003         u32 mac_mode;
2004         int rc, i;
2005
2006         spin_lock_bh(&bp->phy_lock);
2007         rc = bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK | BMCR_FULLDPLX |
2008                             BMCR_SPEED1000);
2009         spin_unlock_bh(&bp->phy_lock);
2010         if (rc)
2011                 return rc;
2012
2013         for (i = 0; i < 10; i++) {
2014                 if (bnx2_test_link(bp) == 0)
2015                         break;
2016                 msleep(100);
2017         }
2018
2019         mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
2020         mac_mode &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
2021                       BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
2022                       BNX2_EMAC_MODE_25G_MODE);
2023
2024         mac_mode |= BNX2_EMAC_MODE_PORT_GMII;
2025         REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
2026         bp->link_up = 1;
2027         return 0;
2028 }
2029
/* Synchronize with the bootcode firmware.  The message, with an
 * incrementing sequence number folded in, is posted to the DRV_MB
 * mailbox; the FW_MB mailbox is then polled until the firmware echoes
 * the sequence number back as an ACK.
 * Returns 0 on success (and unconditionally for WAIT0 messages),
 * -EBUSY if the firmware never ACKed within FW_ACK_TIME_OUT_MS, or
 * -EIO if it ACKed with a failure status.  @silent suppresses the
 * timeout message.
 */
static int
bnx2_fw_sync(struct bnx2 *bp, u32 msg_data, int silent)
{
	int i;
	u32 val;

	bp->fw_wr_seq++;
	msg_data |= bp->fw_wr_seq;

	REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_MB, msg_data);

	/* wait for an acknowledgement. */
	for (i = 0; i < (FW_ACK_TIME_OUT_MS / 10); i++) {
		msleep(10);

		val = REG_RD_IND(bp, bp->shmem_base + BNX2_FW_MB);

		if ((val & BNX2_FW_MSG_ACK) == (msg_data & BNX2_DRV_MSG_SEQ))
			break;
	}
	/* WAIT0 messages do not require a firmware status check. */
	if ((msg_data & BNX2_DRV_MSG_DATA) == BNX2_DRV_MSG_DATA_WAIT0)
		return 0;

	/* If we timed out, inform the firmware that this is the case. */
	if ((val & BNX2_FW_MSG_ACK) != (msg_data & BNX2_DRV_MSG_SEQ)) {
		if (!silent)
			printk(KERN_ERR PFX "fw sync timeout, reset code = "
					    "%x\n", msg_data);

		msg_data &= ~BNX2_DRV_MSG_CODE;
		msg_data |= BNX2_DRV_MSG_CODE_FW_TIMEOUT;

		REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_MB, msg_data);

		return -EBUSY;
	}

	/* Firmware ACKed; make sure it reported a good status. */
	if ((val & BNX2_FW_MSG_STATUS_MASK) != BNX2_FW_MSG_STATUS_OK)
		return -EIO;

	return 0;
}
2072
/* Initialize the 5709's paged context memory: kick the context memory
 * init, then load the host page table with the DMA addresses of the
 * pre-allocated ctx_blk pages, polling each write for completion.
 * Returns 0, or -EBUSY if the hardware does not finish in time.
 */
static int
bnx2_init_5709_context(struct bnx2 *bp)
{
	int i, ret = 0;
	u32 val;

	/* Bit 12 is undocumented here -- keep as-is.  The page-size
	 * field encodes BCM_PAGE_BITS relative to 256-byte pages.
	 */
	val = BNX2_CTX_COMMAND_ENABLED | BNX2_CTX_COMMAND_MEM_INIT | (1 << 12);
	val |= (BCM_PAGE_BITS - 8) << 16;
	REG_WR(bp, BNX2_CTX_COMMAND, val);
	/* Wait for the context memory init to self-clear. */
	for (i = 0; i < 10; i++) {
		val = REG_RD(bp, BNX2_CTX_COMMAND);
		if (!(val & BNX2_CTX_COMMAND_MEM_INIT))
			break;
		udelay(2);
	}
	if (val & BNX2_CTX_COMMAND_MEM_INIT)
		return -EBUSY;

	/* Program one host page table entry per context page. */
	for (i = 0; i < bp->ctx_pages; i++) {
		int j;

		REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA0,
		       (bp->ctx_blk_mapping[i] & 0xffffffff) |
		       BNX2_CTX_HOST_PAGE_TBL_DATA0_VALID);
		REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA1,
		       (u64) bp->ctx_blk_mapping[i] >> 32);
		REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL, i |
		       BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ);
		/* Poll for the write request to self-clear. */
		for (j = 0; j < 10; j++) {

			val = REG_RD(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL);
			if (!(val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ))
				break;
			udelay(5);
		}
		if (val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ) {
			ret = -EBUSY;
			break;
		}
	}
	return ret;
}
2115
/* Zero out the on-chip context memory for all 96 contexts (non-5709
 * chips; the 5709's paged context is handled by
 * bnx2_init_5709_context() instead).
 */
static void
bnx2_init_context(struct bnx2 *bp)
{
	u32 vcid;

	vcid = 96;
	while (vcid) {
		u32 vcid_addr, pcid_addr, offset;
		int i;

		vcid--;

		if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
			/* 5706 A0: some virtual CIDs map to remapped
			 * physical context pages -- presumably a silicon
			 * erratum workaround; confirm against errata doc.
			 */
			u32 new_vcid;

			vcid_addr = GET_PCID_ADDR(vcid);
			if (vcid & 0x8) {
				new_vcid = 0x60 + (vcid & 0xf0) + (vcid & 0x7);
			}
			else {
				new_vcid = vcid;
			}
			pcid_addr = GET_PCID_ADDR(new_vcid);
		}
		else {
			vcid_addr = GET_CID_ADDR(vcid);
			pcid_addr = vcid_addr;
		}

		/* A context spans CTX_SIZE/PHY_CTX_SIZE physical pages.
		 * NOTE(review): the += below advances cumulatively by
		 * (i << PHY_CTX_SHIFT) each pass; correct for a 2-page
		 * ratio, verify if the ratio ever changes.
		 */
		for (i = 0; i < (CTX_SIZE / PHY_CTX_SIZE); i++) {
			vcid_addr += (i << PHY_CTX_SHIFT);
			pcid_addr += (i << PHY_CTX_SHIFT);

			REG_WR(bp, BNX2_CTX_VIRT_ADDR, vcid_addr);
			REG_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr);

			/* Zero out the context. */
			for (offset = 0; offset < PHY_CTX_SIZE; offset += 4)
				CTX_WR(bp, vcid_addr, offset, 0);
		}
	}
}
2158
/* Work around bad on-chip rx buffer memory: ask the chip to allocate
 * every free mbuf cluster, record the good ones (bit 9 clear in the
 * returned value), then free only the good ones back.  The bad
 * clusters remain allocated forever and thus out of the rx pool.
 * Returns 0 or -ENOMEM.
 * NOTE(review): good_mbuf holds at most 512 entries with no bounds
 * check on good_mbuf_cnt -- assumes the chip never reports more than
 * 512 free clusters; confirm against the rbuf pool size.
 */
static int
bnx2_alloc_bad_rbuf(struct bnx2 *bp)
{
	u16 *good_mbuf;
	u32 good_mbuf_cnt;
	u32 val;

	good_mbuf = kmalloc(512 * sizeof(u16), GFP_KERNEL);
	if (good_mbuf == NULL) {
		printk(KERN_ERR PFX "Failed to allocate memory in "
				    "bnx2_alloc_bad_rbuf\n");
		return -ENOMEM;
	}

	REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
		BNX2_MISC_ENABLE_SET_BITS_RX_MBUF_ENABLE);

	good_mbuf_cnt = 0;

	/* Allocate a bunch of mbufs and save the good ones in an array. */
	val = REG_RD_IND(bp, BNX2_RBUF_STATUS1);
	while (val & BNX2_RBUF_STATUS1_FREE_COUNT) {
		REG_WR_IND(bp, BNX2_RBUF_COMMAND, BNX2_RBUF_COMMAND_ALLOC_REQ);

		val = REG_RD_IND(bp, BNX2_RBUF_FW_BUF_ALLOC);

		val &= BNX2_RBUF_FW_BUF_ALLOC_VALUE;

		/* The addresses with Bit 9 set are bad memory blocks. */
		if (!(val & (1 << 9))) {
			good_mbuf[good_mbuf_cnt] = (u16) val;
			good_mbuf_cnt++;
		}

		val = REG_RD_IND(bp, BNX2_RBUF_STATUS1);
	}

	/* Free the good ones back to the mbuf pool thus discarding
	 * all the bad ones. */
	while (good_mbuf_cnt) {
		good_mbuf_cnt--;

		val = good_mbuf[good_mbuf_cnt];
		/* Pack the cluster number into the format the FW_BUF_FREE
		 * register expects -- magic encoding, keep as-is.
		 */
		val = (val << 9) | val | 1;

		REG_WR_IND(bp, BNX2_RBUF_FW_BUF_FREE, val);
	}
	kfree(good_mbuf);
	return 0;
}
2209
2210 static void
2211 bnx2_set_mac_addr(struct bnx2 *bp)
2212 {
2213         u32 val;
2214         u8 *mac_addr = bp->dev->dev_addr;
2215
2216         val = (mac_addr[0] << 8) | mac_addr[1];
2217
2218         REG_WR(bp, BNX2_EMAC_MAC_MATCH0, val);
2219
2220         val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
2221                 (mac_addr[4] << 8) | mac_addr[5];
2222
2223         REG_WR(bp, BNX2_EMAC_MAC_MATCH1, val);
2224 }
2225
/* Allocate and DMA-map a fresh page for rx page ring slot @index and
 * program the corresponding rx_bd with its bus address.
 * Returns 0 or -ENOMEM.
 * NOTE(review): the pci_map_page() result is not checked for mapping
 * errors -- confirm this is acceptable on all supported platforms.
 */
static inline int
bnx2_alloc_rx_page(struct bnx2 *bp, u16 index)
{
	dma_addr_t mapping;
	struct sw_pg *rx_pg = &bp->rx_pg_ring[index];
	struct rx_bd *rxbd =
		&bp->rx_pg_desc_ring[RX_RING(index)][RX_IDX(index)];
	struct page *page = alloc_page(GFP_ATOMIC);

	if (!page)
		return -ENOMEM;
	mapping = pci_map_page(bp->pdev, page, 0, PAGE_SIZE,
			       PCI_DMA_FROMDEVICE);
	rx_pg->page = page;
	pci_unmap_addr_set(rx_pg, mapping, mapping);
	/* Split the 64-bit bus address across the two BD halves. */
	rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
	rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;
	return 0;
}
2245
2246 static void
2247 bnx2_free_rx_page(struct bnx2 *bp, u16 index)
2248 {
2249         struct sw_pg *rx_pg = &bp->rx_pg_ring[index];
2250         struct page *page = rx_pg->page;
2251
2252         if (!page)
2253                 return;
2254
2255         pci_unmap_page(bp->pdev, pci_unmap_addr(rx_pg, mapping), PAGE_SIZE,
2256                        PCI_DMA_FROMDEVICE);
2257
2258         __free_page(page);
2259         rx_pg->page = NULL;
2260 }
2261
/* Allocate, align and DMA-map a new rx skb for ring slot @index,
 * program the rx_bd with its bus address, and advance
 * bp->rx_prod_bseq by the buffer size.  Returns 0 or -ENOMEM.
 * NOTE(review): the pci_map_single() result is not checked for
 * mapping errors -- confirm acceptable on all supported platforms.
 */
static inline int
bnx2_alloc_rx_skb(struct bnx2 *bp, u16 index)
{
	struct sk_buff *skb;
	struct sw_bd *rx_buf = &bp->rx_buf_ring[index];
	dma_addr_t mapping;
	struct rx_bd *rxbd = &bp->rx_desc_ring[RX_RING(index)][RX_IDX(index)];
	unsigned long align;

	skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
	if (skb == NULL) {
		return -ENOMEM;
	}

	/* Align skb->data to a BNX2_RX_ALIGN boundary. */
	if (unlikely((align = (unsigned long) skb->data & (BNX2_RX_ALIGN - 1))))
		skb_reserve(skb, BNX2_RX_ALIGN - align);

	mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_use_size,
		PCI_DMA_FROMDEVICE);

	rx_buf->skb = skb;
	pci_unmap_addr_set(rx_buf, mapping, mapping);

	/* Split the 64-bit bus address across the two BD halves. */
	rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
	rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;

	bp->rx_prod_bseq += bp->rx_buf_use_size;

	return 0;
}
2292
2293 static int
2294 bnx2_phy_event_is_set(struct bnx2 *bp, u32 event)
2295 {
2296         struct status_block *sblk = bp->status_blk;
2297         u32 new_link_state, old_link_state;
2298         int is_set = 1;
2299
2300         new_link_state = sblk->status_attn_bits & event;
2301         old_link_state = sblk->status_attn_bits_ack & event;
2302         if (new_link_state != old_link_state) {
2303                 if (new_link_state)
2304                         REG_WR(bp, BNX2_PCICFG_STATUS_BIT_SET_CMD, event);
2305                 else
2306                         REG_WR(bp, BNX2_PCICFG_STATUS_BIT_CLEAR_CMD, event);
2307         } else
2308                 is_set = 0;
2309
2310         return is_set;
2311 }
2312
/* Service PHY-related attention bits from the status block: a link
 * state change is handled locally under phy_lock, and a timer-abort
 * attention indicates the firmware posted an event for
 * bnx2_set_remote_link() to dispatch.
 */
static void
bnx2_phy_int(struct bnx2 *bp)
{
	if (bnx2_phy_event_is_set(bp, STATUS_ATTN_BITS_LINK_STATE)) {
		spin_lock(&bp->phy_lock);
		bnx2_set_link(bp);
		spin_unlock(&bp->phy_lock);
	}
	if (bnx2_phy_event_is_set(bp, STATUS_ATTN_BITS_TIMER_ABORT))
		bnx2_set_remote_link(bp);

}
2325
2326 static inline u16
2327 bnx2_get_hw_tx_cons(struct bnx2 *bp)
2328 {
2329         u16 cons;
2330
2331         cons = bp->status_blk->status_tx_quick_consumer_index0;
2332
2333         if (unlikely((cons & MAX_TX_DESC_CNT) == MAX_TX_DESC_CNT))
2334                 cons++;
2335         return cons;
2336 }
2337
/* Reclaim tx buffers the hardware has finished sending, unmapping
 * each head and fragment BD and freeing the skbs, then wake the tx
 * queue if enough descriptors became available.
 */
static void
bnx2_tx_int(struct bnx2 *bp)
{
	u16 hw_cons, sw_cons, sw_ring_cons;
	int tx_free_bd = 0;

	hw_cons = bnx2_get_hw_tx_cons(bp);
	sw_cons = bp->tx_cons;

	while (sw_cons != hw_cons) {
		struct sw_bd *tx_buf;
		struct sk_buff *skb;
		int i, last;

		sw_ring_cons = TX_RING_IDX(sw_cons);

		tx_buf = &bp->tx_buf_ring[sw_ring_cons];
		skb = tx_buf->skb;

		/* partial BD completions possible with TSO packets */
		if (skb_is_gso(skb)) {
			u16 last_idx, last_ring_idx;

			/* Index of the packet's last BD; account for the
			 * skipped descriptor at the end of a ring page.
			 */
			last_idx = sw_cons +
				skb_shinfo(skb)->nr_frags + 1;
			last_ring_idx = sw_ring_cons +
				skb_shinfo(skb)->nr_frags + 1;
			if (unlikely(last_ring_idx >= MAX_TX_DESC_CNT)) {
				last_idx++;
			}
			/* Defer if hardware has not consumed the whole
			 * packet yet.
			 */
			if (((s16) ((s16) last_idx - (s16) hw_cons)) > 0) {
				break;
			}
		}

		pci_unmap_single(bp->pdev, pci_unmap_addr(tx_buf, mapping),
			skb_headlen(skb), PCI_DMA_TODEVICE);

		tx_buf->skb = NULL;
		last = skb_shinfo(skb)->nr_frags;

		/* Unmap each fragment BD following the headlen BD. */
		for (i = 0; i < last; i++) {
			sw_cons = NEXT_TX_BD(sw_cons);

			pci_unmap_page(bp->pdev,
				pci_unmap_addr(
					&bp->tx_buf_ring[TX_RING_IDX(sw_cons)],
					mapping),
				skb_shinfo(skb)->frags[i].size,
				PCI_DMA_TODEVICE);
		}

		sw_cons = NEXT_TX_BD(sw_cons);

		tx_free_bd += last + 1;

		dev_kfree_skb(skb);

		/* Re-sample in case more completions arrived meanwhile. */
		hw_cons = bnx2_get_hw_tx_cons(bp);
	}

	bp->hw_tx_cons = hw_cons;
	bp->tx_cons = sw_cons;
	/* Need to make the tx_cons update visible to bnx2_start_xmit()
	 * before checking for netif_queue_stopped().  Without the
	 * memory barrier, there is a small possibility that bnx2_start_xmit()
	 * will miss it and cause the queue to be stopped forever.
	 */
	smp_mb();

	/* Re-check under netif_tx_lock to close the race with the
	 * xmit path stopping the queue concurrently.
	 */
	if (unlikely(netif_queue_stopped(bp->dev)) &&
		     (bnx2_tx_avail(bp) > bp->tx_wake_thresh)) {
		netif_tx_lock(bp->dev);
		if ((netif_queue_stopped(bp->dev)) &&
		    (bnx2_tx_avail(bp) > bp->tx_wake_thresh))
			netif_wake_queue(bp->dev);
		netif_tx_unlock(bp->dev);
	}
}
2417
/* Recycle @count rx page-ring entries from the consumer back to the
 * producer side without allocating new pages.  If @skb is non-NULL,
 * its last page fragment is detached, re-mapped and donated to the
 * first recycled slot, and the skb itself is freed.
 */
static void
bnx2_reuse_rx_skb_pages(struct bnx2 *bp, struct sk_buff *skb, int count)
{
	struct sw_pg *cons_rx_pg, *prod_rx_pg;
	struct rx_bd *cons_bd, *prod_bd;
	dma_addr_t mapping;
	int i;
	u16 hw_prod = bp->rx_pg_prod, prod;
	u16 cons = bp->rx_pg_cons;

	for (i = 0; i < count; i++) {
		prod = RX_PG_RING_IDX(hw_prod);

		prod_rx_pg = &bp->rx_pg_ring[prod];
		cons_rx_pg = &bp->rx_pg_ring[cons];
		cons_bd = &bp->rx_pg_desc_ring[RX_RING(cons)][RX_IDX(cons)];
		prod_bd = &bp->rx_pg_desc_ring[RX_RING(prod)][RX_IDX(prod)];

		if (i == 0 && skb) {
			/* Steal the skb's last page fragment to refill
			 * the consumer slot, then drop the skb.
			 */
			struct page *page;
			struct skb_shared_info *shinfo;

			shinfo = skb_shinfo(skb);
			shinfo->nr_frags--;
			page = shinfo->frags[shinfo->nr_frags].page;
			shinfo->frags[shinfo->nr_frags].page = NULL;
			mapping = pci_map_page(bp->pdev, page, 0, PAGE_SIZE,
					       PCI_DMA_FROMDEVICE);
			cons_rx_pg->page = page;
			pci_unmap_addr_set(cons_rx_pg, mapping, mapping);
			dev_kfree_skb(skb);
		}
		if (prod != cons) {
			/* Move the page and its mapping from the consumer
			 * slot to the producer slot.
			 */
			prod_rx_pg->page = cons_rx_pg->page;
			cons_rx_pg->page = NULL;
			pci_unmap_addr_set(prod_rx_pg, mapping,
				pci_unmap_addr(cons_rx_pg, mapping));

			prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
			prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;

		}
		cons = RX_PG_RING_IDX(NEXT_RX_BD(cons));
		hw_prod = NEXT_RX_BD(hw_prod);
	}
	bp->rx_pg_prod = hw_prod;
	bp->rx_pg_cons = cons;
}
2466
/* Recycle the rx buffer at @cons back onto the ring at @prod, reusing
 * its existing DMA mapping, so a received packet can be dropped or
 * copied without allocating a new skb.  Advances rx_prod_bseq.
 */
static inline void
bnx2_reuse_rx_skb(struct bnx2 *bp, struct sk_buff *skb,
	u16 cons, u16 prod)
{
	struct sw_bd *cons_rx_buf, *prod_rx_buf;
	struct rx_bd *cons_bd, *prod_bd;

	cons_rx_buf = &bp->rx_buf_ring[cons];
	prod_rx_buf = &bp->rx_buf_ring[prod];

	/* Hand the header area (the part the CPU may have touched)
	 * back to the device.
	 */
	pci_dma_sync_single_for_device(bp->pdev,
		pci_unmap_addr(cons_rx_buf, mapping),
		bp->rx_offset + RX_COPY_THRESH, PCI_DMA_FROMDEVICE);

	bp->rx_prod_bseq += bp->rx_buf_use_size;

	prod_rx_buf->skb = skb;

	/* Same slot: nothing further to move. */
	if (cons == prod)
		return;

	pci_unmap_addr_set(prod_rx_buf, mapping,
			pci_unmap_addr(cons_rx_buf, mapping));

	cons_bd = &bp->rx_desc_ring[RX_RING(cons)][RX_IDX(cons)];
	prod_bd = &bp->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
	prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
	prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;
}
2496
/* Finish reception of one packet into @skb.  Allocates a replacement
 * buffer for the ring slot, unmaps the packet data and, for frames
 * split across the page ring (@hdr_len != 0), attaches the data pages
 * as skb fragments.  @ring_idx packs the sw ring consumer index in the
 * high 16 bits and the producer index in the low 16 bits.  Returns 0
 * on success or a negative errno; on failure all buffers are recycled
 * back to their rings.
 */
static int
bnx2_rx_skb(struct bnx2 *bp, struct sk_buff *skb, unsigned int len,
	    unsigned int hdr_len, dma_addr_t dma_addr, u32 ring_idx)
{
	int err;
	u16 prod = ring_idx & 0xffff;

	err = bnx2_alloc_rx_skb(bp, prod);
	if (unlikely(err)) {
		/* No replacement buffer: give this skb back to the ring ... */
		bnx2_reuse_rx_skb(bp, skb, (u16) (ring_idx >> 16), prod);
		if (hdr_len) {
			/* ... and return the pages holding the non-header
			 * part.  raw_len includes the 4-byte frame CRC
			 * still counted by the hardware length.
			 */
			unsigned int raw_len = len + 4;
			int pages = PAGE_ALIGN(raw_len - hdr_len) >> PAGE_SHIFT;

			bnx2_reuse_rx_skb_pages(bp, NULL, pages);
		}
		return err;
	}

	skb_reserve(skb, bp->rx_offset);
	pci_unmap_single(bp->pdev, dma_addr, bp->rx_buf_use_size,
			 PCI_DMA_FROMDEVICE);

	if (hdr_len == 0) {
		/* Whole frame fits in the linear buffer. */
		skb_put(skb, len);
		return 0;
	} else {
		unsigned int i, frag_len, frag_size, pages;
		struct sw_pg *rx_pg;
		u16 pg_cons = bp->rx_pg_cons;
		u16 pg_prod = bp->rx_pg_prod;

		/* Size of the part that landed in the page ring; the +4
		 * accounts for the trailing CRC which is trimmed below.
		 */
		frag_size = len + 4 - hdr_len;
		pages = PAGE_ALIGN(frag_size) >> PAGE_SHIFT;
		skb_put(skb, hdr_len);

		for (i = 0; i < pages; i++) {
			frag_len = min(frag_size, (unsigned int) PAGE_SIZE);
			if (unlikely(frag_len <= 4)) {
				/* The last page holds only (part of) the
				 * 4-byte CRC: drop it and trim the bytes
				 * already accounted to the previous frag
				 * (or to the linear area if i == 0).
				 */
				unsigned int tail = 4 - frag_len;

				bp->rx_pg_cons = pg_cons;
				bp->rx_pg_prod = pg_prod;
				bnx2_reuse_rx_skb_pages(bp, NULL, pages - i);
				skb->len -= tail;
				if (i == 0) {
					skb->tail -= tail;
				} else {
					skb_frag_t *frag =
						&skb_shinfo(skb)->frags[i - 1];
					frag->size -= tail;
					skb->data_len -= tail;
					skb->truesize -= tail;
				}
				return 0;
			}
			rx_pg = &bp->rx_pg_ring[pg_cons];

			pci_unmap_page(bp->pdev, pci_unmap_addr(rx_pg, mapping),
				       PAGE_SIZE, PCI_DMA_FROMDEVICE);

			/* Trim the CRC from the final fragment. */
			if (i == pages - 1)
				frag_len -= 4;

			skb_fill_page_desc(skb, i, rx_pg->page, 0, frag_len);
			rx_pg->page = NULL;

			err = bnx2_alloc_rx_page(bp, RX_PG_RING_IDX(pg_prod));
			if (unlikely(err)) {
				/* Replacement page allocation failed: undo by
				 * recycling skb and the remaining pages.
				 */
				bp->rx_pg_cons = pg_cons;
				bp->rx_pg_prod = pg_prod;
				bnx2_reuse_rx_skb_pages(bp, skb, pages - i);
				return err;
			}

			frag_size -= frag_len;
			skb->data_len += frag_len;
			skb->truesize += frag_len;
			skb->len += frag_len;

			pg_prod = NEXT_RX_BD(pg_prod);
			pg_cons = RX_PG_RING_IDX(NEXT_RX_BD(pg_cons));
		}
		bp->rx_pg_prod = pg_prod;
		bp->rx_pg_cons = pg_cons;
	}
	return 0;
}
2585
2586 static inline u16
2587 bnx2_get_hw_rx_cons(struct bnx2 *bp)
2588 {
2589         u16 cons = bp->status_blk->status_rx_quick_consumer_index0;
2590
2591         if (unlikely((cons & MAX_RX_DESC_CNT) == MAX_RX_DESC_CNT))
2592                 cons++;
2593         return cons;
2594 }
2595
/* NAPI rx handler.  Drains completed rx descriptors, up to @budget
 * packets, passing good frames up the stack and recycling buffers for
 * bad or unallocatable ones.  Returns the number of packets processed.
 */
static int
bnx2_rx_int(struct bnx2 *bp, int budget)
{
	u16 hw_cons, sw_cons, sw_ring_cons, sw_prod, sw_ring_prod;
	struct l2_fhdr *rx_hdr;
	int rx_pkt = 0, pg_ring_used = 0;

	hw_cons = bnx2_get_hw_rx_cons(bp);
	sw_cons = bp->rx_cons;
	sw_prod = bp->rx_prod;

	/* Memory barrier necessary as speculative reads of the rx
	 * buffer can be ahead of the index in the status block
	 */
	rmb();
	while (sw_cons != hw_cons) {
		unsigned int len, hdr_len;
		u32 status;
		struct sw_bd *rx_buf;
		struct sk_buff *skb;
		dma_addr_t dma_addr;

		sw_ring_cons = RX_RING_IDX(sw_cons);
		sw_ring_prod = RX_RING_IDX(sw_prod);

		rx_buf = &bp->rx_buf_ring[sw_ring_cons];
		skb = rx_buf->skb;

		rx_buf->skb = NULL;

		dma_addr = pci_unmap_addr(rx_buf, mapping);

		/* Sync only the header area for the CPU; the full buffer
		 * is unmapped in bnx2_rx_skb() if the skb is consumed.
		 */
		pci_dma_sync_single_for_cpu(bp->pdev, dma_addr,
			bp->rx_offset + RX_COPY_THRESH, PCI_DMA_FROMDEVICE);

		/* The chip places an l2_fhdr (status + length) in front
		 * of the received frame.
		 */
		rx_hdr = (struct l2_fhdr *) skb->data;
		len = rx_hdr->l2_fhdr_pkt_len;

		if ((status = rx_hdr->l2_fhdr_status) &
			(L2_FHDR_ERRORS_BAD_CRC |
			L2_FHDR_ERRORS_PHY_DECODE |
			L2_FHDR_ERRORS_ALIGNMENT |
			L2_FHDR_ERRORS_TOO_SHORT |
			L2_FHDR_ERRORS_GIANT_FRAME)) {

			/* Bad frame: recycle the buffer, drop the packet. */
			bnx2_reuse_rx_skb(bp, skb, sw_ring_cons, sw_ring_prod);
			goto next_rx;
		}
		hdr_len = 0;
		if (status & L2_FHDR_STATUS_SPLIT) {
			/* Header/data split frame: the header length is
			 * reported in the l2_fhdr_ip_xsum field.
			 */
			hdr_len = rx_hdr->l2_fhdr_ip_xsum;
			pg_ring_used = 1;
		} else if (len > bp->rx_jumbo_thresh) {
			hdr_len = bp->rx_jumbo_thresh;
			pg_ring_used = 1;
		}

		/* Strip the 4-byte frame CRC. */
		len -= 4;

		if (len <= bp->rx_copy_thresh) {
			struct sk_buff *new_skb;

			/* Small frame: copy into a fresh skb and recycle
			 * the original ring buffer.
			 */
			new_skb = netdev_alloc_skb(bp->dev, len + 2);
			if (new_skb == NULL) {
				bnx2_reuse_rx_skb(bp, skb, sw_ring_cons,
						  sw_ring_prod);
				goto next_rx;
			}

			/* aligned copy */
			skb_copy_from_linear_data_offset(skb, bp->rx_offset - 2,
				      new_skb->data, len + 2);
			skb_reserve(new_skb, 2);
			skb_put(new_skb, len);

			bnx2_reuse_rx_skb(bp, skb,
				sw_ring_cons, sw_ring_prod);

			skb = new_skb;
		} else if (unlikely(bnx2_rx_skb(bp, skb, len, hdr_len, dma_addr,
				    (sw_ring_cons << 16) | sw_ring_prod)))
			goto next_rx;

		skb->protocol = eth_type_trans(skb, bp->dev);

		/* Drop oversized frames unless VLAN tagged (0x8100 ==
		 * ETH_P_8021Q); tagged frames may exceed MTU + ETH_HLEN.
		 */
		if ((len > (bp->dev->mtu + ETH_HLEN)) &&
			(ntohs(skb->protocol) != 0x8100)) {

			dev_kfree_skb(skb);
			goto next_rx;

		}

		skb->ip_summed = CHECKSUM_NONE;
		if (bp->rx_csum &&
			(status & (L2_FHDR_STATUS_TCP_SEGMENT |
			L2_FHDR_STATUS_UDP_DATAGRAM))) {

			if (likely((status & (L2_FHDR_ERRORS_TCP_XSUM |
					      L2_FHDR_ERRORS_UDP_XSUM)) == 0))
				skb->ip_summed = CHECKSUM_UNNECESSARY;
		}

#ifdef BCM_VLAN
		if ((status & L2_FHDR_STATUS_L2_VLAN_TAG) && (bp->vlgrp != 0)) {
			vlan_hwaccel_receive_skb(skb, bp->vlgrp,
				rx_hdr->l2_fhdr_vlan_tag);
		}
		else
#endif
			netif_receive_skb(skb);

		bp->dev->last_rx = jiffies;
		rx_pkt++;

next_rx:
		sw_cons = NEXT_RX_BD(sw_cons);
		sw_prod = NEXT_RX_BD(sw_prod);

		if ((rx_pkt == budget))
			break;

		/* Refresh hw_cons to see if there is new work */
		if (sw_cons == hw_cons) {
			hw_cons = bnx2_get_hw_rx_cons(bp);
			rmb();
		}
	}
	bp->rx_cons = sw_cons;
	bp->rx_prod = sw_prod;

	/* Tell the chip how far we have consumed/produced. */
	if (pg_ring_used)
		REG_WR16(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_PG_BDIDX,
			 bp->rx_pg_prod);

	REG_WR16(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BDIDX, sw_prod);

	REG_WR(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BSEQ, bp->rx_prod_bseq);

	mmiowb();

	return rx_pkt;

}
2740
/* MSI ISR - The only difference between this and the INTx ISR
 * is that the MSI interrupt is always serviced.
 */
static irqreturn_t
bnx2_msi(int irq, void *dev_instance)
{
	struct net_device *dev = dev_instance;
	struct bnx2 *bp = netdev_priv(dev);

	prefetch(bp->status_blk);
	/* Mask further chip interrupts until NAPI polling completes. */
	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
		BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
		BNX2_PCICFG_INT_ACK_CMD_MASK_INT);

	/* Return here if interrupt is disabled. */
	if (unlikely(atomic_read(&bp->intr_sem) != 0))
		return IRQ_HANDLED;

	netif_rx_schedule(dev, &bp->napi);

	return IRQ_HANDLED;
}
2763
/* One-shot MSI ISR.  Unlike bnx2_msi(), no INT_ACK_CMD mask write is
 * issued before scheduling NAPI — presumably one-shot MSI mode masks
 * itself in hardware; confirm against chip documentation.
 */
static irqreturn_t
bnx2_msi_1shot(int irq, void *dev_instance)
{
	struct net_device *dev = dev_instance;
	struct bnx2 *bp = netdev_priv(dev);

	prefetch(bp->status_blk);

	/* Return here if interrupt is disabled. */
	if (unlikely(atomic_read(&bp->intr_sem) != 0))
		return IRQ_HANDLED;

	netif_rx_schedule(dev, &bp->napi);

	return IRQ_HANDLED;
}
2780
/* INTx (possibly shared) ISR.  Returns IRQ_NONE when the interrupt was
 * raised by another device sharing the line.
 */
static irqreturn_t
bnx2_interrupt(int irq, void *dev_instance)
{
	struct net_device *dev = dev_instance;
	struct bnx2 *bp = netdev_priv(dev);
	struct status_block *sblk = bp->status_blk;

	/* When using INTx, it is possible for the interrupt to arrive
	 * at the CPU before the status block posted prior to the
	 * interrupt. Reading a register will flush the status block.
	 * When using MSI, the MSI message will always complete after
	 * the status block write.
	 */
	if ((sblk->status_idx == bp->last_status_idx) &&
	    (REG_RD(bp, BNX2_PCICFG_MISC_STATUS) &
	     BNX2_PCICFG_MISC_STATUS_INTA_VALUE))
		return IRQ_NONE;

	/* Mask further chip interrupts until NAPI polling completes. */
	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
		BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
		BNX2_PCICFG_INT_ACK_CMD_MASK_INT);

	/* Read back to deassert IRQ immediately to avoid too many
	 * spurious interrupts.
	 */
	REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD);

	/* Return here if interrupt is shared and is disabled. */
	if (unlikely(atomic_read(&bp->intr_sem) != 0))
		return IRQ_HANDLED;

	if (netif_rx_schedule_prep(dev, &bp->napi)) {
		bp->last_status_idx = sblk->status_idx;
		__netif_rx_schedule(dev, &bp->napi);
	}

	return IRQ_HANDLED;
}
2819
/* Attention bits the driver services in its poll loop (see
 * bnx2_has_work()/bnx2_poll_work()): link-state change and timer abort.
 */
#define STATUS_ATTN_EVENTS	(STATUS_ATTN_BITS_LINK_STATE | \
				 STATUS_ATTN_BITS_TIMER_ABORT)
2822
2823 static inline int
2824 bnx2_has_work(struct bnx2 *bp)
2825 {
2826         struct status_block *sblk = bp->status_blk;
2827
2828         if ((bnx2_get_hw_rx_cons(bp) != bp->rx_cons) ||
2829             (bnx2_get_hw_tx_cons(bp) != bp->hw_tx_cons))
2830                 return 1;
2831
2832         if ((sblk->status_attn_bits & STATUS_ATTN_EVENTS) !=
2833             (sblk->status_attn_bits_ack & STATUS_ATTN_EVENTS))
2834                 return 1;
2835
2836         return 0;
2837 }
2838
/* One round of NAPI work: service attention (PHY/link) events, then tx
 * completions, then up to (budget - work_done) rx packets.  Returns
 * the updated work_done count.
 */
static int bnx2_poll_work(struct bnx2 *bp, int work_done, int budget)
{
	struct status_block *sblk = bp->status_blk;
	u32 status_attn_bits = sblk->status_attn_bits;
	u32 status_attn_bits_ack = sblk->status_attn_bits_ack;

	/* An attention event is pending when a bit differs between the
	 * raw and acknowledged attention words.
	 */
	if ((status_attn_bits & STATUS_ATTN_EVENTS) !=
	    (status_attn_bits_ack & STATUS_ATTN_EVENTS)) {

		bnx2_phy_int(bp);

		/* This is needed to take care of transient status
		 * during link changes.
		 */
		REG_WR(bp, BNX2_HC_COMMAND,
		       bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
		REG_RD(bp, BNX2_HC_COMMAND);	/* flush the write */
	}

	if (bnx2_get_hw_tx_cons(bp) != bp->hw_tx_cons)
		bnx2_tx_int(bp);

	if (bnx2_get_hw_rx_cons(bp) != bp->rx_cons)
		work_done += bnx2_rx_int(bp, budget - work_done);

	return work_done;
}
2866
/* NAPI poll callback.  Loops doing work until the budget is exhausted
 * or no work remains, then completes NAPI and re-enables interrupts
 * via the INT_ACK_CMD register.
 */
static int bnx2_poll(struct napi_struct *napi, int budget)
{
	struct bnx2 *bp = container_of(napi, struct bnx2, napi);
	int work_done = 0;
	struct status_block *sblk = bp->status_blk;

	while (1) {
		work_done = bnx2_poll_work(bp, work_done, budget);

		if (unlikely(work_done >= budget))
			break;

		/* bp->last_status_idx is used below to tell the hw how
		 * much work has been processed, so we must read it before
		 * checking for more work.
		 */
		bp->last_status_idx = sblk->status_idx;
		rmb();
		if (likely(!bnx2_has_work(bp))) {
			netif_rx_complete(bp->dev, napi);
			if (likely(bp->flags & USING_MSI_FLAG)) {
				REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
				       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
				       bp->last_status_idx);
				break;
			}
			/* For INTx, update the index with interrupts
			 * still masked, then write again without
			 * MASK_INT to re-enable interrupts.
			 */
			REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
			       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
			       BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
			       bp->last_status_idx);

			REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
			       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
			       bp->last_status_idx);
			break;
		}
	}

	return work_done;
}
2907
2908 /* Called with rtnl_lock from vlan functions and also netif_tx_lock
2909  * from set_multicast.
2910  */
2911 static void
2912 bnx2_set_rx_mode(struct net_device *dev)
2913 {
2914         struct bnx2 *bp = netdev_priv(dev);
2915         u32 rx_mode, sort_mode;
2916         int i;
2917
2918         spin_lock_bh(&bp->phy_lock);
2919
2920         rx_mode = bp->rx_mode & ~(BNX2_EMAC_RX_MODE_PROMISCUOUS |
2921                                   BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG);
2922         sort_mode = 1 | BNX2_RPM_SORT_USER0_BC_EN;
2923 #ifdef BCM_VLAN
2924         if (!bp->vlgrp && !(bp->flags & ASF_ENABLE_FLAG))
2925                 rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
2926 #else
2927         if (!(bp->flags & ASF_ENABLE_FLAG))
2928                 rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
2929 #endif
2930         if (dev->flags & IFF_PROMISC) {
2931                 /* Promiscuous mode. */
2932                 rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
2933                 sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN |
2934                              BNX2_RPM_SORT_USER0_PROM_VLAN;
2935         }
2936         else if (dev->flags & IFF_ALLMULTI) {
2937                 for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
2938                         REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
2939                                0xffffffff);
2940                 }
2941                 sort_mode |= BNX2_RPM_SORT_USER0_MC_EN;
2942         }
2943         else {
2944                 /* Accept one or more multicast(s). */
2945                 struct dev_mc_list *mclist;
2946                 u32 mc_filter[NUM_MC_HASH_REGISTERS];
2947                 u32 regidx;
2948                 u32 bit;
2949                 u32 crc;
2950
2951                 memset(mc_filter, 0, 4 * NUM_MC_HASH_REGISTERS);
2952
2953                 for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
2954                      i++, mclist = mclist->next) {
2955
2956                         crc = ether_crc_le(ETH_ALEN, mclist->dmi_addr);
2957                         bit = crc & 0xff;
2958                         regidx = (bit & 0xe0) >> 5;
2959                         bit &= 0x1f;
2960                         mc_filter[regidx] |= (1 << bit);
2961                 }
2962
2963                 for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
2964                         REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
2965                                mc_filter[i]);
2966                 }
2967
2968                 sort_mode |= BNX2_RPM_SORT_USER0_MC_HSH_EN;
2969         }
2970
2971         if (rx_mode != bp->rx_mode) {
2972                 bp->rx_mode = rx_mode;
2973                 REG_WR(bp, BNX2_EMAC_RX_MODE, rx_mode);
2974         }
2975
2976         REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
2977         REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode);
2978         REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode | BNX2_RPM_SORT_USER0_ENA);
2979
2980         spin_unlock_bh(&bp->phy_lock);
2981 }
2982
2983 static void
2984 load_rv2p_fw(struct bnx2 *bp, u32 *rv2p_code, u32 rv2p_code_len,
2985         u32 rv2p_proc)
2986 {
2987         int i;
2988         u32 val;
2989
2990
2991         for (i = 0; i < rv2p_code_len; i += 8) {
2992                 REG_WR(bp, BNX2_RV2P_INSTR_HIGH, cpu_to_le32(*rv2p_code));
2993                 rv2p_code++;
2994                 REG_WR(bp, BNX2_RV2P_INSTR_LOW, cpu_to_le32(*rv2p_code));
2995                 rv2p_code++;
2996
2997                 if (rv2p_proc == RV2P_PROC1) {
2998                         val = (i / 8) | BNX2_RV2P_PROC1_ADDR_CMD_RDWR;
2999                         REG_WR(bp, BNX2_RV2P_PROC1_ADDR_CMD, val);
3000                 }
3001                 else {
3002                         val = (i / 8) | BNX2_RV2P_PROC2_ADDR_CMD_RDWR;
3003                         REG_WR(bp, BNX2_RV2P_PROC2_ADDR_CMD, val);
3004                 }
3005         }
3006
3007         /* Reset the processor, un-stall is done later. */
3008         if (rv2p_proc == RV2P_PROC1) {
3009                 REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC1_RESET);
3010         }
3011         else {
3012                 REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC2_RESET);
3013         }
3014 }
3015
3016 static int
3017 load_cpu_fw(struct bnx2 *bp, struct cpu_reg *cpu_reg, struct fw_info *fw)
3018 {
3019         u32 offset;
3020         u32 val;
3021         int rc;
3022
3023         /* Halt the CPU. */
3024         val = REG_RD_IND(bp, cpu_reg->mode);
3025         val |= cpu_reg->mode_value_halt;
3026         REG_WR_IND(bp, cpu_reg->mode, val);
3027         REG_WR_IND(bp, cpu_reg->state, cpu_reg->state_value_clear);
3028
3029         /* Load the Text area. */
3030         offset = cpu_reg->spad_base + (fw->text_addr - cpu_reg->mips_view_base);
3031         if (fw->gz_text) {
3032                 int j;
3033
3034                 rc = zlib_inflate_blob(fw->text, FW_BUF_SIZE, fw->gz_text,
3035                                        fw->gz_text_len);
3036                 if (rc < 0)
3037                         return rc;
3038
3039                 for (j = 0; j < (fw->text_len / 4); j++, offset += 4) {
3040                         REG_WR_IND(bp, offset, cpu_to_le32(fw->text[j]));
3041                 }
3042         }
3043
3044         /* Load the Data area. */
3045         offset = cpu_reg->spad_base + (fw->data_addr - cpu_reg->mips_view_base);
3046         if (fw->data) {
3047                 int j;
3048
3049                 for (j = 0; j < (fw->data_len / 4); j++, offset += 4) {
3050                         REG_WR_IND(bp, offset, fw->data[j]);
3051                 }
3052         }
3053
3054         /* Load the SBSS area. */
3055         offset = cpu_reg->spad_base + (fw->sbss_addr - cpu_reg->mips_view_base);
3056         if (fw->sbss_len) {
3057                 int j;
3058
3059                 for (j = 0; j < (fw->sbss_len / 4); j++, offset += 4) {
3060                         REG_WR_IND(bp, offset, 0);
3061                 }
3062         }
3063
3064         /* Load the BSS area. */
3065         offset = cpu_reg->spad_base + (fw->bss_addr - cpu_reg->mips_view_base);
3066         if (fw->bss_len) {
3067                 int j;
3068
3069                 for (j = 0; j < (fw->bss_len/4); j++, offset += 4) {
3070                         REG_WR_IND(bp, offset, 0);
3071                 }
3072         }
3073
3074         /* Load the Read-Only area. */
3075         offset = cpu_reg->spad_base +
3076                 (fw->rodata_addr - cpu_reg->mips_view_base);
3077         if (fw->rodata) {
3078                 int j;
3079
3080                 for (j = 0; j < (fw->rodata_len / 4); j++, offset += 4) {
3081                         REG_WR_IND(bp, offset, fw->rodata[j]);
3082                 }
3083         }
3084
3085         /* Clear the pre-fetch instruction. */
3086         REG_WR_IND(bp, cpu_reg->inst, 0);
3087         REG_WR_IND(bp, cpu_reg->pc, fw->start_addr);
3088
3089         /* Start the CPU. */
3090         val = REG_RD_IND(bp, cpu_reg->mode);
3091         val &= ~cpu_reg->mode_value_halt;
3092         REG_WR_IND(bp, cpu_reg->state, cpu_reg->state_value_clear);
3093         REG_WR_IND(bp, cpu_reg->mode, val);
3094
3095         return 0;
3096 }
3097
/* Initialize all on-chip firmware processors: the two RV2P engines and
 * the RX, TX, TPAT, COM and CP CPUs.  A single vmalloc'ed scratch
 * buffer is reused to decompress each zlib-compressed image.  5709
 * chips use the _xi_/_09 firmware variants; older chips the _06 ones.
 * Returns 0 on success or a negative errno.
 */
static int
bnx2_init_cpus(struct bnx2 *bp)
{
	struct cpu_reg cpu_reg;
	struct fw_info *fw;
	int rc, rv2p_len;
	void *text, *rv2p;

	/* Initialize the RV2P processor. */
	text = vmalloc(FW_BUF_SIZE);
	if (!text)
		return -ENOMEM;
	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		rv2p = bnx2_xi_rv2p_proc1;
		rv2p_len = sizeof(bnx2_xi_rv2p_proc1);
	} else {
		rv2p = bnx2_rv2p_proc1;
		rv2p_len = sizeof(bnx2_rv2p_proc1);
	}
	rc = zlib_inflate_blob(text, FW_BUF_SIZE, rv2p, rv2p_len);
	if (rc < 0)
		goto init_cpu_err;

	load_rv2p_fw(bp, text, rc /* == len */, RV2P_PROC1);

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		rv2p = bnx2_xi_rv2p_proc2;
		rv2p_len = sizeof(bnx2_xi_rv2p_proc2);
	} else {
		rv2p = bnx2_rv2p_proc2;
		rv2p_len = sizeof(bnx2_rv2p_proc2);
	}
	rc = zlib_inflate_blob(text, FW_BUF_SIZE, rv2p, rv2p_len);
	if (rc < 0)
		goto init_cpu_err;

	load_rv2p_fw(bp, text, rc /* == len */, RV2P_PROC2);

	/* Initialize the RX Processor. */
	cpu_reg.mode = BNX2_RXP_CPU_MODE;
	cpu_reg.mode_value_halt = BNX2_RXP_CPU_MODE_SOFT_HALT;
	cpu_reg.mode_value_sstep = BNX2_RXP_CPU_MODE_STEP_ENA;
	cpu_reg.state = BNX2_RXP_CPU_STATE;
	cpu_reg.state_value_clear = 0xffffff;
	cpu_reg.gpr0 = BNX2_RXP_CPU_REG_FILE;
	cpu_reg.evmask = BNX2_RXP_CPU_EVENT_MASK;
	cpu_reg.pc = BNX2_RXP_CPU_PROGRAM_COUNTER;
	cpu_reg.inst = BNX2_RXP_CPU_INSTRUCTION;
	cpu_reg.bp = BNX2_RXP_CPU_HW_BREAKPOINT;
	cpu_reg.spad_base = BNX2_RXP_SCRATCH;
	cpu_reg.mips_view_base = 0x8000000;

	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		fw = &bnx2_rxp_fw_09;
	else
		fw = &bnx2_rxp_fw_06;

	/* The shared scratch buffer receives the decompressed text. */
	fw->text = text;
	rc = load_cpu_fw(bp, &cpu_reg, fw);
	if (rc)
		goto init_cpu_err;

	/* Initialize the TX Processor. */
	cpu_reg.mode = BNX2_TXP_CPU_MODE;
	cpu_reg.mode_value_halt = BNX2_TXP_CPU_MODE_SOFT_HALT;
	cpu_reg.mode_value_sstep = BNX2_TXP_CPU_MODE_STEP_ENA;
	cpu_reg.state = BNX2_TXP_CPU_STATE;
	cpu_reg.state_value_clear = 0xffffff;
	cpu_reg.gpr0 = BNX2_TXP_CPU_REG_FILE;
	cpu_reg.evmask = BNX2_TXP_CPU_EVENT_MASK;
	cpu_reg.pc = BNX2_TXP_CPU_PROGRAM_COUNTER;
	cpu_reg.inst = BNX2_TXP_CPU_INSTRUCTION;
	cpu_reg.bp = BNX2_TXP_CPU_HW_BREAKPOINT;
	cpu_reg.spad_base = BNX2_TXP_SCRATCH;
	cpu_reg.mips_view_base = 0x8000000;

	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		fw = &bnx2_txp_fw_09;
	else
		fw = &bnx2_txp_fw_06;

	fw->text = text;
	rc = load_cpu_fw(bp, &cpu_reg, fw);
	if (rc)
		goto init_cpu_err;

	/* Initialize the TX Patch-up Processor. */
	cpu_reg.mode = BNX2_TPAT_CPU_MODE;
	cpu_reg.mode_value_halt = BNX2_TPAT_CPU_MODE_SOFT_HALT;
	cpu_reg.mode_value_sstep = BNX2_TPAT_CPU_MODE_STEP_ENA;
	cpu_reg.state = BNX2_TPAT_CPU_STATE;
	cpu_reg.state_value_clear = 0xffffff;
	cpu_reg.gpr0 = BNX2_TPAT_CPU_REG_FILE;
	cpu_reg.evmask = BNX2_TPAT_CPU_EVENT_MASK;
	cpu_reg.pc = BNX2_TPAT_CPU_PROGRAM_COUNTER;
	cpu_reg.inst = BNX2_TPAT_CPU_INSTRUCTION;
	cpu_reg.bp = BNX2_TPAT_CPU_HW_BREAKPOINT;
	cpu_reg.spad_base = BNX2_TPAT_SCRATCH;
	cpu_reg.mips_view_base = 0x8000000;

	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		fw = &bnx2_tpat_fw_09;
	else
		fw = &bnx2_tpat_fw_06;

	fw->text = text;
	rc = load_cpu_fw(bp, &cpu_reg, fw);
	if (rc)
		goto init_cpu_err;

	/* Initialize the Completion Processor. */
	cpu_reg.mode = BNX2_COM_CPU_MODE;
	cpu_reg.mode_value_halt = BNX2_COM_CPU_MODE_SOFT_HALT;
	cpu_reg.mode_value_sstep = BNX2_COM_CPU_MODE_STEP_ENA;
	cpu_reg.state = BNX2_COM_CPU_STATE;
	cpu_reg.state_value_clear = 0xffffff;
	cpu_reg.gpr0 = BNX2_COM_CPU_REG_FILE;
	cpu_reg.evmask = BNX2_COM_CPU_EVENT_MASK;
	cpu_reg.pc = BNX2_COM_CPU_PROGRAM_COUNTER;
	cpu_reg.inst = BNX2_COM_CPU_INSTRUCTION;
	cpu_reg.bp = BNX2_COM_CPU_HW_BREAKPOINT;
	cpu_reg.spad_base = BNX2_COM_SCRATCH;
	cpu_reg.mips_view_base = 0x8000000;

	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		fw = &bnx2_com_fw_09;
	else
		fw = &bnx2_com_fw_06;

	fw->text = text;
	rc = load_cpu_fw(bp, &cpu_reg, fw);
	if (rc)
		goto init_cpu_err;

	/* Initialize the Command Processor. */
	cpu_reg.mode = BNX2_CP_CPU_MODE;
	cpu_reg.mode_value_halt = BNX2_CP_CPU_MODE_SOFT_HALT;
	cpu_reg.mode_value_sstep = BNX2_CP_CPU_MODE_STEP_ENA;
	cpu_reg.state = BNX2_CP_CPU_STATE;
	cpu_reg.state_value_clear = 0xffffff;
	cpu_reg.gpr0 = BNX2_CP_CPU_REG_FILE;
	cpu_reg.evmask = BNX2_CP_CPU_EVENT_MASK;
	cpu_reg.pc = BNX2_CP_CPU_PROGRAM_COUNTER;
	cpu_reg.inst = BNX2_CP_CPU_INSTRUCTION;
	cpu_reg.bp = BNX2_CP_CPU_HW_BREAKPOINT;
	cpu_reg.spad_base = BNX2_CP_SCRATCH;
	cpu_reg.mips_view_base = 0x8000000;

	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		fw = &bnx2_cp_fw_09;
	else
		fw = &bnx2_cp_fw_06;

	fw->text = text;
	rc = load_cpu_fw(bp, &cpu_reg, fw);

init_cpu_err:
	vfree(text);
	return rc;
}
3258
/* Move the device between PCI power states.
 *
 * @bp:    driver/device context
 * @state: target state; only PCI_D0 and PCI_D3hot are handled
 *
 * Returns 0 on success, -EINVAL for any other target state.
 *
 * On entry to D3hot with Wake-on-LAN enabled, the copper PHY is
 * temporarily renegotiated down to 10/100, magic/ACPI packet reception
 * is turned on, and the EMAC/RPM blocks are left enabled so the chip
 * can wake the system.
 */
static int
bnx2_set_power_state(struct bnx2 *bp, pci_power_t state)
{
	u16 pmcsr;

	pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);

	switch (state) {
	case PCI_D0: {
		u32 val;

		/* Set power state to D0 and clear any latched PME status. */
		pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
			(pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
			PCI_PM_CTRL_PME_STATUS);

		if (pmcsr & PCI_PM_CTRL_STATE_MASK)
			/* delay required during transition out of D3hot */
			msleep(20);

		/* Clear any received magic/ACPI packet indications and
		 * disable magic packet detection while operational. */
		val = REG_RD(bp, BNX2_EMAC_MODE);
		val |= BNX2_EMAC_MODE_MPKT_RCVD | BNX2_EMAC_MODE_ACPI_RCVD;
		val &= ~BNX2_EMAC_MODE_MPKT;
		REG_WR(bp, BNX2_EMAC_MODE, val);

		val = REG_RD(bp, BNX2_RPM_CONFIG);
		val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
		REG_WR(bp, BNX2_RPM_CONFIG, val);
		break;
	}
	case PCI_D3hot: {
		int i;
		u32 val, wol_msg;

		if (bp->wol) {
			u32 advertising;
			u8 autoneg;

			/* Save autoneg settings; renegotiate copper links
			 * down to 10/100 for the low-power state, then
			 * restore the saved settings below. */
			autoneg = bp->autoneg;
			advertising = bp->advertising;

			if (bp->phy_port == PORT_TP) {
				bp->autoneg = AUTONEG_SPEED;
				bp->advertising = ADVERTISED_10baseT_Half |
					ADVERTISED_10baseT_Full |
					ADVERTISED_100baseT_Half |
					ADVERTISED_100baseT_Full |
					ADVERTISED_Autoneg;
			}

			spin_lock_bh(&bp->phy_lock);
			bnx2_setup_phy(bp, bp->phy_port);
			spin_unlock_bh(&bp->phy_lock);

			bp->autoneg = autoneg;
			bp->advertising = advertising;

			bnx2_set_mac_addr(bp);

			val = REG_RD(bp, BNX2_EMAC_MODE);

			/* Enable port mode. */
			val &= ~BNX2_EMAC_MODE_PORT;
			val |= BNX2_EMAC_MODE_MPKT_RCVD |
			       BNX2_EMAC_MODE_ACPI_RCVD |
			       BNX2_EMAC_MODE_MPKT;
			if (bp->phy_port == PORT_TP)
				val |= BNX2_EMAC_MODE_PORT_MII;
			else {
				val |= BNX2_EMAC_MODE_PORT_GMII;
				if (bp->line_speed == SPEED_2500)
					val |= BNX2_EMAC_MODE_25G_MODE;
			}

			REG_WR(bp, BNX2_EMAC_MODE, val);

			/* receive all multicast */
			for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
				REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
				       0xffffffff);
			}
			REG_WR(bp, BNX2_EMAC_RX_MODE,
			       BNX2_EMAC_RX_MODE_SORT_MODE);

			/* Program sort user 0: clear, load, then enable. */
			val = 1 | BNX2_RPM_SORT_USER0_BC_EN |
			      BNX2_RPM_SORT_USER0_MC_EN;
			REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
			REG_WR(bp, BNX2_RPM_SORT_USER0, val);
			REG_WR(bp, BNX2_RPM_SORT_USER0, val |
			       BNX2_RPM_SORT_USER0_ENA);

			/* Need to enable EMAC and RPM for WOL. */
			REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
			       BNX2_MISC_ENABLE_SET_BITS_RX_PARSER_MAC_ENABLE |
			       BNX2_MISC_ENABLE_SET_BITS_TX_HEADER_Q_ENABLE |
			       BNX2_MISC_ENABLE_SET_BITS_EMAC_ENABLE);

			val = REG_RD(bp, BNX2_RPM_CONFIG);
			val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
			REG_WR(bp, BNX2_RPM_CONFIG, val);

			wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
		}
		else {
			wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
		}

		/* Tell firmware we are suspending (unless WOL is unusable). */
		if (!(bp->flags & NO_WOL_FLAG))
			bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT3 | wol_msg, 0);

		/* pmcsr |= 3 selects the D3hot state bits.  On 5706 A0/A1
		 * the chip is only put into D3hot when WOL is armed;
		 * presumably those steppings cannot resume reliably from
		 * D3hot otherwise — TODO confirm against errata. */
		pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
		if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
		    (CHIP_ID(bp) == CHIP_ID_5706_A1)) {

			if (bp->wol)
				pmcsr |= 3;
		}
		else {
			pmcsr |= 3;
		}
		if (bp->wol) {
			pmcsr |= PCI_PM_CTRL_PME_ENABLE;
		}
		pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
				      pmcsr);

		/* No more memory access after this point until
		 * device is brought back to D0.
		 */
		udelay(50);
		break;
	}
	default:
		return -EINVAL;
	}
	return 0;
}
3395
3396 static int
3397 bnx2_acquire_nvram_lock(struct bnx2 *bp)
3398 {
3399         u32 val;
3400         int j;
3401
3402         /* Request access to the flash interface. */
3403         REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_SET2);
3404         for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3405                 val = REG_RD(bp, BNX2_NVM_SW_ARB);
3406                 if (val & BNX2_NVM_SW_ARB_ARB_ARB2)
3407                         break;
3408
3409                 udelay(5);
3410         }
3411
3412         if (j >= NVRAM_TIMEOUT_COUNT)
3413                 return -EBUSY;
3414
3415         return 0;
3416 }
3417
3418 static int
3419 bnx2_release_nvram_lock(struct bnx2 *bp)
3420 {
3421         int j;
3422         u32 val;
3423
3424         /* Relinquish nvram interface. */
3425         REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_CLR2);
3426
3427         for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3428                 val = REG_RD(bp, BNX2_NVM_SW_ARB);
3429                 if (!(val & BNX2_NVM_SW_ARB_ARB_ARB2))
3430                         break;
3431
3432                 udelay(5);
3433         }
3434
3435         if (j >= NVRAM_TIMEOUT_COUNT)
3436                 return -EBUSY;
3437
3438         return 0;
3439 }
3440
3441
3442 static int
3443 bnx2_enable_nvram_write(struct bnx2 *bp)
3444 {
3445         u32 val;
3446
3447         val = REG_RD(bp, BNX2_MISC_CFG);
3448         REG_WR(bp, BNX2_MISC_CFG, val | BNX2_MISC_CFG_NVM_WR_EN_PCI);
3449
3450         if (bp->flash_info->flags & BNX2_NV_WREN) {
3451                 int j;
3452
3453                 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
3454                 REG_WR(bp, BNX2_NVM_COMMAND,
3455                        BNX2_NVM_COMMAND_WREN | BNX2_NVM_COMMAND_DOIT);
3456
3457                 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3458                         udelay(5);
3459
3460                         val = REG_RD(bp, BNX2_NVM_COMMAND);
3461                         if (val & BNX2_NVM_COMMAND_DONE)
3462                                 break;
3463                 }
3464
3465                 if (j >= NVRAM_TIMEOUT_COUNT)
3466                         return -EBUSY;
3467         }
3468         return 0;
3469 }
3470
3471 static void
3472 bnx2_disable_nvram_write(struct bnx2 *bp)
3473 {
3474         u32 val;
3475
3476         val = REG_RD(bp, BNX2_MISC_CFG);
3477         REG_WR(bp, BNX2_MISC_CFG, val & ~BNX2_MISC_CFG_NVM_WR_EN);
3478 }
3479
3480
3481 static void
3482 bnx2_enable_nvram_access(struct bnx2 *bp)
3483 {
3484         u32 val;
3485
3486         val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
3487         /* Enable both bits, even on read. */
3488         REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
3489                val | BNX2_NVM_ACCESS_ENABLE_EN | BNX2_NVM_ACCESS_ENABLE_WR_EN);
3490 }
3491
3492 static void
3493 bnx2_disable_nvram_access(struct bnx2 *bp)
3494 {
3495         u32 val;
3496
3497         val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
3498         /* Disable both bits, even after read. */
3499         REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
3500                 val & ~(BNX2_NVM_ACCESS_ENABLE_EN |
3501                         BNX2_NVM_ACCESS_ENABLE_WR_EN));
3502 }
3503
3504 static int
3505 bnx2_nvram_erase_page(struct bnx2 *bp, u32 offset)
3506 {
3507         u32 cmd;
3508         int j;
3509
3510         if (bp->flash_info->flags & BNX2_NV_BUFFERED)
3511                 /* Buffered flash, no erase needed */
3512                 return 0;
3513
3514         /* Build an erase command */
3515         cmd = BNX2_NVM_COMMAND_ERASE | BNX2_NVM_COMMAND_WR |
3516               BNX2_NVM_COMMAND_DOIT;
3517
3518         /* Need to clear DONE bit separately. */
3519         REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
3520
3521         /* Address of the NVRAM to read from. */
3522         REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
3523
3524         /* Issue an erase command. */
3525         REG_WR(bp, BNX2_NVM_COMMAND, cmd);
3526
3527         /* Wait for completion. */
3528         for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3529                 u32 val;
3530
3531                 udelay(5);
3532
3533                 val = REG_RD(bp, BNX2_NVM_COMMAND);
3534                 if (val & BNX2_NVM_COMMAND_DONE)
3535                         break;
3536         }
3537
3538         if (j >= NVRAM_TIMEOUT_COUNT)
3539                 return -EBUSY;
3540
3541         return 0;
3542 }
3543
/* Read one 32-bit word from NVRAM at @offset into @ret_val.
 *
 * @cmd_flags carries the BNX2_NVM_COMMAND_FIRST/LAST framing bits for
 * multi-word transactions.  @ret_val receives 4 bytes; flash data is
 * big-endian, so it is converted to CPU order before the copy-out.
 *
 * Caller must already hold the NVRAM lock and have access enabled.
 * Returns 0 on success or -EBUSY on command timeout.
 */
static int
bnx2_nvram_read_dword(struct bnx2 *bp, u32 offset, u8 *ret_val, u32 cmd_flags)
{
	u32 cmd;
	int j;

	/* Build the command word. */
	cmd = BNX2_NVM_COMMAND_DOIT | cmd_flags;

	/* Calculate an offset of a buffered flash, not needed for 5709. */
	if (bp->flash_info->flags & BNX2_NV_TRANSLATE) {
		/* Translate the linear offset into the part's
		 * page-number/page-offset addressing scheme. */
		offset = ((offset / bp->flash_info->page_size) <<
			   bp->flash_info->page_bits) +
			  (offset % bp->flash_info->page_size);
	}

	/* Need to clear DONE bit separately. */
	REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);

	/* Address of the NVRAM to read from. */
	REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);

	/* Issue a read command. */
	REG_WR(bp, BNX2_NVM_COMMAND, cmd);

	/* Wait for completion. */
	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
		u32 val;

		udelay(5);

		val = REG_RD(bp, BNX2_NVM_COMMAND);
		if (val & BNX2_NVM_COMMAND_DONE) {
			val = REG_RD(bp, BNX2_NVM_READ);

			/* Flash data is big-endian on the wire. */
			val = be32_to_cpu(val);
			memcpy(ret_val, &val, 4);
			break;
		}
	}
	if (j >= NVRAM_TIMEOUT_COUNT)
		return -EBUSY;

	return 0;
}
3589
3590
/* Write one 32-bit word from @val to NVRAM at @offset.
 *
 * @cmd_flags carries the BNX2_NVM_COMMAND_FIRST/LAST framing bits for
 * multi-word transactions.  The 4 bytes are converted to big-endian
 * before being handed to the flash interface, mirroring the read path.
 *
 * Caller must hold the NVRAM lock and have write access enabled.
 * Returns 0 on success or -EBUSY on command timeout.
 */
static int
bnx2_nvram_write_dword(struct bnx2 *bp, u32 offset, u8 *val, u32 cmd_flags)
{
	u32 cmd, val32;
	int j;

	/* Build the command word. */
	cmd = BNX2_NVM_COMMAND_DOIT | BNX2_NVM_COMMAND_WR | cmd_flags;

	/* Calculate an offset of a buffered flash, not needed for 5709. */
	if (bp->flash_info->flags & BNX2_NV_TRANSLATE) {
		/* Translate the linear offset into the part's
		 * page-number/page-offset addressing scheme. */
		offset = ((offset / bp->flash_info->page_size) <<
			  bp->flash_info->page_bits) +
			 (offset % bp->flash_info->page_size);
	}

	/* Need to clear DONE bit separately. */
	REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);

	/* Flash data is stored big-endian. */
	memcpy(&val32, val, 4);
	val32 = cpu_to_be32(val32);

	/* Write the data. */
	REG_WR(bp, BNX2_NVM_WRITE, val32);

	/* Address of the NVRAM to write to. */
	REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);

	/* Issue the write command. */
	REG_WR(bp, BNX2_NVM_COMMAND, cmd);

	/* Wait for completion. */
	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
		udelay(5);

		if (REG_RD(bp, BNX2_NVM_COMMAND) & BNX2_NVM_COMMAND_DONE)
			break;
	}
	if (j >= NVRAM_TIMEOUT_COUNT)
		return -EBUSY;

	return 0;
}
3634
/* Detect the attached flash/EEPROM part and record it in bp->flash_info,
 * then determine the usable flash size.
 *
 * 5709 chips use a single fixed flash descriptor.  Older chips are
 * matched against flash_table using either the saved config (if the
 * interface was already reconfigured, bit 30 of NVM_CFG1) or the
 * hardware strapping pins; in the latter case the interface is
 * reconfigured for the detected part under the NVRAM lock.
 *
 * Returns 0 on success, -ENODEV if no table entry matches, or an
 * error from bnx2_acquire_nvram_lock().
 */
static int
bnx2_init_nvram(struct bnx2 *bp)
{
	u32 val;
	int j, entry_count, rc = 0;
	struct flash_spec *flash;

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		bp->flash_info = &flash_5709;
		goto get_flash_size;
	}

	/* Determine the selected interface. */
	val = REG_RD(bp, BNX2_NVM_CFG1);

	entry_count = ARRAY_SIZE(flash_table);

	if (val & 0x40000000) {

		/* Flash interface has been reconfigured */
		for (j = 0, flash = &flash_table[0]; j < entry_count;
		     j++, flash++) {
			if ((val & FLASH_BACKUP_STRAP_MASK) ==
			    (flash->config1 & FLASH_BACKUP_STRAP_MASK)) {
				bp->flash_info = flash;
				break;
			}
		}
	}
	else {
		u32 mask;
		/* Not yet been reconfigured */

		/* Bit 23 selects the backup strapping encoding. */
		if (val & (1 << 23))
			mask = FLASH_BACKUP_STRAP_MASK;
		else
			mask = FLASH_STRAP_MASK;

		for (j = 0, flash = &flash_table[0]; j < entry_count;
			j++, flash++) {

			if ((val & mask) == (flash->strapping & mask)) {
				bp->flash_info = flash;

				/* Request access to the flash interface. */
				if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
					return rc;

				/* Enable access to flash interface */
				bnx2_enable_nvram_access(bp);

				/* Reconfigure the flash interface */
				REG_WR(bp, BNX2_NVM_CFG1, flash->config1);
				REG_WR(bp, BNX2_NVM_CFG2, flash->config2);
				REG_WR(bp, BNX2_NVM_CFG3, flash->config3);
				REG_WR(bp, BNX2_NVM_WRITE1, flash->write1);

				/* Disable access to flash interface */
				bnx2_disable_nvram_access(bp);
				bnx2_release_nvram_lock(bp);

				break;
			}
		}
	} /* if (val & 0x40000000) */

	/* Neither loop found a matching entry. */
	if (j == entry_count) {
		bp->flash_info = NULL;
		printk(KERN_ALERT PFX "Unknown flash/EEPROM type.\n");
		return -ENODEV;
	}

get_flash_size:
	/* Prefer the size advertised in shared firmware config; fall back
	 * to the table's total size if the field is zero. */
	val = REG_RD_IND(bp, bp->shmem_base + BNX2_SHARED_HW_CFG_CONFIG2);
	val &= BNX2_SHARED_HW_CFG2_NVM_SIZE_MASK;
	if (val)
		bp->flash_size = val;
	else
		bp->flash_size = bp->flash_info->total_size;

	return rc;
}
3717
/* Read @buf_size bytes from NVRAM at byte offset @offset into @ret_buf.
 *
 * The flash interface transfers whole dwords framed with FIRST/LAST
 * command flags, so unaligned starts and lengths are handled by reading
 * full dwords into a scratch buffer and copying out only the wanted
 * bytes.  Acquires and releases the NVRAM lock around the transfer.
 *
 * Returns 0 on success or a negative errno from the lock/read helpers.
 * NOTE(review): the early-return paths for a failed dword read skip the
 * disable/release cleanup at the bottom — the lock appears to be left
 * held on those paths; verify against the lock helpers' semantics.
 */
static int
bnx2_nvram_read(struct bnx2 *bp, u32 offset, u8 *ret_buf,
		int buf_size)
{
	int rc = 0;
	u32 cmd_flags, offset32, len32, extra;

	if (buf_size == 0)
		return 0;

	/* Request access to the flash interface. */
	if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
		return rc;

	/* Enable access to flash interface */
	bnx2_enable_nvram_access(bp);

	len32 = buf_size;
	offset32 = offset;
	extra = 0;

	cmd_flags = 0;

	/* Unaligned start: read the containing dword and copy the tail. */
	if (offset32 & 3) {
		u8 buf[4];
		u32 pre_len;

		offset32 &= ~3;
		pre_len = 4 - (offset & 3);

		if (pre_len >= len32) {
			/* Entire request fits in this one dword. */
			pre_len = len32;
			cmd_flags = BNX2_NVM_COMMAND_FIRST |
				    BNX2_NVM_COMMAND_LAST;
		}
		else {
			cmd_flags = BNX2_NVM_COMMAND_FIRST;
		}

		rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);

		if (rc)
			return rc;

		memcpy(ret_buf, buf + (offset & 3), pre_len);

		offset32 += 4;
		ret_buf += pre_len;
		len32 -= pre_len;
	}
	/* Round the remaining length up to a dword multiple; "extra" is
	 * how many padding bytes must be dropped from the final dword. */
	if (len32 & 3) {
		extra = 4 - (len32 & 3);
		len32 = (len32 + 4) & ~3;
	}

	if (len32 == 4) {
		u8 buf[4];

		/* If a FIRST was already issued, this dword is just LAST;
		 * otherwise it is the whole transaction. */
		if (cmd_flags)
			cmd_flags = BNX2_NVM_COMMAND_LAST;
		else
			cmd_flags = BNX2_NVM_COMMAND_FIRST |
				    BNX2_NVM_COMMAND_LAST;

		rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);

		memcpy(ret_buf, buf, 4 - extra);
	}
	else if (len32 > 0) {
		u8 buf[4];

		/* Read the first word. */
		if (cmd_flags)
			cmd_flags = 0;
		else
			cmd_flags = BNX2_NVM_COMMAND_FIRST;

		rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, cmd_flags);

		/* Advance to the next dword. */
		offset32 += 4;
		ret_buf += 4;
		len32 -= 4;

		/* Middle dwords need no framing flags. */
		while (len32 > 4 && rc == 0) {
			rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, 0);

			/* Advance to the next dword. */
			offset32 += 4;
			ret_buf += 4;
			len32 -= 4;
		}

		if (rc)
			return rc;

		/* Final dword: read into scratch and drop the padding. */
		cmd_flags = BNX2_NVM_COMMAND_LAST;
		rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);

		memcpy(ret_buf, buf, 4 - extra);
	}

	/* Disable access to flash interface */
	bnx2_disable_nvram_access(bp);

	bnx2_release_nvram_lock(bp);

	return rc;
}
3827
/* Write @buf_size bytes from @data_buf to NVRAM at byte offset @offset.
 *
 * The request is first widened to dword alignment: partial leading and
 * trailing dwords are read back from flash ("start"/"end") and merged
 * with the caller's data into a kmalloc'd aligned copy.  The aligned
 * range is then written page by page.  For non-buffered flash each
 * affected page is read in full into flash_buffer, erased, and
 * rewritten (read-modify-write); buffered flash is written directly.
 *
 * Returns 0 on success or a negative errno (-ENOMEM, -EBUSY, or an
 * error propagated from the NVRAM helpers).
 */
static int
bnx2_nvram_write(struct bnx2 *bp, u32 offset, u8 *data_buf,
		int buf_size)
{
	u32 written, offset32, len32;
	u8 *buf, start[4], end[4], *align_buf = NULL, *flash_buffer = NULL;
	int rc = 0;
	int align_start, align_end;

	buf = data_buf;
	offset32 = offset;
	len32 = buf_size;
	align_start = align_end = 0;

	/* Unaligned start: widen downward and preserve the leading
	 * bytes by reading the containing dword. */
	if ((align_start = (offset32 & 3))) {
		offset32 &= ~3;
		len32 += align_start;
		if (len32 < 4)
			len32 = 4;
		if ((rc = bnx2_nvram_read(bp, offset32, start, 4)))
			return rc;
	}

	/* Unaligned end: widen upward and preserve the trailing bytes. */
	if (len32 & 3) {
		align_end = 4 - (len32 & 3);
		len32 += align_end;
		if ((rc = bnx2_nvram_read(bp, offset32 + len32 - 4, end, 4)))
			return rc;
	}

	/* Merge preserved edge bytes with the caller's data into one
	 * aligned buffer. */
	if (align_start || align_end) {
		align_buf = kmalloc(len32, GFP_KERNEL);
		if (align_buf == NULL)
			return -ENOMEM;
		if (align_start) {
			memcpy(align_buf, start, 4);
		}
		if (align_end) {
			memcpy(align_buf + len32 - 4, end, 4);
		}
		memcpy(align_buf + align_start, data_buf, buf_size);
		buf = align_buf;
	}

	if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
		/* Scratch buffer for one full page; 264 bytes —
		 * presumably the largest supported page size (Saifun
		 * 264-byte pages) — TODO confirm against flash_table. */
		flash_buffer = kmalloc(264, GFP_KERNEL);
		if (flash_buffer == NULL) {
			rc = -ENOMEM;
			goto nvram_write_end;
		}
	}

	written = 0;
	while ((written < len32) && (rc == 0)) {
		u32 page_start, page_end, data_start, data_end;
		u32 addr, cmd_flags;
		int i;

		/* Find the page_start addr */
		page_start = offset32 + written;
		page_start -= (page_start % bp->flash_info->page_size);
		/* Find the page_end addr */
		page_end = page_start + bp->flash_info->page_size;
		/* Find the data_start addr */
		data_start = (written == 0) ? offset32 : page_start;
		/* Find the data_end addr */
		data_end = (page_end > offset32 + len32) ?
			(offset32 + len32) : page_end;

		/* Request access to the flash interface. */
		if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
			goto nvram_write_end;

		/* Enable access to flash interface */
		bnx2_enable_nvram_access(bp);

		cmd_flags = BNX2_NVM_COMMAND_FIRST;
		if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
			int j;

			/* Read the whole page into the buffer
			 * (non-buffer flash only) */
			for (j = 0; j < bp->flash_info->page_size; j += 4) {
				if (j == (bp->flash_info->page_size - 4)) {
					cmd_flags |= BNX2_NVM_COMMAND_LAST;
				}
				rc = bnx2_nvram_read_dword(bp,
					page_start + j,
					&flash_buffer[j],
					cmd_flags);

				if (rc)
					goto nvram_write_end;

				cmd_flags = 0;
			}
		}

		/* Enable writes to flash interface (unlock write-protect) */
		if ((rc = bnx2_enable_nvram_write(bp)) != 0)
			goto nvram_write_end;

		/* Loop to write back the buffer data from page_start to
		 * data_start */
		i = 0;
		if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
			/* Erase the page */
			if ((rc = bnx2_nvram_erase_page(bp, page_start)) != 0)
				goto nvram_write_end;

			/* Re-enable the write again for the actual write */
			bnx2_enable_nvram_write(bp);

			for (addr = page_start; addr < data_start;
				addr += 4, i += 4) {

				rc = bnx2_nvram_write_dword(bp, addr,
					&flash_buffer[i], cmd_flags);

				if (rc != 0)
					goto nvram_write_end;

				cmd_flags = 0;
			}
		}

		/* Loop to write the new data from data_start to data_end */
		for (addr = data_start; addr < data_end; addr += 4, i += 4) {
			if ((addr == page_end - 4) ||
				((bp->flash_info->flags & BNX2_NV_BUFFERED) &&
				 (addr == data_end - 4))) {

				cmd_flags |= BNX2_NVM_COMMAND_LAST;
			}
			rc = bnx2_nvram_write_dword(bp, addr, buf,
				cmd_flags);

			if (rc != 0)
				goto nvram_write_end;

			cmd_flags = 0;
			buf += 4;
		}

		/* Loop to write back the buffer data from data_end
		 * to page_end */
		if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
			for (addr = data_end; addr < page_end;
				addr += 4, i += 4) {

				if (addr == page_end-4) {
					cmd_flags = BNX2_NVM_COMMAND_LAST;
				}
				rc = bnx2_nvram_write_dword(bp, addr,
					&flash_buffer[i], cmd_flags);

				if (rc != 0)
					goto nvram_write_end;

				cmd_flags = 0;
			}
		}

		/* Disable writes to flash interface (lock write-protect) */
		bnx2_disable_nvram_write(bp);

		/* Disable access to flash interface */
		bnx2_disable_nvram_access(bp);
		bnx2_release_nvram_lock(bp);

		/* Increment written */
		written += data_end - data_start;
	}

nvram_write_end:
	/* kfree(NULL) is a no-op, so unconditional frees are safe. */
	kfree(flash_buffer);
	kfree(align_buf);
	return rc;
}
4007
/* Probe firmware for remote-PHY capability on SerDes devices.
 *
 * Clears REMOTE_PHY_CAP_FLAG, then checks the firmware capability
 * mailbox in shared memory.  If the firmware advertises remote-PHY
 * support, the flag is set, the current port type (fibre vs copper)
 * is read from the firmware link status, and — if the interface is
 * up — the carrier state is synchronized and the capability is
 * acknowledged back to the firmware.
 */
static void
bnx2_init_remote_phy(struct bnx2 *bp)
{
	u32 val;

	bp->phy_flags &= ~REMOTE_PHY_CAP_FLAG;
	/* Remote PHY only applies to SerDes devices. */
	if (!(bp->phy_flags & PHY_SERDES_FLAG))
		return;

	/* Validate the firmware capability mailbox signature. */
	val = REG_RD_IND(bp, bp->shmem_base + BNX2_FW_CAP_MB);
	if ((val & BNX2_FW_CAP_SIGNATURE_MASK) != BNX2_FW_CAP_SIGNATURE)
		return;

	if (val & BNX2_FW_CAP_REMOTE_PHY_CAPABLE) {
		bp->phy_flags |= REMOTE_PHY_CAP_FLAG;

		/* Firmware reports which media the link is using. */
		val = REG_RD_IND(bp, bp->shmem_base + BNX2_LINK_STATUS);
		if (val & BNX2_LINK_STATUS_SERDES_LINK)
			bp->phy_port = PORT_FIBRE;
		else
			bp->phy_port = PORT_TP;

		if (netif_running(bp->dev)) {
			u32 sig;

			/* Mirror the firmware's link state into netdev. */
			if (val & BNX2_LINK_STATUS_LINK_UP) {
				bp->link_up = 1;
				netif_carrier_on(bp->dev);
			} else {
				bp->link_up = 0;
				netif_carrier_off(bp->dev);
			}
			/* Acknowledge the remote-PHY capability to fw. */
			sig = BNX2_DRV_ACK_CAP_SIGNATURE |
			      BNX2_FW_CAP_REMOTE_PHY_CAPABLE;
			REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_ACK_CAP_MB,
				   sig);
		}
	}
}
4047
/* Perform a coordinated soft reset of the chip.
 *
 * @reset_code: BNX2_DRV_MSG_CODE_* value passed to the firmware so it
 *              knows why the reset is happening.
 *
 * Quiesces DMA, handshakes with the bootcode before and after the
 * reset, issues the revision-appropriate reset (MISC_COMMAND on 5709,
 * PCICFG_MISC_CONFIG on older chips), verifies endian configuration,
 * re-probes remote-PHY state, and applies 5706 A0 workarounds.
 *
 * Returns 0 on success, -EBUSY if the reset never completes, -ENODEV
 * on byte-swap misconfiguration, or a firmware-sync error.
 */
static int
bnx2_reset_chip(struct bnx2 *bp, u32 reset_code)
{
	u32 val;
	int i, rc = 0;
	u8 old_port;

	/* Wait for the current PCI transaction to complete before
	 * issuing a reset. */
	REG_WR(bp, BNX2_MISC_ENABLE_CLR_BITS,
	       BNX2_MISC_ENABLE_CLR_BITS_TX_DMA_ENABLE |
	       BNX2_MISC_ENABLE_CLR_BITS_DMA_ENGINE_ENABLE |
	       BNX2_MISC_ENABLE_CLR_BITS_RX_DMA_ENABLE |
	       BNX2_MISC_ENABLE_CLR_BITS_HOST_COALESCE_ENABLE);
	val = REG_RD(bp, BNX2_MISC_ENABLE_CLR_BITS);
	udelay(5);

	/* Wait for the firmware to tell us it is ok to issue a reset. */
	bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT0 | reset_code, 1);

	/* Deposit a driver reset signature so the firmware knows that
	 * this is a soft reset. */
	REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_RESET_SIGNATURE,
		   BNX2_DRV_RESET_SIGNATURE_MAGIC);

	/* Do a dummy read to force the chip to complete all current transaction
	 * before we issue a reset. */
	val = REG_RD(bp, BNX2_MISC_ID);

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		/* 5709 resets via MISC_COMMAND; the read-back flushes the
		 * posted write before the delay. */
		REG_WR(bp, BNX2_MISC_COMMAND, BNX2_MISC_COMMAND_SW_RESET);
		REG_RD(bp, BNX2_MISC_COMMAND);
		udelay(5);

		val = BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
		      BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;

		/* Use config space here: memory-mapped access may not be
		 * reliable immediately after the reset. */
		pci_write_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG, val);

	} else {
		val = BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
		      BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
		      BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;

		/* Chip reset. */
		REG_WR(bp, BNX2_PCICFG_MISC_CONFIG, val);

		/* Reading back any register after chip reset will hang the
		 * bus on 5706 A0 and A1.  The msleep below provides plenty
		 * of margin for write posting.
		 */
		if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
		    (CHIP_ID(bp) == CHIP_ID_5706_A1))
			msleep(20);

		/* Reset takes approximate 30 usec */
		for (i = 0; i < 10; i++) {
			val = REG_RD(bp, BNX2_PCICFG_MISC_CONFIG);
			if ((val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
				    BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) == 0)
				break;
			udelay(10);
		}

		if (val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
			   BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) {
			printk(KERN_ERR PFX "Chip reset did not complete\n");
			return -EBUSY;
		}
	}

	/* Make sure byte swapping is properly configured. */
	val = REG_RD(bp, BNX2_PCI_SWAP_DIAG0);
	if (val != 0x01020304) {
		printk(KERN_ERR PFX "Chip not in correct endian mode\n");
		return -ENODEV;
	}

	/* Wait for the firmware to finish its initialization. */
	rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT1 | reset_code, 0);
	if (rc)
		return rc;

	/* Re-probe remote PHY state; if the media type changed across the
	 * reset, reprogram the default remote link settings. */
	spin_lock_bh(&bp->phy_lock);
	old_port = bp->phy_port;
	bnx2_init_remote_phy(bp);
	if ((bp->phy_flags & REMOTE_PHY_CAP_FLAG) && old_port != bp->phy_port)
		bnx2_set_default_remote_link(bp);
	spin_unlock_bh(&bp->phy_lock);

	if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
		/* Adjust the voltage regular to two steps lower.  The default
		 * of this register is 0x0000000e. */
		REG_WR(bp, BNX2_MISC_VREG_CONTROL, 0x000000fa);

		/* Remove bad rbuf memory from the free pool. */
		rc = bnx2_alloc_bad_rbuf(bp);
	}

	return rc;
}
4149
/* Program the chip's post-reset configuration: DMA engine, context
 * memory, on-chip CPUs firmware, NVRAM, MAC address, MTU, host
 * coalescing parameters and the status/statistics block DMA addresses.
 * Finishes the WAIT2 handshake with the bootcode and enables the chip
 * blocks.  Returns 0 on success or a negative errno.
 */
static int
bnx2_init_chip(struct bnx2 *bp)
{
	u32 val;
	int rc;

	/* Make sure the interrupt is not active. */
	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, BNX2_PCICFG_INT_ACK_CMD_MASK_INT);

	/* DMA byte/word swapping plus read/write channel counts. */
	val = BNX2_DMA_CONFIG_DATA_BYTE_SWAP |
	      BNX2_DMA_CONFIG_DATA_WORD_SWAP |
#ifdef __BIG_ENDIAN
	      BNX2_DMA_CONFIG_CNTL_BYTE_SWAP |
#endif
	      BNX2_DMA_CONFIG_CNTL_WORD_SWAP |
	      DMA_READ_CHANS << 12 |
	      DMA_WRITE_CHANS << 16;

	/* NOTE(review): undocumented DMA config bits (bits 20-21 and 11);
	 * meaning not visible in this file -- see chip documentation. */
	val |= (0x2 << 20) | (1 << 11);

	if ((bp->flags & PCIX_FLAG) && (bp->bus_speed_mhz == 133))
		val |= (1 << 23);

	/* Ping-pong DMA only on 5706 (non-A0) when not in PCI-X mode. */
	if ((CHIP_NUM(bp) == CHIP_NUM_5706) &&
	    (CHIP_ID(bp) != CHIP_ID_5706_A0) && !(bp->flags & PCIX_FLAG))
		val |= BNX2_DMA_CONFIG_CNTL_PING_PONG_DMA;

	REG_WR(bp, BNX2_DMA_CONFIG, val);

	/* 5706 A0 workaround: restrict TDMA to a single DMA. */
	if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
		val = REG_RD(bp, BNX2_TDMA_CONFIG);
		val |= BNX2_TDMA_CONFIG_ONE_DMA;
		REG_WR(bp, BNX2_TDMA_CONFIG, val);
	}

	if (bp->flags & PCIX_FLAG) {
		u16 val16;

		/* Disable PCI-X relaxed ordering. */
		pci_read_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
				     &val16);
		pci_write_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
				      val16 & ~PCI_X_CMD_ERO);
	}

	/* Enable the blocks needed before context init can proceed. */
	REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
	       BNX2_MISC_ENABLE_SET_BITS_HOST_COALESCE_ENABLE |
	       BNX2_MISC_ENABLE_STATUS_BITS_RX_V2P_ENABLE |
	       BNX2_MISC_ENABLE_STATUS_BITS_CONTEXT_ENABLE);

	/* Initialize context mapping and zero out the quick contexts.  The
	 * context block must have already been enabled. */
	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		rc = bnx2_init_5709_context(bp);
		if (rc)
			return rc;
	} else
		bnx2_init_context(bp);

	/* Load firmware into the on-chip RX/TX/COM/CP/TPAT processors. */
	if ((rc = bnx2_init_cpus(bp)) != 0)
		return rc;

	bnx2_init_nvram(bp);

	bnx2_set_mac_addr(bp);

	/* Kernel bypass block size and 5709 A0/A1 halt workaround. */
	val = REG_RD(bp, BNX2_MQ_CONFIG);
	val &= ~BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE;
	val |= BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE_256;
	if (CHIP_ID(bp) == CHIP_ID_5709_A0 || CHIP_ID(bp) == CHIP_ID_5709_A1)
		val |= BNX2_MQ_CONFIG_HALT_DIS;

	REG_WR(bp, BNX2_MQ_CONFIG, val);

	/* Kernel mailbox window covers all kernel contexts. */
	val = 0x10000 + (MAX_CID_CNT * MB_KERNEL_CTX_SIZE);
	REG_WR(bp, BNX2_MQ_KNL_BYP_WIND_START, val);
	REG_WR(bp, BNX2_MQ_KNL_WIND_END, val);

	/* Tell RV2P the host page size. */
	val = (BCM_PAGE_BITS - 8) << 24;
	REG_WR(bp, BNX2_RV2P_CONFIG, val);

	/* Configure page size. */
	val = REG_RD(bp, BNX2_TBDR_CONFIG);
	val &= ~BNX2_TBDR_CONFIG_PAGE_SIZE;
	val |= (BCM_PAGE_BITS - 8) << 24 | 0x40;
	REG_WR(bp, BNX2_TBDR_CONFIG, val);

	/* Seed the backoff algorithm from the MAC address. */
	val = bp->mac_addr[0] +
	      (bp->mac_addr[1] << 8) +
	      (bp->mac_addr[2] << 16) +
	      bp->mac_addr[3] +
	      (bp->mac_addr[4] << 8) +
	      (bp->mac_addr[5] << 16);
	REG_WR(bp, BNX2_EMAC_BACKOFF_SEED, val);

	/* Program the MTU.  Also include 4 bytes for CRC32. */
	val = bp->dev->mtu + ETH_HLEN + 4;
	if (val > (MAX_ETHERNET_PACKET_SIZE + 4))
		val |= BNX2_EMAC_RX_MTU_SIZE_JUMBO_ENA;
	REG_WR(bp, BNX2_EMAC_RX_MTU_SIZE, val);

	bp->last_status_idx = 0;
	bp->rx_mode = BNX2_EMAC_RX_MODE_SORT_MODE;

	/* Set up how to generate a link change interrupt. */
	REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);

	/* DMA addresses of the status and statistics blocks. */
	REG_WR(bp, BNX2_HC_STATUS_ADDR_L,
	       (u64) bp->status_blk_mapping & 0xffffffff);
	REG_WR(bp, BNX2_HC_STATUS_ADDR_H, (u64) bp->status_blk_mapping >> 32);

	REG_WR(bp, BNX2_HC_STATISTICS_ADDR_L,
	       (u64) bp->stats_blk_mapping & 0xffffffff);
	REG_WR(bp, BNX2_HC_STATISTICS_ADDR_H,
	       (u64) bp->stats_blk_mapping >> 32);

	/* Host coalescing thresholds: high half = with-int, low = w/o. */
	REG_WR(bp, BNX2_HC_TX_QUICK_CONS_TRIP,
	       (bp->tx_quick_cons_trip_int << 16) | bp->tx_quick_cons_trip);

	REG_WR(bp, BNX2_HC_RX_QUICK_CONS_TRIP,
	       (bp->rx_quick_cons_trip_int << 16) | bp->rx_quick_cons_trip);

	REG_WR(bp, BNX2_HC_COMP_PROD_TRIP,
	       (bp->comp_prod_trip_int << 16) | bp->comp_prod_trip);

	REG_WR(bp, BNX2_HC_TX_TICKS, (bp->tx_ticks_int << 16) | bp->tx_ticks);

	REG_WR(bp, BNX2_HC_RX_TICKS, (bp->rx_ticks_int << 16) | bp->rx_ticks);

	REG_WR(bp, BNX2_HC_COM_TICKS,
	       (bp->com_ticks_int << 16) | bp->com_ticks);

	REG_WR(bp, BNX2_HC_CMD_TICKS,
	       (bp->cmd_ticks_int << 16) | bp->cmd_ticks);

	/* 5708 does not use the periodic statistics DMA timer. */
	if (CHIP_NUM(bp) == CHIP_NUM_5708)
		REG_WR(bp, BNX2_HC_STATS_TICKS, 0);
	else
		REG_WR(bp, BNX2_HC_STATS_TICKS, bp->stats_ticks);
	REG_WR(bp, BNX2_HC_STAT_COLLECT_TICKS, 0xbb8);	/* 3ms */

	/* 5706 A1 workaround: no timer-mode host coalescing. */
	if (CHIP_ID(bp) == CHIP_ID_5706_A1)
		val = BNX2_HC_CONFIG_COLLECT_STATS;
	else {
		val = BNX2_HC_CONFIG_RX_TMR_MODE | BNX2_HC_CONFIG_TX_TMR_MODE |
		      BNX2_HC_CONFIG_COLLECT_STATS;
	}

	if (bp->flags & ONE_SHOT_MSI_FLAG)
		val |= BNX2_HC_CONFIG_ONE_SHOT;

	REG_WR(bp, BNX2_HC_CONFIG, val);

	/* Clear internal stats counters. */
	REG_WR(bp, BNX2_HC_COMMAND, BNX2_HC_COMMAND_CLR_STAT_NOW);

	REG_WR(bp, BNX2_HC_ATTN_BITS_ENABLE, STATUS_ATTN_EVENTS);

	/* Initialize the receive filter. */
	bnx2_set_rx_mode(bp->dev);

	/* 5709 needs an explicit core DMA enable. */
	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		val = REG_RD(bp, BNX2_MISC_NEW_CORE_CTL);
		val |= BNX2_MISC_NEW_CORE_CTL_DMA_ENABLE;
		REG_WR(bp, BNX2_MISC_NEW_CORE_CTL, val);
	}
	/* Complete the reset handshake with the bootcode. */
	rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT2 | BNX2_DRV_MSG_CODE_RESET,
			  0);

	/* Enable the remaining blocks; read back to flush the posted
	 * write before the settle delay. */
	REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS, BNX2_MISC_ENABLE_DEFAULT);
	REG_RD(bp, BNX2_MISC_ENABLE_SET_BITS);

	udelay(20);

	/* Cache the HC command register for later COAL_NOW use. */
	bp->hc_cmd = REG_RD(bp, BNX2_HC_COMMAND);

	return rc;
}
4327
4328 static void
4329 bnx2_init_tx_context(struct bnx2 *bp, u32 cid)
4330 {
4331         u32 val, offset0, offset1, offset2, offset3;
4332
4333         if (CHIP_NUM(bp) == CHIP_NUM_5709) {
4334                 offset0 = BNX2_L2CTX_TYPE_XI;
4335                 offset1 = BNX2_L2CTX_CMD_TYPE_XI;
4336                 offset2 = BNX2_L2CTX_TBDR_BHADDR_HI_XI;
4337                 offset3 = BNX2_L2CTX_TBDR_BHADDR_LO_XI;
4338         } else {
4339                 offset0 = BNX2_L2CTX_TYPE;
4340                 offset1 = BNX2_L2CTX_CMD_TYPE;
4341                 offset2 = BNX2_L2CTX_TBDR_BHADDR_HI;
4342                 offset3 = BNX2_L2CTX_TBDR_BHADDR_LO;
4343         }
4344         val = BNX2_L2CTX_TYPE_TYPE_L2 | BNX2_L2CTX_TYPE_SIZE_L2;
4345         CTX_WR(bp, GET_CID_ADDR(cid), offset0, val);
4346
4347         val = BNX2_L2CTX_CMD_TYPE_TYPE_L2 | (8 << 16);
4348         CTX_WR(bp, GET_CID_ADDR(cid), offset1, val);
4349
4350         val = (u64) bp->tx_desc_mapping >> 32;
4351         CTX_WR(bp, GET_CID_ADDR(cid), offset2, val);
4352
4353         val = (u64) bp->tx_desc_mapping & 0xffffffff;
4354         CTX_WR(bp, GET_CID_ADDR(cid), offset3, val);
4355 }
4356
4357 static void
4358 bnx2_init_tx_ring(struct bnx2 *bp)
4359 {
4360         struct tx_bd *txbd;
4361         u32 cid;
4362
4363         bp->tx_wake_thresh = bp->tx_ring_size / 2;
4364
4365         txbd = &bp->tx_desc_ring[MAX_TX_DESC_CNT];
4366
4367         txbd->tx_bd_haddr_hi = (u64) bp->tx_desc_mapping >> 32;
4368         txbd->tx_bd_haddr_lo = (u64) bp->tx_desc_mapping & 0xffffffff;
4369
4370         bp->tx_prod = 0;
4371         bp->tx_cons = 0;
4372         bp->hw_tx_cons = 0;
4373         bp->tx_prod_bseq = 0;
4374
4375         cid = TX_CID;
4376         bp->tx_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BIDX;
4377         bp->tx_bseq_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BSEQ;
4378
4379         bnx2_init_tx_context(bp, cid);
4380 }
4381
4382 static void
4383 bnx2_init_rxbd_rings(struct rx_bd *rx_ring[], dma_addr_t dma[], u32 buf_size,
4384                      int num_rings)
4385 {
4386         int i;
4387         struct rx_bd *rxbd;
4388
4389         for (i = 0; i < num_rings; i++) {
4390                 int j;
4391
4392                 rxbd = &rx_ring[i][0];
4393                 for (j = 0; j < MAX_RX_DESC_CNT; j++, rxbd++) {
4394                         rxbd->rx_bd_len = buf_size;
4395                         rxbd->rx_bd_flags = RX_BD_FLAGS_START | RX_BD_FLAGS_END;
4396                 }
4397                 if (i == (num_rings - 1))
4398                         j = 0;
4399                 else
4400                         j = i + 1;
4401                 rxbd->rx_bd_haddr_hi = (u64) dma[j] >> 32;
4402                 rxbd->rx_bd_haddr_lo = (u64) dma[j] & 0xffffffff;
4403         }
4404 }
4405
/* Program the rx (and optional jumbo page) ring contexts, pre-fill
 * both rings with receive buffers, then publish the initial producer
 * indices and byte sequence to the hardware mailboxes.
 */
static void
bnx2_init_rx_ring(struct bnx2 *bp)
{
	int i;
	u16 prod, ring_prod;
	u32 val, rx_cid_addr = GET_CID_ADDR(RX_CID);

	bp->rx_prod = 0;
	bp->rx_cons = 0;
	bp->rx_prod_bseq = 0;
	bp->rx_pg_prod = 0;
	bp->rx_pg_cons = 0;

	bnx2_init_rxbd_rings(bp->rx_desc_ring, bp->rx_desc_mapping,
			     bp->rx_buf_use_size, bp->rx_max_ring);

	/* Zero the page-buffer size; programmed for real below only when
	 * a jumbo page ring is configured. */
	CTX_WR(bp, rx_cid_addr, BNX2_L2CTX_PG_BUF_SIZE, 0);
	if (bp->rx_pg_ring_size) {
		bnx2_init_rxbd_rings(bp->rx_pg_desc_ring,
				     bp->rx_pg_desc_mapping,
				     PAGE_SIZE, bp->rx_max_pg_ring);
		val = (bp->rx_buf_use_size << 16) | PAGE_SIZE;
		CTX_WR(bp, rx_cid_addr, BNX2_L2CTX_PG_BUF_SIZE, val);
		CTX_WR(bp, rx_cid_addr, BNX2_L2CTX_RBDC_KEY,
		       BNX2_L2CTX_RBDC_JUMBO_KEY);

		/* Bus address of the first page-ring page. */
		val = (u64) bp->rx_pg_desc_mapping[0] >> 32;
		CTX_WR(bp, rx_cid_addr, BNX2_L2CTX_NX_PG_BDHADDR_HI, val);

		val = (u64) bp->rx_pg_desc_mapping[0] & 0xffffffff;
		CTX_WR(bp, rx_cid_addr, BNX2_L2CTX_NX_PG_BDHADDR_LO, val);

		if (CHIP_NUM(bp) == CHIP_NUM_5709)
			REG_WR(bp, BNX2_MQ_MAP_L2_3, BNX2_MQ_MAP_L2_3_DEFAULT);
	}

	/* NOTE(review): 0x02 << 8 is an undocumented context type field;
	 * meaning not visible in this file. */
	val = BNX2_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE;
	val |= BNX2_L2CTX_CTX_TYPE_SIZE_L2;
	val |= 0x02 << 8;
	CTX_WR(bp, rx_cid_addr, BNX2_L2CTX_CTX_TYPE, val);

	/* Bus address of the first rx-ring page. */
	val = (u64) bp->rx_desc_mapping[0] >> 32;
	CTX_WR(bp, rx_cid_addr, BNX2_L2CTX_NX_BDHADDR_HI, val);

	val = (u64) bp->rx_desc_mapping[0] & 0xffffffff;
	CTX_WR(bp, rx_cid_addr, BNX2_L2CTX_NX_BDHADDR_LO, val);

	/* Pre-allocate pages for the jumbo ring; stop early on allocation
	 * failure and run with whatever was filled. */
	ring_prod = prod = bp->rx_pg_prod;
	for (i = 0; i < bp->rx_pg_ring_size; i++) {
		if (bnx2_alloc_rx_page(bp, ring_prod) < 0)
			break;
		prod = NEXT_RX_BD(prod);
		ring_prod = RX_PG_RING_IDX(prod);
	}
	bp->rx_pg_prod = prod;

	/* Pre-allocate skbs for the normal rx ring. */
	ring_prod = prod = bp->rx_prod;
	for (i = 0; i < bp->rx_ring_size; i++) {
		if (bnx2_alloc_rx_skb(bp, ring_prod) < 0) {
			break;
		}
		prod = NEXT_RX_BD(prod);
		ring_prod = RX_RING_IDX(prod);
	}
	bp->rx_prod = prod;

	/* Publish the producer indices and byte sequence to the chip. */
	REG_WR16(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_PG_BDIDX, bp->rx_pg_prod);
	REG_WR16(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BDIDX, prod);

	REG_WR(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BSEQ, bp->rx_prod_bseq);
}
4477
4478 static u32 bnx2_find_max_ring(u32 ring_size, u32 max_size)
4479 {
4480         u32 max, num_rings = 1;
4481
4482         while (ring_size > MAX_RX_DESC_CNT) {
4483                 ring_size -= MAX_RX_DESC_CNT;
4484                 num_rings++;
4485         }
4486         /* round to next power of 2 */
4487         max = max_size;
4488         while ((max & num_rings) == 0)
4489                 max >>= 1;
4490
4491         if (num_rings != max)
4492                 max <<= 1;
4493
4494         return max;
4495 }
4496
/* Compute all rx ring sizing parameters for `size` buffer descriptors
 * at the current MTU.  When a full MTU buffer (plus skb overhead)
 * would exceed one page, enable the jumbo page ring: headers stay in a
 * small skb and the payload lands in ring pages.
 */
static void
bnx2_set_rx_ring_size(struct bnx2 *bp, u32 size)
{
	u32 rx_size, rx_space, jumbo_size;

	/* 8 for CRC and VLAN */
	rx_size = bp->dev->mtu + ETH_HLEN + bp->rx_offset + 8;

	/* Full skb footprint including alignment and shared info. */
	rx_space = SKB_DATA_ALIGN(rx_size + BNX2_RX_ALIGN) + NET_SKB_PAD +
		sizeof(struct skb_shared_info);

	bp->rx_copy_thresh = RX_COPY_THRESH;
	bp->rx_pg_ring_size = 0;
	bp->rx_max_pg_ring = 0;
	bp->rx_max_pg_ring_idx = 0;
	if (rx_space > PAGE_SIZE) {
		/* NOTE(review): 40 is presumably the TCP/IP header bytes
		 * kept out of the paged area -- confirm against the rx
		 * path. */
		int pages = PAGE_ALIGN(bp->dev->mtu - 40) >> PAGE_SHIFT;

		jumbo_size = size * pages;
		if (jumbo_size > MAX_TOTAL_RX_PG_DESC_CNT)
			jumbo_size = MAX_TOTAL_RX_PG_DESC_CNT;

		bp->rx_pg_ring_size = jumbo_size;
		bp->rx_max_pg_ring = bnx2_find_max_ring(jumbo_size,
							MAX_RX_PG_RINGS);
		bp->rx_max_pg_ring_idx = (bp->rx_max_pg_ring * RX_DESC_CNT) - 1;
		/* Main rx buffer only holds up to the copy threshold. */
		rx_size = RX_COPY_THRESH + bp->rx_offset;
		bp->rx_copy_thresh = 0;
	}

	bp->rx_buf_use_size = rx_size;
	/* hw alignment */
	bp->rx_buf_size = bp->rx_buf_use_size + BNX2_RX_ALIGN;
	bp->rx_jumbo_thresh = rx_size - bp->rx_offset;
	bp->rx_ring_size = size;
	bp->rx_max_ring = bnx2_find_max_ring(size, MAX_RX_RINGS);
	bp->rx_max_ring_idx = (bp->rx_max_ring * RX_DESC_CNT) - 1;
}
4535
/* Unmap and free every skb still queued in the tx ring.  Used during
 * ring teardown (reset/close), not in the normal tx completion path.
 */
static void
bnx2_free_tx_skbs(struct bnx2 *bp)
{
	int i;

	if (bp->tx_buf_ring == NULL)
		return;

	for (i = 0; i < TX_DESC_CNT; ) {
		struct sw_bd *tx_buf = &bp->tx_buf_ring[i];
		struct sk_buff *skb = tx_buf->skb;
		int j, last;

		if (skb == NULL) {
			i++;
			continue;
		}

		/* First BD of a packet maps the skb's linear data. */
		pci_unmap_single(bp->pdev, pci_unmap_addr(tx_buf, mapping),
			skb_headlen(skb), PCI_DMA_TODEVICE);

		tx_buf->skb = NULL;

		/* The following BDs map the paged fragments. */
		last = skb_shinfo(skb)->nr_frags;
		for (j = 0; j < last; j++) {
			tx_buf = &bp->tx_buf_ring[i + j + 1];
			pci_unmap_page(bp->pdev,
				pci_unmap_addr(tx_buf, mapping),
				skb_shinfo(skb)->frags[j].size,
				PCI_DMA_TODEVICE);
		}
		dev_kfree_skb(skb);
		/* Advance past this packet's head BD plus its frag BDs. */
		i += j + 1;
	}

}
4572
4573 static void
4574 bnx2_free_rx_skbs(struct bnx2 *bp)
4575 {
4576         int i;
4577
4578         if (bp->rx_buf_ring == NULL)
4579                 return;
4580
4581         for (i = 0; i < bp->rx_max_ring_idx; i++) {
4582                 struct sw_bd *rx_buf = &bp->rx_buf_ring[i];
4583                 struct sk_buff *skb = rx_buf->skb;
4584
4585                 if (skb == NULL)
4586                         continue;
4587
4588                 pci_unmap_single(bp->pdev, pci_unmap_addr(rx_buf, mapping),
4589                         bp->rx_buf_use_size, PCI_DMA_FROMDEVICE);
4590
4591                 rx_buf->skb = NULL;
4592
4593                 dev_kfree_skb(skb);
4594         }
4595         for (i = 0; i < bp->rx_max_pg_ring_idx; i++)
4596                 bnx2_free_rx_page(bp, i);
4597 }
4598
/* Release all tx and rx buffers still held by the rings. */
static void
bnx2_free_skbs(struct bnx2 *bp)
{
	bnx2_free_tx_skbs(bp);
	bnx2_free_rx_skbs(bp);
}
4605
4606 static int
4607 bnx2_reset_nic(struct bnx2 *bp, u32 reset_code)
4608 {
4609         int rc;
4610
4611         rc = bnx2_reset_chip(bp, reset_code);
4612         bnx2_free_skbs(bp);
4613         if (rc)
4614                 return rc;
4615
4616         if ((rc = bnx2_init_chip(bp)) != 0)
4617                 return rc;
4618
4619         bnx2_init_tx_ring(bp);
4620         bnx2_init_rx_ring(bp);
4621         return 0;
4622 }
4623
4624 static int
4625 bnx2_init_nic(struct bnx2 *bp)
4626 {
4627         int rc;
4628
4629         if ((rc = bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET)) != 0)
4630                 return rc;
4631
4632         spin_lock_bh(&bp->phy_lock);
4633         bnx2_init_phy(bp);
4634         bnx2_set_link(bp);
4635         spin_unlock_bh(&bp->phy_lock);
4636         return 0;
4637 }
4638
/* Ethtool register self-test.  For each register in the table, verify
 * that the read/write bits (rw_mask) can be cleared and set, and that
 * the read-only bits (ro_mask) are unaffected by writes.  The original
 * value is restored in all cases.  Returns 0 or -ENODEV on the first
 * mismatch.
 */
static int
bnx2_test_registers(struct bnx2 *bp)
{
	int ret;
	int i, is_5709;
	/* Table of { offset, flags, rw_mask, ro_mask }, terminated by
	 * offset == 0xffff.  Entries flagged BNX2_FL_NOT_5709 are
	 * skipped on the 5709. */
	static const struct {
		u16   offset;
		u16   flags;
#define BNX2_FL_NOT_5709	1
		u32   rw_mask;
		u32   ro_mask;
	} reg_tbl[] = {
		{ 0x006c, 0, 0x00000000, 0x0000003f },
		{ 0x0090, 0, 0xffffffff, 0x00000000 },
		{ 0x0094, 0, 0x00000000, 0x00000000 },

		{ 0x0404, BNX2_FL_NOT_5709, 0x00003f00, 0x00000000 },
		{ 0x0418, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
		{ 0x041c, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
		{ 0x0420, BNX2_FL_NOT_5709, 0x00000000, 0x80ffffff },
		{ 0x0424, BNX2_FL_NOT_5709, 0x00000000, 0x00000000 },
		{ 0x0428, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
		{ 0x0450, BNX2_FL_NOT_5709, 0x00000000, 0x0000ffff },
		{ 0x0454, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
		{ 0x0458, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },

		{ 0x0808, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
		{ 0x0854, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
		{ 0x0868, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
		{ 0x086c, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
		{ 0x0870, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
		{ 0x0874, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },

		{ 0x0c00, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
		{ 0x0c04, BNX2_FL_NOT_5709, 0x00000000, 0x03ff0001 },
		{ 0x0c08, BNX2_FL_NOT_5709,  0x0f0ff073, 0x00000000 },

		{ 0x1000, 0, 0x00000000, 0x00000001 },
		{ 0x1004, 0, 0x00000000, 0x000f0001 },

		{ 0x1408, 0, 0x01c00800, 0x00000000 },
		{ 0x149c, 0, 0x8000ffff, 0x00000000 },
		{ 0x14a8, 0, 0x00000000, 0x000001ff },
		{ 0x14ac, 0, 0x0fffffff, 0x10000000 },
		{ 0x14b0, 0, 0x00000002, 0x00000001 },
		{ 0x14b8, 0, 0x00000000, 0x00000000 },
		{ 0x14c0, 0, 0x00000000, 0x00000009 },
		{ 0x14c4, 0, 0x00003fff, 0x00000000 },
		{ 0x14cc, 0, 0x00000000, 0x00000001 },
		{ 0x14d0, 0, 0xffffffff, 0x00000000 },

		{ 0x1800, 0, 0x00000000, 0x00000001 },
		{ 0x1804, 0, 0x00000000, 0x00000003 },

		{ 0x2800, 0, 0x00000000, 0x00000001 },
		{ 0x2804, 0, 0x00000000, 0x00003f01 },
		{ 0x2808, 0, 0x0f3f3f03, 0x00000000 },
		{ 0x2810, 0, 0xffff0000, 0x00000000 },
		{ 0x2814, 0, 0xffff0000, 0x00000000 },
		{ 0x2818, 0, 0xffff0000, 0x00000000 },
		{ 0x281c, 0, 0xffff0000, 0x00000000 },
		{ 0x2834, 0, 0xffffffff, 0x00000000 },
		{ 0x2840, 0, 0x00000000, 0xffffffff },
		{ 0x2844, 0, 0x00000000, 0xffffffff },
		{ 0x2848, 0, 0xffffffff, 0x00000000 },
		{ 0x284c, 0, 0xf800f800, 0x07ff07ff },

		{ 0x2c00, 0, 0x00000000, 0x00000011 },
		{ 0x2c04, 0, 0x00000000, 0x00030007 },

		{ 0x3c00, 0, 0x00000000, 0x00000001 },
		{ 0x3c04, 0, 0x00000000, 0x00070000 },
		{ 0x3c08, 0, 0x00007f71, 0x07f00000 },
		{ 0x3c0c, 0, 0x1f3ffffc, 0x00000000 },
		{ 0x3c10, 0, 0xffffffff, 0x00000000 },
		{ 0x3c14, 0, 0x00000000, 0xffffffff },
		{ 0x3c18, 0, 0x00000000, 0xffffffff },
		{ 0x3c1c, 0, 0xfffff000, 0x00000000 },
		{ 0x3c20, 0, 0xffffff00, 0x00000000 },

		{ 0x5004, 0, 0x00000000, 0x0000007f },
		{ 0x5008, 0, 0x0f0007ff, 0x00000000 },

		{ 0x5c00, 0, 0x00000000, 0x00000001 },
		{ 0x5c04, 0, 0x00000000, 0x0003000f },
		{ 0x5c08, 0, 0x00000003, 0x00000000 },
		{ 0x5c0c, 0, 0x0000fff8, 0x00000000 },
		{ 0x5c10, 0, 0x00000000, 0xffffffff },
		{ 0x5c80, 0, 0x00000000, 0x0f7113f1 },
		{ 0x5c84, 0, 0x00000000, 0x0000f333 },
		{ 0x5c88, 0, 0x00000000, 0x00077373 },
		{ 0x5c8c, 0, 0x00000000, 0x0007f737 },

		{ 0x6808, 0, 0x0000ff7f, 0x00000000 },
		{ 0x680c, 0, 0xffffffff, 0x00000000 },
		{ 0x6810, 0, 0xffffffff, 0x00000000 },
		{ 0x6814, 0, 0xffffffff, 0x00000000 },
		{ 0x6818, 0, 0xffffffff, 0x00000000 },
		{ 0x681c, 0, 0xffffffff, 0x00000000 },
		{ 0x6820, 0, 0x00ff00ff, 0x00000000 },
		{ 0x6824, 0, 0x00ff00ff, 0x00000000 },
		{ 0x6828, 0, 0x00ff00ff, 0x00000000 },
		{ 0x682c, 0, 0x03ff03ff, 0x00000000 },
		{ 0x6830, 0, 0x03ff03ff, 0x00000000 },
		{ 0x6834, 0, 0x03ff03ff, 0x00000000 },
		{ 0x6838, 0, 0x03ff03ff, 0x00000000 },
		{ 0x683c, 0, 0x0000ffff, 0x00000000 },
		{ 0x6840, 0, 0x00000ff0, 0x00000000 },
		{ 0x6844, 0, 0x00ffff00, 0x00000000 },
		{ 0x684c, 0, 0xffffffff, 0x00000000 },
		{ 0x6850, 0, 0x7f7f7f7f, 0x00000000 },
		{ 0x6854, 0, 0x7f7f7f7f, 0x00000000 },
		{ 0x6858, 0, 0x7f7f7f7f, 0x00000000 },
		{ 0x685c, 0, 0x7f7f7f7f, 0x00000000 },
		{ 0x6908, 0, 0x00000000, 0x0001ff0f },
		{ 0x690c, 0, 0x00000000, 0x0ffe00f0 },

		{ 0xffff, 0, 0x00000000, 0x00000000 },
	};

	ret = 0;
	is_5709 = 0;
	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		is_5709 = 1;

	for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
		u32 offset, rw_mask, ro_mask, save_val, val;
		u16 flags = reg_tbl[i].flags;

		if (is_5709 && (flags & BNX2_FL_NOT_5709))
			continue;

		offset = (u32) reg_tbl[i].offset;
		rw_mask = reg_tbl[i].rw_mask;
		ro_mask = reg_tbl[i].ro_mask;

		save_val = readl(bp->regview + offset);

		/* Write 0: rw bits must clear, ro bits must not change. */
		writel(0, bp->regview + offset);

		val = readl(bp->regview + offset);
		if ((val & rw_mask) != 0) {
			goto reg_test_err;
		}

		if ((val & ro_mask) != (save_val & ro_mask)) {
			goto reg_test_err;
		}

		/* Write all 1s: rw bits must set, ro bits unchanged. */
		writel(0xffffffff, bp->regview + offset);

		val = readl(bp->regview + offset);
		if ((val & rw_mask) != rw_mask) {
			goto reg_test_err;
		}

		if ((val & ro_mask) != (save_val & ro_mask)) {
			goto reg_test_err;
		}

		writel(save_val, bp->regview + offset);
		continue;

reg_test_err:
		/* Restore the original value before reporting failure. */
		writel(save_val, bp->regview + offset);
		ret = -ENODEV;
		break;
	}
	return ret;
}
4809
4810 static int
4811 bnx2_do_mem_test(struct bnx2 *bp, u32 start, u32 size)
4812 {
4813         static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0x55555555,
4814                 0xaaaaaaaa , 0xaa55aa55, 0x55aa55aa };
4815         int i;
4816
4817         for (i = 0; i < sizeof(test_pattern) / 4; i++) {
4818                 u32 offset;
4819
4820                 for (offset = 0; offset < size; offset += 4) {
4821
4822                         REG_WR_IND(bp, start + offset, test_pattern[i]);
4823
4824                         if (REG_RD_IND(bp, start + offset) !=
4825                                 test_pattern[i]) {
4826                                 return -ENODEV;
4827                         }
4828                 }
4829         }
4830         return 0;
4831 }
4832
4833 static int
4834 bnx2_test_memory(struct bnx2 *bp)
4835 {
4836         int ret = 0;
4837         int i;
4838         static struct mem_entry {
4839                 u32   offset;
4840                 u32   len;
4841         } mem_tbl_5706[] = {
4842                 { 0x60000,  0x4000 },
4843                 { 0xa0000,  0x3000 },
4844                 { 0xe0000,  0x4000 },
4845                 { 0x120000, 0x4000 },
4846                 { 0x1a0000, 0x4000 },
4847                 { 0x160000, 0x4000 },
4848                 { 0xffffffff, 0    },
4849         },
4850         mem_tbl_5709[] = {
4851                 { 0x60000,  0x4000 },
4852                 { 0xa0000,  0x3000 },
4853                 { 0xe0000,  0x4000 },
4854                 { 0x120000, 0x4000 },
4855                 { 0x1a0000, 0x4000 },
4856                 { 0xffffffff, 0    },
4857         };
4858         struct mem_entry *mem_tbl;
4859
4860         if (CHIP_NUM(bp) == CHIP_NUM_5709)
4861                 mem_tbl = mem_tbl_5709;
4862         else
4863                 mem_tbl = mem_tbl_5706;
4864
4865         for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
4866                 if ((ret = bnx2_do_mem_test(bp, mem_tbl[i].offset,
4867                         mem_tbl[i].len)) != 0) {
4868                         return ret;
4869                 }
4870         }
4871
4872         return ret;
4873 }
4874
4875 #define BNX2_MAC_LOOPBACK       0
4876 #define BNX2_PHY_LOOPBACK       1
4877
/* Ethtool loopback self-test.  Put the MAC or PHY in loopback, send
 * one test packet through the tx ring, and verify that it comes back
 * on the rx ring intact (correct length, payload, and no frame
 * errors).  Returns 0 on success, -EINVAL for an unknown mode, or
 * -ENODEV/-ENOMEM on failure.
 */
static int
bnx2_run_loopback(struct bnx2 *bp, int loopback_mode)
{
	unsigned int pkt_size, num_pkts, i;
	struct sk_buff *skb, *rx_skb;
	unsigned char *packet;
	u16 rx_start_idx, rx_idx;
	dma_addr_t map;
	struct tx_bd *txbd;
	struct sw_bd *rx_buf;
	struct l2_fhdr *rx_hdr;
	int ret = -ENODEV;

	if (loopback_mode == BNX2_MAC_LOOPBACK) {
		bp->loopback = MAC_LOOPBACK;
		bnx2_set_mac_loopback(bp);
	}
	else if (loopback_mode == BNX2_PHY_LOOPBACK) {
		/* PHY loopback is not applicable with a remote PHY. */
		if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
			return 0;

		bp->loopback = PHY_LOOPBACK;
		bnx2_set_phy_loopback(bp);
	}
	else
		return -EINVAL;

	/* Build a test frame: our MAC as destination, then a counting
	 * byte pattern in the payload. */
	pkt_size = min(bp->dev->mtu + ETH_HLEN, bp->rx_jumbo_thresh - 4);
	skb = netdev_alloc_skb(bp->dev, pkt_size);
	if (!skb)
		return -ENOMEM;
	packet = skb_put(skb, pkt_size);
	memcpy(packet, bp->dev->dev_addr, 6);
	memset(packet + 6, 0x0, 8);
	for (i = 14; i < pkt_size; i++)
		packet[i] = (unsigned char) (i & 0xff);

	map = pci_map_single(bp->pdev, skb->data, pkt_size,
		PCI_DMA_TODEVICE);

	/* Force a coalescing pass so the status block is current before
	 * sampling the rx consumer index. */
	REG_WR(bp, BNX2_HC_COMMAND,
	       bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);

	REG_RD(bp, BNX2_HC_COMMAND);

	udelay(5);
	rx_start_idx = bnx2_get_hw_rx_cons(bp);

	num_pkts = 0;

	/* Queue the packet as a single START|END BD. */
	txbd = &bp->tx_desc_ring[TX_RING_IDX(bp->tx_prod)];

	txbd->tx_bd_haddr_hi = (u64) map >> 32;
	txbd->tx_bd_haddr_lo = (u64) map & 0xffffffff;
	txbd->tx_bd_mss_nbytes = pkt_size;
	txbd->tx_bd_vlan_tag_flags = TX_BD_FLAGS_START | TX_BD_FLAGS_END;

	num_pkts++;
	bp->tx_prod = NEXT_TX_BD(bp->tx_prod);
	bp->tx_prod_bseq += pkt_size;

	/* Ring the tx doorbell. */
	REG_WR16(bp, bp->tx_bidx_addr, bp->tx_prod);
	REG_WR(bp, bp->tx_bseq_addr, bp->tx_prod_bseq);

	udelay(100);

	/* Force another coalescing pass to pick up the completion. */
	REG_WR(bp, BNX2_HC_COMMAND,
	       bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);

	REG_RD(bp, BNX2_HC_COMMAND);

	udelay(5);

	pci_unmap_single(bp->pdev, map, pkt_size, PCI_DMA_TODEVICE);
	dev_kfree_skb(skb);

	/* The packet must have been transmitted... */
	if (bnx2_get_hw_tx_cons(bp) != bp->tx_prod)
		goto loopback_test_done;

	/* ...and exactly num_pkts must have been received. */
	rx_idx = bnx2_get_hw_rx_cons(bp);
	if (rx_idx != rx_start_idx + num_pkts) {
		goto loopback_test_done;
	}

	rx_buf = &bp->rx_buf_ring[rx_start_idx];
	rx_skb = rx_buf->skb;

	/* The frame header written by the chip precedes the packet data. */
	rx_hdr = (struct l2_fhdr *) rx_skb->data;
	skb_reserve(rx_skb, bp->rx_offset);

	pci_dma_sync_single_for_cpu(bp->pdev,
		pci_unmap_addr(rx_buf, mapping),
		bp->rx_buf_size, PCI_DMA_FROMDEVICE);

	/* Reject frames received with any error flag set. */
	if (rx_hdr->l2_fhdr_status &
		(L2_FHDR_ERRORS_BAD_CRC |
		L2_FHDR_ERRORS_PHY_DECODE |
		L2_FHDR_ERRORS_ALIGNMENT |
		L2_FHDR_ERRORS_TOO_SHORT |
		L2_FHDR_ERRORS_GIANT_FRAME)) {

		goto loopback_test_done;
	}

	/* Length must match (minus 4 bytes of CRC). */
	if ((rx_hdr->l2_fhdr_pkt_len - 4) != pkt_size) {
		goto loopback_test_done;
	}

	/* Verify the counting payload pattern byte by byte. */
	for (i = 14; i < pkt_size; i++) {
		if (*(rx_skb->data + i) != (unsigned char) (i & 0xff)) {
			goto loopback_test_done;
		}
	}

	ret = 0;

loopback_test_done:
	bp->loopback = 0;
	return ret;
}
4998
4999 #define BNX2_MAC_LOOPBACK_FAILED        1
5000 #define BNX2_PHY_LOOPBACK_FAILED        2
5001 #define BNX2_LOOPBACK_FAILED            (BNX2_MAC_LOOPBACK_FAILED |     \
5002                                          BNX2_PHY_LOOPBACK_FAILED)
5003
5004 static int
5005 bnx2_test_loopback(struct bnx2 *bp)
5006 {
5007         int rc = 0;
5008
5009         if (!netif_running(bp->dev))
5010                 return BNX2_LOOPBACK_FAILED;
5011
5012         bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET);
5013         spin_lock_bh(&bp->phy_lock);
5014         bnx2_init_phy(bp);
5015         spin_unlock_bh(&bp->phy_lock);
5016         if (bnx2_run_loopback(bp, BNX2_MAC_LOOPBACK))
5017                 rc |= BNX2_MAC_LOOPBACK_FAILED;
5018         if (bnx2_run_loopback(bp, BNX2_PHY_LOOPBACK))
5019                 rc |= BNX2_PHY_LOOPBACK_FAILED;
5020         return rc;
5021 }
5022
5023 #define NVRAM_SIZE 0x200
5024 #define CRC32_RESIDUAL 0xdebb20e3
5025
5026 static int
5027 bnx2_test_nvram(struct bnx2 *bp)
5028 {
5029         u32 buf[NVRAM_SIZE / 4];
5030         u8 *data = (u8 *) buf;
5031         int rc = 0;
5032         u32 magic, csum;
5033
5034         if ((rc = bnx2_nvram_read(bp, 0, data, 4)) != 0)
5035                 goto test_nvram_done;
5036
5037         magic = be32_to_cpu(buf[0]);
5038         if (magic != 0x669955aa) {
5039                 rc = -ENODEV;
5040                 goto test_nvram_done;
5041         }
5042
5043         if ((rc = bnx2_nvram_read(bp, 0x100, data, NVRAM_SIZE)) != 0)
5044                 goto test_nvram_done;
5045
5046         csum = ether_crc_le(0x100, data);
5047         if (csum != CRC32_RESIDUAL) {
5048                 rc = -ENODEV;
5049                 goto test_nvram_done;
5050         }
5051
5052         csum = ether_crc_le(0x100, data + 0x100);
5053         if (csum != CRC32_RESIDUAL) {
5054                 rc = -ENODEV;
5055         }
5056
5057 test_nvram_done:
5058         return rc;
5059 }
5060
5061 static int
5062 bnx2_test_link(struct bnx2 *bp)
5063 {
5064         u32 bmsr;
5065
5066         if (bp->phy_flags & REMOTE_PHY_CAP_FLAG) {
5067                 if (bp->link_up)
5068                         return 0;
5069                 return -ENODEV;
5070         }
5071         spin_lock_bh(&bp->phy_lock);
5072         bnx2_enable_bmsr1(bp);
5073         bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
5074         bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
5075         bnx2_disable_bmsr1(bp);
5076         spin_unlock_bh(&bp->phy_lock);
5077
5078         if (bmsr & BMSR_LSTATUS) {
5079                 return 0;
5080         }
5081         return -ENODEV;
5082 }
5083
5084 static int
5085 bnx2_test_intr(struct bnx2 *bp)
5086 {
5087         int i;
5088         u16 status_idx;
5089
5090         if (!netif_running(bp->dev))
5091                 return -ENODEV;
5092
5093         status_idx = REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff;
5094
5095         /* This register is not touched during run-time. */
5096         REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
5097         REG_RD(bp, BNX2_HC_COMMAND);
5098
5099         for (i = 0; i < 10; i++) {
5100                 if ((REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff) !=
5101                         status_idx) {
5102
5103                         break;
5104                 }
5105
5106                 msleep_interruptible(10);
5107         }
5108         if (i < 10)
5109                 return 0;
5110
5111         return -ENODEV;
5112 }
5113
/* Per-tick SerDes handling for the 5706: implements parallel detection.
 * When autoneg is enabled but the link stays down and the partner shows
 * signal without autoneg config, force 1G full duplex; once a forced
 * link partner starts sending config, re-enable autoneg.  Called from
 * bnx2_timer().
 */
static void
bnx2_5706_serdes_timer(struct bnx2 *bp)
{
	spin_lock(&bp->phy_lock);
	if (bp->serdes_an_pending)
		bp->serdes_an_pending--;	/* hold off while autoneg settles */
	else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
		u32 bmcr;

		bp->current_interval = bp->timer_interval;

		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

		if (bmcr & BMCR_ANENABLE) {
			u32 phy1, phy2;

			/* NOTE(review): 0x1c/0x17/0x15 look like vendor
			 * shadow/expansion register accesses; the exact
			 * register semantics are not visible here.
			 */
			bnx2_write_phy(bp, 0x1c, 0x7c00);
			bnx2_read_phy(bp, 0x1c, &phy1);

			bnx2_write_phy(bp, 0x17, 0x0f01);
			bnx2_read_phy(bp, 0x15, &phy2);
			bnx2_write_phy(bp, 0x17, 0x0f01);
			bnx2_read_phy(bp, 0x15, &phy2);

			if ((phy1 & 0x10) &&	/* SIGNAL DETECT */
				!(phy2 & 0x20)) {	/* no CONFIG */

				/* Partner is present but not negotiating:
				 * force 1000 Mbps full duplex instead.
				 */
				bmcr &= ~BMCR_ANENABLE;
				bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
				bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
				bp->phy_flags |= PHY_PARALLEL_DETECT_FLAG;
			}
		}
	}
	else if ((bp->link_up) && (bp->autoneg & AUTONEG_SPEED) &&
		 (bp->phy_flags & PHY_PARALLEL_DETECT_FLAG)) {
		u32 phy2;

		bnx2_write_phy(bp, 0x17, 0x0f01);
		bnx2_read_phy(bp, 0x15, &phy2);
		if (phy2 & 0x20) {	/* partner now sends CONFIG */
			u32 bmcr;

			/* Return from forced mode to autonegotiation. */
			bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
			bmcr |= BMCR_ANENABLE;
			bnx2_write_phy(bp, bp->mii_bmcr, bmcr);

			bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG;
		}
	} else
		bp->current_interval = bp->timer_interval;

	spin_unlock(&bp->phy_lock);
}
5168
/* Per-tick SerDes handling for the 5708 (only acts on 2.5G-capable
 * PHYs): while autoneg is on and the link stays down, alternate
 * between forced 2.5G and autoneg to establish link with partners
 * that do not negotiate.  Called from bnx2_timer().
 */
static void
bnx2_5708_serdes_timer(struct bnx2 *bp)
{
	/* Nothing to do locally when a remote PHY is in charge. */
	if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
		return;

	if ((bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) == 0) {
		bp->serdes_an_pending = 0;
		return;
	}

	spin_lock(&bp->phy_lock);
	if (bp->serdes_an_pending)
		bp->serdes_an_pending--;	/* hold off while autoneg settles */
	else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
		u32 bmcr;

		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
		if (bmcr & BMCR_ANENABLE) {
			/* Autoneg has not brought the link up; try a
			 * forced 2.5G link for a short interval.
			 */
			bnx2_enable_forced_2g5(bp);
			bp->current_interval = SERDES_FORCED_TIMEOUT;
		} else {
			/* Forced mode failed too; fall back to autoneg
			 * and wait two ticks before retrying.
			 */
			bnx2_disable_forced_2g5(bp);
			bp->serdes_an_pending = 2;
			bp->current_interval = bp->timer_interval;
		}

	} else
		bp->current_interval = bp->timer_interval;

	spin_unlock(&bp->phy_lock);
}
5201
/* Periodic driver timer (re-armed every bp->current_interval jiffies).
 * Sends the firmware heartbeat, refreshes the firmware RX drop counter
 * in the stats block, and runs the chip-specific SerDes workarounds.
 */
static void
bnx2_timer(unsigned long data)
{
	struct bnx2 *bp = (struct bnx2 *) data;

	if (!netif_running(bp->dev))
		return;

	/* intr_sem raised (e.g. during reset): skip the work this tick
	 * but keep the timer armed.
	 */
	if (atomic_read(&bp->intr_sem) != 0)
		goto bnx2_restart_timer;

	bnx2_send_heart_beat(bp);

	bp->stats_blk->stat_FwRxDrop = REG_RD_IND(bp, BNX2_FW_RX_DROP_COUNT);

	/* workaround occasional corrupted counters */
	if (CHIP_NUM(bp) == CHIP_NUM_5708 && bp->stats_ticks)
		REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd |
					    BNX2_HC_COMMAND_STATS_NOW);

	if (bp->phy_flags & PHY_SERDES_FLAG) {
		if (CHIP_NUM(bp) == CHIP_NUM_5706)
			bnx2_5706_serdes_timer(bp);
		else
			bnx2_5708_serdes_timer(bp);
	}

bnx2_restart_timer:
	mod_timer(&bp->timer, jiffies + bp->current_interval);
}
5232
5233 static int
5234 bnx2_request_irq(struct bnx2 *bp)
5235 {
5236         struct net_device *dev = bp->dev;
5237         int rc = 0;
5238
5239         if (bp->flags & USING_MSI_FLAG) {
5240                 irq_handler_t   fn = bnx2_msi;
5241
5242                 if (bp->flags & ONE_SHOT_MSI_FLAG)
5243                         fn = bnx2_msi_1shot;
5244
5245                 rc = request_irq(bp->pdev->irq, fn, 0, dev->name, dev);
5246         } else
5247                 rc = request_irq(bp->pdev->irq, bnx2_interrupt,
5248                                  IRQF_SHARED, dev->name, dev);
5249         return rc;
5250 }
5251
5252 static void
5253 bnx2_free_irq(struct bnx2 *bp)
5254 {
5255         struct net_device *dev = bp->dev;
5256
5257         if (bp->flags & USING_MSI_FLAG) {
5258                 free_irq(bp->pdev->irq, dev);
5259                 pci_disable_msi(bp->pdev);
5260                 bp->flags &= ~(USING_MSI_FLAG | ONE_SHOT_MSI_FLAG);
5261         } else
5262                 free_irq(bp->pdev->irq, dev);
5263 }
5264
/* Called with rtnl_lock */
/* ndo open: bring the interface up.  Allocates rings and status/stats
 * blocks, enables MSI when supported, installs the IRQ handler,
 * initializes the chip, and starts the periodic timer.  If MSI delivery
 * fails the interrupt self-test, the device is reinitialized in legacy
 * INTx mode.  Returns 0 on success or a negative errno.
 */
static int
bnx2_open(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	int rc;

	netif_carrier_off(dev);

	bnx2_set_power_state(bp, PCI_D0);
	bnx2_disable_int(bp);

	rc = bnx2_alloc_mem(bp);
	if (rc)
		return rc;

	napi_enable(&bp->napi);

	/* Prefer MSI when the chip supports it and the user has not
	 * disabled it; 5709 additionally supports one-shot MSI.
	 */
	if ((bp->flags & MSI_CAP_FLAG) && !disable_msi) {
		if (pci_enable_msi(bp->pdev) == 0) {
			bp->flags |= USING_MSI_FLAG;
			if (CHIP_NUM(bp) == CHIP_NUM_5709)
				bp->flags |= ONE_SHOT_MSI_FLAG;
		}
	}
	rc = bnx2_request_irq(bp);

	if (rc) {
		napi_disable(&bp->napi);
		bnx2_free_mem(bp);
		return rc;
	}

	rc = bnx2_init_nic(bp);

	if (rc) {
		napi_disable(&bp->napi);
		bnx2_free_irq(bp);
		bnx2_free_skbs(bp);
		bnx2_free_mem(bp);
		return rc;
	}

	mod_timer(&bp->timer, jiffies + bp->current_interval);

	atomic_set(&bp->intr_sem, 0);

	bnx2_enable_int(bp);

	if (bp->flags & USING_MSI_FLAG) {
		/* Test MSI to make sure it is working
		 * If MSI test fails, go back to INTx mode
		 */
		if (bnx2_test_intr(bp) != 0) {
			printk(KERN_WARNING PFX "%s: No interrupt was generated"
			       " using MSI, switching to INTx mode. Please"
			       " report this failure to the PCI maintainer"
			       " and include system chipset information.\n",
			       bp->dev->name);

			/* Tear down and reinitialize: bnx2_free_irq()
			 * also disables MSI, so the re-requested IRQ
			 * uses the legacy INTx handler.
			 */
			bnx2_disable_int(bp);
			bnx2_free_irq(bp);

			rc = bnx2_init_nic(bp);

			if (!rc)
				rc = bnx2_request_irq(bp);

			if (rc) {
				napi_disable(&bp->napi);
				bnx2_free_skbs(bp);
				bnx2_free_mem(bp);
				del_timer_sync(&bp->timer);
				return rc;
			}
			bnx2_enable_int(bp);
		}
	}
	if (bp->flags & USING_MSI_FLAG) {
		printk(KERN_INFO PFX "%s: using MSI\n", dev->name);
	}

	netif_start_queue(dev);

	return 0;
}
5351
/* Deferred chip reset, scheduled from bnx2_tx_timeout().  Stops the
 * netif, reinitializes the NIC, and restarts.  The in_reset_task flag
 * is polled by bnx2_close() so teardown waits for this work to finish.
 */
static void
bnx2_reset_task(struct work_struct *work)
{
	struct bnx2 *bp = container_of(work, struct bnx2, reset_task);

	if (!netif_running(bp->dev))
		return;

	bp->in_reset_task = 1;
	bnx2_netif_stop(bp);

	bnx2_init_nic(bp);

	/* Raise intr_sem before restarting so pending interrupt work
	 * is gated until the restart path lowers it.
	 */
	atomic_set(&bp->intr_sem, 1);
	bnx2_netif_start(bp);
	bp->in_reset_task = 0;
}
5369
/* netdev watchdog callback: defer the chip reset to process context
 * (bnx2_reset_task) rather than resetting from here.
 */
static void
bnx2_tx_timeout(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);

	/* This allows the netif to be shutdown gracefully before resetting */
	schedule_work(&bp->reset_task);
}
5378
5379 #ifdef BCM_VLAN
5380 /* Called with rtnl_lock */
/* VLAN acceleration hook: record the new VLAN group and reprogram the
 * chip's RX mode while traffic is quiesced.
 */
static void
bnx2_vlan_rx_register(struct net_device *dev, struct vlan_group *vlgrp)
{
	struct bnx2 *bp = netdev_priv(dev);

	bnx2_netif_stop(bp);

	bp->vlgrp = vlgrp;
	/* RX mode depends on whether VLAN stripping is active. */
	bnx2_set_rx_mode(dev);

	bnx2_netif_start(bp);
}
5393 #endif
5394
/* Called with netif_tx_lock.
 * bnx2_tx_int() runs without netif_tx_lock unless it needs to call
 * netif_wake_queue().
 */
/* Hard transmit routine: builds one TX buffer descriptor per skb
 * fragment (plus one for the linear head), encoding checksum-offload,
 * VLAN tag, and TSO parameters in the BD flags, then rings the TX
 * doorbell.  Returns NETDEV_TX_OK, or NETDEV_TX_BUSY if the ring is
 * unexpectedly full.
 */
static int
bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	dma_addr_t mapping;
	struct tx_bd *txbd;
	struct sw_bd *tx_buf;
	u32 len, vlan_tag_flags, last_frag, mss;
	u16 prod, ring_prod;
	int i;

	/* The queue should have been stopped before the ring filled;
	 * hitting this indicates a flow-control bug.
	 */
	if (unlikely(bnx2_tx_avail(bp) < (skb_shinfo(skb)->nr_frags + 1))) {
		netif_stop_queue(dev);
		printk(KERN_ERR PFX "%s: BUG! Tx ring full when queue awake!\n",
			dev->name);

		return NETDEV_TX_BUSY;
	}
	len = skb_headlen(skb);
	prod = bp->tx_prod;
	ring_prod = TX_RING_IDX(prod);

	vlan_tag_flags = 0;
	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		/* Ask the chip to compute the TCP/UDP checksum. */
		vlan_tag_flags |= TX_BD_FLAGS_TCP_UDP_CKSUM;
	}

	if (bp->vlgrp != 0 && vlan_tx_tag_present(skb)) {
		/* VLAN tag lives in the upper 16 bits of the flags word. */
		vlan_tag_flags |=
			(TX_BD_FLAGS_VLAN_TAG | (vlan_tx_tag_get(skb) << 16));
	}
	if ((mss = skb_shinfo(skb)->gso_size)) {
		/* TSO path: encode MSS and header-length hints in the BD. */
		u32 tcp_opt_len, ip_tcp_len;
		struct iphdr *iph;

		vlan_tag_flags |= TX_BD_FLAGS_SW_LSO;

		tcp_opt_len = tcp_optlen(skb);

		if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) {
			/* IPv6 TSO: the TCP header offset (beyond the
			 * fixed IPv6 header) is split across several
			 * BD flag fields.
			 */
			u32 tcp_off = skb_transport_offset(skb) -
				      sizeof(struct ipv6hdr) - ETH_HLEN;

			vlan_tag_flags |= ((tcp_opt_len >> 2) << 8) |
					  TX_BD_FLAGS_SW_FLAGS;
			if (likely(tcp_off == 0))
				vlan_tag_flags &= ~TX_BD_FLAGS_TCP6_OFF0_MSK;
			else {
				tcp_off >>= 3;
				vlan_tag_flags |= ((tcp_off & 0x3) <<
						   TX_BD_FLAGS_TCP6_OFF0_SHL) |
						  ((tcp_off & 0x10) <<
						   TX_BD_FLAGS_TCP6_OFF4_SHL);
				mss |= (tcp_off & 0xc) << TX_BD_TCP6_OFF2_SHL;
			}
		} else {
			/* IPv4 TSO: headers are rewritten below, so a
			 * cloned header area must be private first.
			 */
			if (skb_header_cloned(skb) &&
			    pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
				dev_kfree_skb(skb);
				return NETDEV_TX_OK;
			}

			ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);

			/* Prime IP total length and TCP pseudo-header
			 * checksum for the hardware segmentation.
			 */
			iph = ip_hdr(skb);
			iph->check = 0;
			iph->tot_len = htons(mss + ip_tcp_len + tcp_opt_len);
			tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
								 iph->daddr, 0,
								 IPPROTO_TCP,
								 0);
			if (tcp_opt_len || (iph->ihl > 5)) {
				vlan_tag_flags |= ((iph->ihl - 5) +
						   (tcp_opt_len >> 2)) << 8;
			}
		}
	} else
		mss = 0;

	mapping = pci_map_single(bp->pdev, skb->data, len, PCI_DMA_TODEVICE);

	/* First BD: linear portion of the skb, flagged as packet start. */
	tx_buf = &bp->tx_buf_ring[ring_prod];
	tx_buf->skb = skb;
	pci_unmap_addr_set(tx_buf, mapping, mapping);

	txbd = &bp->tx_desc_ring[ring_prod];

	txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
	txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
	txbd->tx_bd_mss_nbytes = len | (mss << 16);
	txbd->tx_bd_vlan_tag_flags = vlan_tag_flags | TX_BD_FLAGS_START;

	last_frag = skb_shinfo(skb)->nr_frags;

	/* One BD per page fragment. */
	for (i = 0; i < last_frag; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		prod = NEXT_TX_BD(prod);
		ring_prod = TX_RING_IDX(prod);
		txbd = &bp->tx_desc_ring[ring_prod];

		len = frag->size;
		mapping = pci_map_page(bp->pdev, frag->page, frag->page_offset,
			len, PCI_DMA_TODEVICE);
		pci_unmap_addr_set(&bp->tx_buf_ring[ring_prod],
				mapping, mapping);

		txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
		txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
		txbd->tx_bd_mss_nbytes = len | (mss << 16);
		txbd->tx_bd_vlan_tag_flags = vlan_tag_flags;

	}
	/* Mark the last BD of the packet. */
	txbd->tx_bd_vlan_tag_flags |= TX_BD_FLAGS_END;

	prod = NEXT_TX_BD(prod);
	bp->tx_prod_bseq += skb->len;

	/* Ring the doorbell: producer index and byte sequence. */
	REG_WR16(bp, bp->tx_bidx_addr, prod);
	REG_WR(bp, bp->tx_bseq_addr, bp->tx_prod_bseq);

	mmiowb();

	bp->tx_prod = prod;
	dev->trans_start = jiffies;

	/* Stop the queue when a max-fragment packet might not fit;
	 * re-wake immediately if the completion path freed enough room
	 * in the meantime.
	 */
	if (unlikely(bnx2_tx_avail(bp) <= MAX_SKB_FRAGS)) {
		netif_stop_queue(dev);
		if (bnx2_tx_avail(bp) > bp->tx_wake_thresh)
			netif_wake_queue(dev);
	}

	return NETDEV_TX_OK;
}
5533
/* Called with rtnl_lock */
/* ndo stop: quiesce interrupts and NAPI, stop the timer, put the chip
 * into the appropriate reset state (depending on WoL configuration),
 * and release IRQ, buffers, and ring memory.  Always returns 0.
 */
static int
bnx2_close(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	u32 reset_code;

	/* Calling flush_scheduled_work() may deadlock because
	 * linkwatch_event() may be on the workqueue and it will try to get
	 * the rtnl_lock which we are holding.
	 */
	while (bp->in_reset_task)
		msleep(1);

	bnx2_disable_int_sync(bp);
	napi_disable(&bp->napi);
	del_timer_sync(&bp->timer);
	/* Tell the firmware why we are going down, so it can keep the
	 * link in the right state for Wake-on-LAN if configured.
	 */
	if (bp->flags & NO_WOL_FLAG)
		reset_code = BNX2_DRV_MSG_CODE_UNLOAD_LNK_DN;
	else if (bp->wol)
		reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
	else
		reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
	bnx2_reset_chip(bp, reset_code);
	bnx2_free_irq(bp);
	bnx2_free_skbs(bp);
	bnx2_free_mem(bp);
	bp->link_up = 0;
	netif_carrier_off(bp->dev);
	bnx2_set_power_state(bp, PCI_D3hot);
	return 0;
}
5566
/* Fold a 64-bit hardware counter (split into _hi/_lo words) into an
 * unsigned long.  On 32-bit hosts only the low word is used.  The
 * expansions are fully parenthesized so the macros are safe inside
 * larger expressions (e.g. multiplied or compared without surprises).
 */
#define GET_NET_STATS64(ctr)					\
	((unsigned long) ((unsigned long) (ctr##_hi) << 32) +	\
	 (unsigned long) (ctr##_lo))

#define GET_NET_STATS32(ctr)		\
	(ctr##_lo)

#if (BITS_PER_LONG == 64)
#define GET_NET_STATS	GET_NET_STATS64
#else
#define GET_NET_STATS	GET_NET_STATS32
#endif
5579
/* ndo get_stats: translate the chip's DMA'd statistics block into the
 * generic net_device_stats counters.  Returns zeroed stats if the
 * stats block has not been allocated yet.
 */
static struct net_device_stats *
bnx2_get_stats(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	struct statistics_block *stats_blk = bp->stats_blk;
	struct net_device_stats *net_stats = &bp->net_stats;

	if (bp->stats_blk == NULL) {
		return net_stats;
	}
	net_stats->rx_packets =
		GET_NET_STATS(stats_blk->stat_IfHCInUcastPkts) +
		GET_NET_STATS(stats_blk->stat_IfHCInMulticastPkts) +
		GET_NET_STATS(stats_blk->stat_IfHCInBroadcastPkts);

	net_stats->tx_packets =
		GET_NET_STATS(stats_blk->stat_IfHCOutUcastPkts) +
		GET_NET_STATS(stats_blk->stat_IfHCOutMulticastPkts) +
		GET_NET_STATS(stats_blk->stat_IfHCOutBroadcastPkts);

	net_stats->rx_bytes =
		GET_NET_STATS(stats_blk->stat_IfHCInOctets);

	net_stats->tx_bytes =
		GET_NET_STATS(stats_blk->stat_IfHCOutOctets);

	net_stats->multicast =
		GET_NET_STATS(stats_blk->stat_IfHCOutMulticastPkts);

	net_stats->collisions =
		(unsigned long) stats_blk->stat_EtherStatsCollisions;

	net_stats->rx_length_errors =
		(unsigned long) (stats_blk->stat_EtherStatsUndersizePkts +
		stats_blk->stat_EtherStatsOverrsizePkts);

	net_stats->rx_over_errors =
		(unsigned long) stats_blk->stat_IfInMBUFDiscards;

	net_stats->rx_frame_errors =
		(unsigned long) stats_blk->stat_Dot3StatsAlignmentErrors;

	net_stats->rx_crc_errors =
		(unsigned long) stats_blk->stat_Dot3StatsFCSErrors;

	/* rx_errors is the sum of the individual RX error categories
	 * computed above.
	 */
	net_stats->rx_errors = net_stats->rx_length_errors +
		net_stats->rx_over_errors + net_stats->rx_frame_errors +
		net_stats->rx_crc_errors;

	net_stats->tx_aborted_errors =
		(unsigned long) (stats_blk->stat_Dot3StatsExcessiveCollisions +
		stats_blk->stat_Dot3StatsLateCollisions);

	/* Carrier-sense errors are not reported on 5706 and 5708 A0. */
	if ((CHIP_NUM(bp) == CHIP_NUM_5706) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_A0))
		net_stats->tx_carrier_errors = 0;
	else {
		net_stats->tx_carrier_errors =
			(unsigned long)
			stats_blk->stat_Dot3StatsCarrierSenseErrors;
	}

	net_stats->tx_errors =
		(unsigned long)
		stats_blk->stat_emac_tx_stat_dot3statsinternalmactransmiterrors
		+
		net_stats->tx_aborted_errors +
		net_stats->tx_carrier_errors;

	/* Include drops counted by the on-chip firmware (refreshed by
	 * bnx2_timer()).
	 */
	net_stats->rx_missed_errors =
		(unsigned long) (stats_blk->stat_IfInMBUFDiscards +
		stats_blk->stat_FwRxDrop);

	return net_stats;
}
5655
5656 /* All ethtool functions called with rtnl_lock */
5657
5658 static int
5659 bnx2_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
5660 {
5661         struct bnx2 *bp = netdev_priv(dev);
5662         int support_serdes = 0, support_copper = 0;
5663
5664         cmd->supported = SUPPORTED_Autoneg;
5665         if (bp->phy_flags & REMOTE_PHY_CAP_FLAG) {
5666                 support_serdes = 1;
5667                 support_copper = 1;
5668         } else if (bp->phy_port == PORT_FIBRE)
5669                 support_serdes = 1;
5670         else
5671                 support_copper = 1;
5672
5673         if (support_serdes) {
5674                 cmd->supported |= SUPPORTED_1000baseT_Full |
5675                         SUPPORTED_FIBRE;
5676                 if (bp->phy_flags & PHY_2_5G_CAPABLE_FLAG)
5677                         cmd->supported |= SUPPORTED_2500baseX_Full;
5678
5679         }
5680         if (support_copper) {
5681                 cmd->supported |= SUPPORTED_10baseT_Half |
5682                         SUPPORTED_10baseT_Full |
5683                         SUPPORTED_100baseT_Half |
5684                         SUPPORTED_100baseT_Full |
5685                         SUPPORTED_1000baseT_Full |
5686                         SUPPORTED_TP;
5687
5688         }
5689
5690         spin_lock_bh(&bp->phy_lock);
5691         cmd->port = bp->phy_port;
5692         cmd->advertising = bp->advertising;
5693
5694         if (bp->autoneg & AUTONEG_SPEED) {
5695                 cmd->autoneg = AUTONEG_ENABLE;
5696         }
5697         else {
5698                 cmd->autoneg = AUTONEG_DISABLE;
5699         }
5700
5701         if (netif_carrier_ok(dev)) {
5702                 cmd->speed = bp->line_speed;
5703                 cmd->duplex = bp->duplex;
5704         }
5705         else {
5706                 cmd->speed = -1;
5707                 cmd->duplex = -1;
5708         }
5709         spin_unlock_bh(&bp->phy_lock);
5710
5711         cmd->transceiver = XCVR_INTERNAL;
5712         cmd->phy_address = bp->phy_addr;
5713
5714         return 0;
5715 }
5716
/* ethtool set_settings: validate and apply the requested port,
 * autoneg/advertising, speed, and duplex, then reprogram the PHY via
 * bnx2_setup_phy().  Returns 0 on success or -EINVAL for any
 * unsupported combination.
 */
static int
bnx2_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct bnx2 *bp = netdev_priv(dev);
	/* Work on local copies; bp is only updated once everything
	 * has been validated.
	 */
	u8 autoneg = bp->autoneg;
	u8 req_duplex = bp->req_duplex;
	u16 req_line_speed = bp->req_line_speed;
	u32 advertising = bp->advertising;
	int err = -EINVAL;

	spin_lock_bh(&bp->phy_lock);

	if (cmd->port != PORT_TP && cmd->port != PORT_FIBRE)
		goto err_out_unlock;

	/* Switching the port type is only possible with a remote PHY. */
	if (cmd->port != bp->phy_port && !(bp->phy_flags & REMOTE_PHY_CAP_FLAG))
		goto err_out_unlock;

	if (cmd->autoneg == AUTONEG_ENABLE) {
		autoneg |= AUTONEG_SPEED;

		cmd->advertising &= ETHTOOL_ALL_COPPER_SPEED;

		/* allow advertising 1 speed */
		if ((cmd->advertising == ADVERTISED_10baseT_Half) ||
			(cmd->advertising == ADVERTISED_10baseT_Full) ||
			(cmd->advertising == ADVERTISED_100baseT_Half) ||
			(cmd->advertising == ADVERTISED_100baseT_Full)) {

			/* 10/100 modes are copper-only. */
			if (cmd->port == PORT_FIBRE)
				goto err_out_unlock;

			advertising = cmd->advertising;

		} else if (cmd->advertising == ADVERTISED_2500baseX_Full) {
			/* 2.5G requires a capable PHY and a fibre port. */
			if (!(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) ||
			    (cmd->port == PORT_TP))
				goto err_out_unlock;
		} else if (cmd->advertising == ADVERTISED_1000baseT_Full)
			advertising = cmd->advertising;
		else if (cmd->advertising == ADVERTISED_1000baseT_Half)
			goto err_out_unlock;
		else {
			/* No single mode requested: advertise everything
			 * the port type supports.
			 */
			if (cmd->port == PORT_FIBRE)
				advertising = ETHTOOL_ALL_FIBRE_SPEED;
			else
				advertising = ETHTOOL_ALL_COPPER_SPEED;
		}
		advertising |= ADVERTISED_Autoneg;
	}
	else {
		/* Forced mode: fibre supports only 1G/2.5G full duplex;
		 * copper cannot be forced to gigabit speeds.
		 */
		if (cmd->port == PORT_FIBRE) {
			if ((cmd->speed != SPEED_1000 &&
			     cmd->speed != SPEED_2500) ||
			    (cmd->duplex != DUPLEX_FULL))
				goto err_out_unlock;

			if (cmd->speed == SPEED_2500 &&
			    !(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
				goto err_out_unlock;
		}
		else if (cmd->speed == SPEED_1000 || cmd->speed == SPEED_2500)
			goto err_out_unlock;

		autoneg &= ~AUTONEG_SPEED;
		req_line_speed = cmd->speed;
		req_duplex = cmd->duplex;
		advertising = 0;
	}

	/* Validation passed: commit and reprogram the PHY. */
	bp->autoneg = autoneg;
	bp->advertising = advertising;
	bp->req_line_speed = req_line_speed;
	bp->req_duplex = req_duplex;

	err = bnx2_setup_phy(bp, cmd->port);

err_out_unlock:
	spin_unlock_bh(&bp->phy_lock);

	return err;
}
5799
5800 static void
5801 bnx2_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
5802 {
5803         struct bnx2 *bp = netdev_priv(dev);
5804
5805         strcpy(info->driver, DRV_MODULE_NAME);
5806         strcpy(info->version, DRV_MODULE_VERSION);
5807         strcpy(info->bus_info, pci_name(bp->pdev));
5808         strcpy(info->fw_version, bp->fw_version);
5809 }
5810
5811 #define BNX2_REGDUMP_LEN                (32 * 1024)
5812
/* ethtool get_regs_len: size in bytes of the register dump buffer that
 * bnx2_get_regs() fills.
 */
static int
bnx2_get_regs_len(struct net_device *dev)
{
	return BNX2_REGDUMP_LEN;
}
5818
5819 static void
5820 bnx2_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *_p)
5821 {
5822         u32 *p = _p, i, offset;
5823         u8 *orig_p = _p;
5824         struct bnx2 *bp = netdev_priv(dev);
5825         u32 reg_boundaries[] = { 0x0000, 0x0098, 0x0400, 0x045c,
5826                                  0x0800, 0x0880, 0x0c00, 0x0c10,
5827                                  0x0c30, 0x0d08, 0x1000, 0x101c,
5828                                  0x1040, 0x1048, 0x1080, 0x10a4,
5829                                  0x1400, 0x1490, 0x1498, 0x14f0,
5830                                  0x1500, 0x155c, 0x1580, 0x15dc,
5831                                  0x1600, 0x1658, 0x1680, 0x16d8,
5832                                  0x1800, 0x1820, 0x1840, 0x1854,
5833                                  0x1880, 0x1894, 0x1900, 0x1984,
5834                                  0x1c00, 0x1c0c, 0x1c40, 0x1c54,
5835                                  0x1c80, 0x1c94, 0x1d00, 0x1d84,
5836                                  0x2000, 0x2030, 0x23c0, 0x2400,
5837                                  0x2800, 0x2820, 0x2830, 0x2850,
5838                                  0x2b40, 0x2c10, 0x2fc0, 0x3058,
5839                                  0x3c00, 0x3c94, 0x4000, 0x4010,
5840                                  0x4080, 0x4090, 0x43c0, 0x4458,
5841                                  0x4c00, 0x4c18, 0x4c40, 0x4c54,
5842                                  0x4fc0, 0x5010, 0x53c0, 0x5444,
5843                                  0x5c00, 0x5c18, 0x5c80, 0x5c90,
5844                                  0x5fc0, 0x6000, 0x6400, 0x6428,
5845                                  0x6800, 0x6848, 0x684c, 0x6860,
5846                                  0x6888, 0x6910, 0x8000 };
5847
5848         regs->version = 0;
5849
5850         memset(p, 0, BNX2_REGDUMP_LEN);
5851
5852         if (!netif_running(bp->dev))
5853                 return;
5854
5855         i = 0;
5856         offset = reg_boundaries[0];
5857         p += offset;
5858         while (offset < BNX2_REGDUMP_LEN) {
5859                 *p++ = REG_RD(bp, offset);
5860                 offset += 4;
5861                 if (offset == reg_boundaries[i + 1]) {
5862                         offset = reg_boundaries[i + 2];
5863                         p = (u32 *) (orig_p + offset);
5864                         i += 2;
5865                 }
5866         }
5867 }
5868
5869 static void
5870 bnx2_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
5871 {
5872         struct bnx2 *bp = netdev_priv(dev);
5873
5874         if (bp->flags & NO_WOL_FLAG) {
5875                 wol->supported = 0;
5876                 wol->wolopts = 0;
5877         }
5878         else {
5879                 wol->supported = WAKE_MAGIC;
5880                 if (bp->wol)
5881                         wol->wolopts = WAKE_MAGIC;
5882                 else
5883                         wol->wolopts = 0;
5884         }
5885         memset(&wol->sopass, 0, sizeof(wol->sopass));
5886 }
5887
5888 static int
5889 bnx2_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
5890 {
5891         struct bnx2 *bp = netdev_priv(dev);
5892
5893         if (wol->wolopts & ~WAKE_MAGIC)
5894                 return -EINVAL;
5895
5896         if (wol->wolopts & WAKE_MAGIC) {
5897                 if (bp->flags & NO_WOL_FLAG)
5898                         return -EINVAL;
5899
5900                 bp->wol = 1;
5901         }
5902         else {
5903                 bp->wol = 0;
5904         }
5905         return 0;
5906 }
5907
/* ethtool nway_reset: restart link autonegotiation.
 * Returns -EINVAL if autonegotiation is not currently enabled.
 */
static int
bnx2_nway_reset(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	u32 bmcr;

	if (!(bp->autoneg & AUTONEG_SPEED)) {
		return -EINVAL;
	}

	spin_lock_bh(&bp->phy_lock);

	/* Remotely-managed PHY: delegate the renegotiation entirely. */
	if (bp->phy_flags & REMOTE_PHY_CAP_FLAG) {
		int rc;

		rc = bnx2_setup_remote_phy(bp, bp->phy_port);
		spin_unlock_bh(&bp->phy_lock);
		return rc;
	}

	/* Force a link down visible on the other side */
	if (bp->phy_flags & PHY_SERDES_FLAG) {
		bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
		/* Drop the lock around the sleep — presumably because
		 * msleep() cannot run with BHs disabled; phy state may
		 * change in the window.
		 */
		spin_unlock_bh(&bp->phy_lock);

		msleep(20);

		spin_lock_bh(&bp->phy_lock);

		/* Arm the serdes autoneg timeout serviced by bp->timer. */
		bp->current_interval = SERDES_AN_TIMEOUT;
		bp->serdes_an_pending = 1;
		mod_timer(&bp->timer, jiffies + bp->current_interval);
	}

	/* Clear loopback and kick off a fresh autonegotiation. */
	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
	bmcr &= ~BMCR_LOOPBACK;
	bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART | BMCR_ANENABLE);

	spin_unlock_bh(&bp->phy_lock);

	return 0;
}
5950
5951 static int
5952 bnx2_get_eeprom_len(struct net_device *dev)
5953 {
5954         struct bnx2 *bp = netdev_priv(dev);
5955
5956         if (bp->flash_info == NULL)
5957                 return 0;
5958
5959         return (int) bp->flash_size;
5960 }
5961
5962 static int
5963 bnx2_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
5964                 u8 *eebuf)
5965 {
5966         struct bnx2 *bp = netdev_priv(dev);
5967         int rc;
5968
5969         /* parameters already validated in ethtool_get_eeprom */
5970
5971         rc = bnx2_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
5972
5973         return rc;
5974 }
5975
5976 static int
5977 bnx2_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
5978                 u8 *eebuf)
5979 {
5980         struct bnx2 *bp = netdev_priv(dev);
5981         int rc;
5982
5983         /* parameters already validated in ethtool_set_eeprom */
5984
5985         rc = bnx2_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
5986
5987         return rc;
5988 }
5989
5990 static int
5991 bnx2_get_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
5992 {
5993         struct bnx2 *bp = netdev_priv(dev);
5994
5995         memset(coal, 0, sizeof(struct ethtool_coalesce));
5996
5997         coal->rx_coalesce_usecs = bp->rx_ticks;
5998         coal->rx_max_coalesced_frames = bp->rx_quick_cons_trip;
5999         coal->rx_coalesce_usecs_irq = bp->rx_ticks_int;
6000         coal->rx_max_coalesced_frames_irq = bp->rx_quick_cons_trip_int;
6001
6002         coal->tx_coalesce_usecs = bp->tx_ticks;
6003         coal->tx_max_coalesced_frames = bp->tx_quick_cons_trip;
6004         coal->tx_coalesce_usecs_irq = bp->tx_ticks_int;
6005         coal->tx_max_coalesced_frames_irq = bp->tx_quick_cons_trip_int;
6006
6007         coal->stats_block_coalesce_usecs = bp->stats_ticks;
6008
6009         return 0;
6010 }
6011
6012 static int
6013 bnx2_set_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
6014 {
6015         struct bnx2 *bp = netdev_priv(dev);
6016
6017         bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
6018         if (bp->rx_ticks > 0x3ff) bp->rx_ticks = 0x3ff;
6019
6020         bp->rx_quick_cons_trip = (u16) coal->rx_max_coalesced_frames;
6021         if (bp->rx_quick_cons_trip > 0xff) bp->rx_quick_cons_trip = 0xff;
6022
6023         bp->rx_ticks_int = (u16) coal->rx_coalesce_usecs_irq;
6024         if (bp->rx_ticks_int > 0x3ff) bp->rx_ticks_int = 0x3ff;
6025
6026         bp->rx_quick_cons_trip_int = (u16) coal->rx_max_coalesced_frames_irq;
6027         if (bp->rx_quick_cons_trip_int > 0xff)
6028                 bp->rx_quick_cons_trip_int = 0xff;
6029
6030         bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
6031         if (bp->tx_ticks > 0x3ff) bp->tx_ticks = 0x3ff;
6032
6033         bp->tx_quick_cons_trip = (u16) coal->tx_max_coalesced_frames;
6034         if (bp->tx_quick_cons_trip > 0xff) bp->tx_quick_cons_trip = 0xff;
6035
6036         bp->tx_ticks_int = (u16) coal->tx_coalesce_usecs_irq;
6037         if (bp->tx_ticks_int > 0x3ff) bp->tx_ticks_int = 0x3ff;
6038
6039         bp->tx_quick_cons_trip_int = (u16) coal->tx_max_coalesced_frames_irq;
6040         if (bp->tx_quick_cons_trip_int > 0xff) bp->tx_quick_cons_trip_int =
6041                 0xff;
6042
6043         bp->stats_ticks = coal->stats_block_coalesce_usecs;
6044         if (CHIP_NUM(bp) == CHIP_NUM_5708) {
6045                 if (bp->stats_ticks != 0 && bp->stats_ticks != USEC_PER_SEC)
6046                         bp->stats_ticks = USEC_PER_SEC;
6047         }
6048         if (bp->stats_ticks > BNX2_HC_STATS_TICKS_HC_STAT_TICKS)
6049                 bp->stats_ticks = BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
6050         bp->stats_ticks &= BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
6051
6052         if (netif_running(bp->dev)) {
6053                 bnx2_netif_stop(bp);
6054                 bnx2_init_nic(bp);
6055                 bnx2_netif_start(bp);
6056         }
6057
6058         return 0;
6059 }
6060
6061 static void
6062 bnx2_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
6063 {
6064         struct bnx2 *bp = netdev_priv(dev);
6065
6066         ering->rx_max_pending = MAX_TOTAL_RX_DESC_CNT;
6067         ering->rx_mini_max_pending = 0;
6068         ering->rx_jumbo_max_pending = MAX_TOTAL_RX_PG_DESC_CNT;
6069
6070         ering->rx_pending = bp->rx_ring_size;
6071         ering->rx_mini_pending = 0;
6072         ering->rx_jumbo_pending = bp->rx_pg_ring_size;
6073
6074         ering->tx_max_pending = MAX_TX_DESC_CNT;
6075         ering->tx_pending = bp->tx_ring_size;
6076 }
6077
/* Tear down, resize, and re-initialize the RX/TX rings.  If the
 * interface is down, only the stored sizes are updated; they are
 * applied on the next open.
 * NOTE(review): if bnx2_alloc_mem() fails, this returns with the
 * interface stopped and its memory freed — the caller gets an error
 * but the device is left down.
 */
static int
bnx2_change_ring_size(struct bnx2 *bp, u32 rx, u32 tx)
{
	if (netif_running(bp->dev)) {
		/* Quiesce the chip and release the old rings first. */
		bnx2_netif_stop(bp);
		bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
		bnx2_free_skbs(bp);
		bnx2_free_mem(bp);
	}

	bnx2_set_rx_ring_size(bp, rx);
	bp->tx_ring_size = tx;

	if (netif_running(bp->dev)) {
		int rc;

		rc = bnx2_alloc_mem(bp);
		if (rc)
			return rc;
		bnx2_init_nic(bp);
		bnx2_netif_start(bp);
	}
	return 0;
}
6102
6103 static int
6104 bnx2_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
6105 {
6106         struct bnx2 *bp = netdev_priv(dev);
6107         int rc;
6108
6109         if ((ering->rx_pending > MAX_TOTAL_RX_DESC_CNT) ||
6110                 (ering->tx_pending > MAX_TX_DESC_CNT) ||
6111                 (ering->tx_pending <= MAX_SKB_FRAGS)) {
6112
6113                 return -EINVAL;
6114         }
6115         rc = bnx2_change_ring_size(bp, ering->rx_pending, ering->tx_pending);
6116         return rc;
6117 }
6118
6119 static void
6120 bnx2_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
6121 {
6122         struct bnx2 *bp = netdev_priv(dev);
6123
6124         epause->autoneg = ((bp->autoneg & AUTONEG_FLOW_CTRL) != 0);
6125         epause->rx_pause = ((bp->flow_ctrl & FLOW_CTRL_RX) != 0);
6126         epause->tx_pause = ((bp->flow_ctrl & FLOW_CTRL_TX) != 0);
6127 }
6128
6129 static int
6130 bnx2_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
6131 {
6132         struct bnx2 *bp = netdev_priv(dev);
6133
6134         bp->req_flow_ctrl = 0;
6135         if (epause->rx_pause)
6136                 bp->req_flow_ctrl |= FLOW_CTRL_RX;
6137         if (epause->tx_pause)
6138                 bp->req_flow_ctrl |= FLOW_CTRL_TX;
6139
6140         if (epause->autoneg) {
6141                 bp->autoneg |= AUTONEG_FLOW_CTRL;
6142         }
6143         else {
6144                 bp->autoneg &= ~AUTONEG_FLOW_CTRL;
6145         }
6146
6147         spin_lock_bh(&bp->phy_lock);
6148
6149         bnx2_setup_phy(bp, bp->phy_port);
6150
6151         spin_unlock_bh(&bp->phy_lock);
6152
6153         return 0;
6154 }
6155
/* ethtool get_rx_csum: report whether RX checksum offload is enabled. */
static u32
bnx2_get_rx_csum(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);

	return bp->rx_csum;
}
6163
/* ethtool set_rx_csum: store the new RX checksum offload setting.
 * Only the flag is recorded here; the RX path reads bp->rx_csum.
 */
static int
bnx2_set_rx_csum(struct net_device *dev, u32 data)
{
	struct bnx2 *bp = netdev_priv(dev);

	bp->rx_csum = data;
	return 0;
}
6172
6173 static int
6174 bnx2_set_tso(struct net_device *dev, u32 data)
6175 {
6176         struct bnx2 *bp = netdev_priv(dev);
6177
6178         if (data) {
6179                 dev->features |= NETIF_F_TSO | NETIF_F_TSO_ECN;
6180                 if (CHIP_NUM(bp) == CHIP_NUM_5709)
6181                         dev->features |= NETIF_F_TSO6;
6182         } else
6183                 dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6 |
6184                                    NETIF_F_TSO_ECN);
6185         return 0;
6186 }
6187
#define BNX2_NUM_STATS 46

/* ethtool statistics names, reported by bnx2_get_strings().  The
 * order here must match bnx2_stats_offset_arr[] and the
 * bnx2_57xx_stats_len_arr[] tables below, which are indexed in
 * parallel.
 */
static struct {
	char string[ETH_GSTRING_LEN];
} bnx2_stats_str_arr[BNX2_NUM_STATS] = {
	{ "rx_bytes" },
	{ "rx_error_bytes" },
	{ "tx_bytes" },
	{ "tx_error_bytes" },
	{ "rx_ucast_packets" },
	{ "rx_mcast_packets" },
	{ "rx_bcast_packets" },
	{ "tx_ucast_packets" },
	{ "tx_mcast_packets" },
	{ "tx_bcast_packets" },
	{ "tx_mac_errors" },
	{ "tx_carrier_errors" },
	{ "rx_crc_errors" },
	{ "rx_align_errors" },
	{ "tx_single_collisions" },
	{ "tx_multi_collisions" },
	{ "tx_deferred" },
	{ "tx_excess_collisions" },
	{ "tx_late_collisions" },
	{ "tx_total_collisions" },
	{ "rx_fragments" },
	{ "rx_jabbers" },
	{ "rx_undersize_packets" },
	{ "rx_oversize_packets" },
	{ "rx_64_byte_packets" },
	{ "rx_65_to_127_byte_packets" },
	{ "rx_128_to_255_byte_packets" },
	{ "rx_256_to_511_byte_packets" },
	{ "rx_512_to_1023_byte_packets" },
	{ "rx_1024_to_1522_byte_packets" },
	{ "rx_1523_to_9022_byte_packets" },
	{ "tx_64_byte_packets" },
	{ "tx_65_to_127_byte_packets" },
	{ "tx_128_to_255_byte_packets" },
	{ "tx_256_to_511_byte_packets" },
	{ "tx_512_to_1023_byte_packets" },
	{ "tx_1024_to_1522_byte_packets" },
	{ "tx_1523_to_9022_byte_packets" },
	{ "rx_xon_frames" },
	{ "rx_xoff_frames" },
	{ "tx_xon_frames" },
	{ "tx_xoff_frames" },
	{ "rx_mac_ctrl_frames" },
	{ "rx_filtered_packets" },
	{ "rx_discards" },
	{ "rx_fw_discards" },
};
6240
/* 32-bit-word offset of each statistic within the device's statistics
 * block; indexed in parallel with bnx2_stats_str_arr[].  For 8-byte
 * counters this is the offset of the high 32-bit word (the low word
 * follows it — see bnx2_get_ethtool_stats()).
 */
#define STATS_OFFSET32(offset_name) (offsetof(struct statistics_block, offset_name) / 4)

static const unsigned long bnx2_stats_offset_arr[BNX2_NUM_STATS] = {
    STATS_OFFSET32(stat_IfHCInOctets_hi),
    STATS_OFFSET32(stat_IfHCInBadOctets_hi),
    STATS_OFFSET32(stat_IfHCOutOctets_hi),
    STATS_OFFSET32(stat_IfHCOutBadOctets_hi),
    STATS_OFFSET32(stat_IfHCInUcastPkts_hi),
    STATS_OFFSET32(stat_IfHCInMulticastPkts_hi),
    STATS_OFFSET32(stat_IfHCInBroadcastPkts_hi),
    STATS_OFFSET32(stat_IfHCOutUcastPkts_hi),
    STATS_OFFSET32(stat_IfHCOutMulticastPkts_hi),
    STATS_OFFSET32(stat_IfHCOutBroadcastPkts_hi),
    STATS_OFFSET32(stat_emac_tx_stat_dot3statsinternalmactransmiterrors),
    STATS_OFFSET32(stat_Dot3StatsCarrierSenseErrors),
    STATS_OFFSET32(stat_Dot3StatsFCSErrors),
    STATS_OFFSET32(stat_Dot3StatsAlignmentErrors),
    STATS_OFFSET32(stat_Dot3StatsSingleCollisionFrames),
    STATS_OFFSET32(stat_Dot3StatsMultipleCollisionFrames),
    STATS_OFFSET32(stat_Dot3StatsDeferredTransmissions),
    STATS_OFFSET32(stat_Dot3StatsExcessiveCollisions),
    STATS_OFFSET32(stat_Dot3StatsLateCollisions),
    STATS_OFFSET32(stat_EtherStatsCollisions),
    STATS_OFFSET32(stat_EtherStatsFragments),
    STATS_OFFSET32(stat_EtherStatsJabbers),
    STATS_OFFSET32(stat_EtherStatsUndersizePkts),
    STATS_OFFSET32(stat_EtherStatsOverrsizePkts),
    STATS_OFFSET32(stat_EtherStatsPktsRx64Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx65Octetsto127Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx128Octetsto255Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx256Octetsto511Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx512Octetsto1023Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx1024Octetsto1522Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx1523Octetsto9022Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx64Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx65Octetsto127Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx128Octetsto255Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx256Octetsto511Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx512Octetsto1023Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx1024Octetsto1522Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx1523Octetsto9022Octets),
    STATS_OFFSET32(stat_XonPauseFramesReceived),
    STATS_OFFSET32(stat_XoffPauseFramesReceived),
    STATS_OFFSET32(stat_OutXonSent),
    STATS_OFFSET32(stat_OutXoffSent),
    STATS_OFFSET32(stat_MacControlFramesReceived),
    STATS_OFFSET32(stat_IfInFramesL2FilterDiscards),
    STATS_OFFSET32(stat_IfInMBUFDiscards),
    STATS_OFFSET32(stat_FwRxDrop),
};
6291
/* stat_IfHCInBadOctets and stat_Dot3StatsCarrierSenseErrors are
 * skipped because of errata.
 */
/* Byte width (0 = skipped, 4, or 8) of each counter on 5706 A0-A2 and
 * 5708 A0; indexed in parallel with bnx2_stats_str_arr[].
 */
static u8 bnx2_5706_stats_len_arr[BNX2_NUM_STATS] = {
	8,0,8,8,8,8,8,8,8,8,
	4,0,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,
};
6302
/* Counter widths for all other chip revisions; only index 1
 * (stat_IfHCInBadOctets) is skipped here.
 */
static u8 bnx2_5708_stats_len_arr[BNX2_NUM_STATS] = {
	8,0,8,8,8,8,8,8,8,8,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,
};
6310
#define BNX2_NUM_TESTS 6

/* Names of the ethtool self-test results, in the order that
 * bnx2_self_test() fills its result buffer.
 */
static struct {
	char string[ETH_GSTRING_LEN];
} bnx2_tests_str_arr[BNX2_NUM_TESTS] = {
	{ "register_test (offline)" },
	{ "memory_test (offline)" },
	{ "loopback_test (offline)" },
	{ "nvram_test (online)" },
	{ "interrupt_test (online)" },
	{ "link_test (online)" },
};
6323
6324 static int
6325 bnx2_get_sset_count(struct net_device *dev, int sset)
6326 {
6327         switch (sset) {
6328         case ETH_SS_TEST:
6329                 return BNX2_NUM_TESTS;
6330         case ETH_SS_STATS:
6331                 return BNX2_NUM_STATS;
6332         default:
6333                 return -EOPNOTSUPP;
6334         }
6335 }
6336
/* ethtool self-test.  Online tests (nvram, interrupt, link) always
 * run; the offline tests (registers, memory, loopback) additionally
 * reset the chip and disrupt traffic.  Each buf[] slot is nonzero on
 * failure of the corresponding entry in bnx2_tests_str_arr[].
 */
static void
bnx2_self_test(struct net_device *dev, struct ethtool_test *etest, u64 *buf)
{
	struct bnx2 *bp = netdev_priv(dev);

	memset(buf, 0, sizeof(u64) * BNX2_NUM_TESTS);
	if (etest->flags & ETH_TEST_FL_OFFLINE) {
		int i;

		/* Quiesce the device and put the chip in diag mode. */
		bnx2_netif_stop(bp);
		bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_DIAG);
		bnx2_free_skbs(bp);

		if (bnx2_test_registers(bp) != 0) {
			buf[0] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}
		if (bnx2_test_memory(bp) != 0) {
			buf[1] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}
		/* Loopback result is stored directly (nonzero = fail). */
		if ((buf[2] = bnx2_test_loopback(bp)) != 0)
			etest->flags |= ETH_TEST_FL_FAILED;

		/* Restore normal operation after the diag reset. */
		if (!netif_running(bp->dev)) {
			bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
		}
		else {
			bnx2_init_nic(bp);
			bnx2_netif_start(bp);
		}

		/* wait for link up (up to 7 seconds) */
		for (i = 0; i < 7; i++) {
			if (bp->link_up)
				break;
			msleep_interruptible(1000);
		}
	}

	if (bnx2_test_nvram(bp) != 0) {
		buf[3] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}
	if (bnx2_test_intr(bp) != 0) {
		buf[4] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}

	if (bnx2_test_link(bp) != 0) {
		buf[5] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;

	}
}
6392
6393 static void
6394 bnx2_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
6395 {
6396         switch (stringset) {
6397         case ETH_SS_STATS:
6398                 memcpy(buf, bnx2_stats_str_arr,
6399                         sizeof(bnx2_stats_str_arr));
6400                 break;
6401         case ETH_SS_TEST:
6402                 memcpy(buf, bnx2_tests_str_arr,
6403                         sizeof(bnx2_tests_str_arr));
6404                 break;
6405         }
6406 }
6407
6408 static void
6409 bnx2_get_ethtool_stats(struct net_device *dev,
6410                 struct ethtool_stats *stats, u64 *buf)
6411 {
6412         struct bnx2 *bp = netdev_priv(dev);
6413         int i;
6414         u32 *hw_stats = (u32 *) bp->stats_blk;
6415         u8 *stats_len_arr = NULL;
6416
6417         if (hw_stats == NULL) {
6418                 memset(buf, 0, sizeof(u64) * BNX2_NUM_STATS);
6419                 return;
6420         }
6421
6422         if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
6423             (CHIP_ID(bp) == CHIP_ID_5706_A1) ||
6424             (CHIP_ID(bp) == CHIP_ID_5706_A2) ||
6425             (CHIP_ID(bp) == CHIP_ID_5708_A0))
6426                 stats_len_arr = bnx2_5706_stats_len_arr;
6427         else
6428                 stats_len_arr = bnx2_5708_stats_len_arr;
6429
6430         for (i = 0; i < BNX2_NUM_STATS; i++) {
6431                 if (stats_len_arr[i] == 0) {
6432                         /* skip this counter */
6433                         buf[i] = 0;
6434                         continue;
6435                 }
6436                 if (stats_len_arr[i] == 4) {
6437                         /* 4-byte counter */
6438                         buf[i] = (u64)
6439                                 *(hw_stats + bnx2_stats_offset_arr[i]);
6440                         continue;
6441                 }
6442                 /* 8-byte counter */
6443                 buf[i] = (((u64) *(hw_stats +
6444                                         bnx2_stats_offset_arr[i])) << 32) +
6445                                 *(hw_stats + bnx2_stats_offset_arr[i] + 1);
6446         }
6447 }
6448
6449 static int
6450 bnx2_phys_id(struct net_device *dev, u32 data)
6451 {
6452         struct bnx2 *bp = netdev_priv(dev);
6453         int i;
6454         u32 save;
6455
6456         if (data == 0)
6457                 data = 2;
6458
6459         save = REG_RD(bp, BNX2_MISC_CFG);
6460         REG_WR(bp, BNX2_MISC_CFG, BNX2_MISC_CFG_LEDMODE_MAC);
6461
6462         for (i = 0; i < (data * 2); i++) {
6463                 if ((i % 2) == 0) {
6464                         REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE);
6465                 }
6466                 else {
6467                         REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE |
6468                                 BNX2_EMAC_LED_1000MB_OVERRIDE |
6469                                 BNX2_EMAC_LED_100MB_OVERRIDE |
6470                                 BNX2_EMAC_LED_10MB_OVERRIDE |
6471                                 BNX2_EMAC_LED_TRAFFIC_OVERRIDE |
6472                                 BNX2_EMAC_LED_TRAFFIC);
6473                 }
6474                 msleep_interruptible(500);
6475                 if (signal_pending(current))
6476                         break;
6477         }
6478         REG_WR(bp, BNX2_EMAC_LED, 0);
6479         REG_WR(bp, BNX2_MISC_CFG, save);
6480         return 0;
6481 }
6482
6483 static int
6484 bnx2_set_tx_csum(struct net_device *dev, u32 data)
6485 {
6486         struct bnx2 *bp = netdev_priv(dev);
6487
6488         if (CHIP_NUM(bp) == CHIP_NUM_5709)
6489                 return (ethtool_op_set_tx_ipv6_csum(dev, data));
6490         else
6491                 return (ethtool_op_set_tx_csum(dev, data));
6492 }
6493
/* ethtool operations implemented by this driver. */
static const struct ethtool_ops bnx2_ethtool_ops = {
	.get_settings		= bnx2_get_settings,
	.set_settings		= bnx2_set_settings,
	.get_drvinfo		= bnx2_get_drvinfo,
	.get_regs_len		= bnx2_get_regs_len,
	.get_regs		= bnx2_get_regs,
	.get_wol		= bnx2_get_wol,
	.set_wol		= bnx2_set_wol,
	.nway_reset		= bnx2_nway_reset,
	.get_link		= ethtool_op_get_link,
	.get_eeprom_len		= bnx2_get_eeprom_len,
	.get_eeprom		= bnx2_get_eeprom,
	.set_eeprom		= bnx2_set_eeprom,
	.get_coalesce		= bnx2_get_coalesce,
	.set_coalesce		= bnx2_set_coalesce,
	.get_ringparam		= bnx2_get_ringparam,
	.set_ringparam		= bnx2_set_ringparam,
	.get_pauseparam		= bnx2_get_pauseparam,
	.set_pauseparam		= bnx2_set_pauseparam,
	.get_rx_csum		= bnx2_get_rx_csum,
	.set_rx_csum		= bnx2_set_rx_csum,
	.set_tx_csum		= bnx2_set_tx_csum,
	.set_sg			= ethtool_op_set_sg,
	.set_tso		= bnx2_set_tso,
	.self_test		= bnx2_self_test,
	.get_strings		= bnx2_get_strings,
	.phys_id		= bnx2_phys_id,
	.get_ethtool_stats	= bnx2_get_ethtool_stats,
	.get_sset_count		= bnx2_get_sset_count,
};
6524
/* Called with rtnl_lock */
/* MII ioctl handler (SIOCGMIIPHY / SIOCGMIIREG / SIOCSMIIREG).
 * PHY register access is refused when the PHY is remotely managed
 * (-EOPNOTSUPP) or the interface is down (-EAGAIN).
 */
static int
bnx2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct mii_ioctl_data *data = if_mii(ifr);
	struct bnx2 *bp = netdev_priv(dev);
	int err;

	switch(cmd) {
	case SIOCGMIIPHY:
		data->phy_id = bp->phy_addr;

		/* fallthru */
	case SIOCGMIIREG: {
		u32 mii_regval;

		if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
			return -EOPNOTSUPP;

		if (!netif_running(dev))
			return -EAGAIN;

		/* Serialize PHY access against the link state machine. */
		spin_lock_bh(&bp->phy_lock);
		err = bnx2_read_phy(bp, data->reg_num & 0x1f, &mii_regval);
		spin_unlock_bh(&bp->phy_lock);

		data->val_out = mii_regval;

		return err;
	}

	case SIOCSMIIREG:
		/* Writing PHY registers requires admin privileges. */
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;

		if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
			return -EOPNOTSUPP;

		if (!netif_running(dev))
			return -EAGAIN;

		spin_lock_bh(&bp->phy_lock);
		err = bnx2_write_phy(bp, data->reg_num & 0x1f, data->val_in);
		spin_unlock_bh(&bp->phy_lock);

		return err;

	default:
		/* do nothing */
		break;
	}
	return -EOPNOTSUPP;
}
6578
6579 /* Called with rtnl_lock */
6580 static int
6581 bnx2_change_mac_addr(struct net_device *dev, void *p)
6582 {
6583         struct sockaddr *addr = p;
6584         struct bnx2 *bp = netdev_priv(dev);
6585
6586         if (!is_valid_ether_addr(addr->sa_data))
6587                 return -EINVAL;
6588
6589         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
6590         if (netif_running(dev))
6591                 bnx2_set_mac_addr(bp);
6592
6593         return 0;
6594 }
6595
6596 /* Called with rtnl_lock */
6597 static int
6598 bnx2_change_mtu(struct net_device *dev, int new_mtu)
6599 {
6600         struct bnx2 *bp = netdev_priv(dev);
6601
6602         if (((new_mtu + ETH_HLEN) > MAX_ETHERNET_JUMBO_PACKET_SIZE) ||
6603                 ((new_mtu + ETH_HLEN) < MIN_ETHERNET_PACKET_SIZE))
6604                 return -EINVAL;
6605
6606         dev->mtu = new_mtu;
6607         return (bnx2_change_ring_size(bp, bp->rx_ring_size, bp->tx_ring_size));
6608 }
6609
#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
/* netpoll entry point: invoke the interrupt handler directly with the
 * device's IRQ disabled so the real handler cannot run concurrently.
 */
static void
poll_bnx2(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);

	disable_irq(bp->pdev->irq);
	bnx2_interrupt(bp->pdev->irq, dev);
	enable_irq(bp->pdev->irq);
}
#endif
6621
/* Determine the 5709's media type from the dual-media control
 * register, setting PHY_SERDES_FLAG for serdes (fiber) ports.
 */
static void __devinit
bnx2_get_5709_media(struct bnx2 *bp)
{
	u32 val = REG_RD(bp, BNX2_MISC_DUAL_MEDIA_CTRL);
	u32 bond_id = val & BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID;
	u32 strap;

	/* Bond id "C" gets no serdes flag (presumably copper-only);
	 * bond id "S" is serdes.  Otherwise fall through to the strap.
	 */
	if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_C)
		return;
	else if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_S) {
		bp->phy_flags |= PHY_SERDES_FLAG;
		return;
	}

	/* Read the strap value, honoring a software override if set. */
	if (val & BNX2_MISC_DUAL_MEDIA_CTRL_STRAP_OVERRIDE)
		strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL) >> 21;
	else
		strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL_STRAP) >> 8;

	/* The strap values that indicate serdes differ by PCI function.
	 * NOTE(review): magic strap tables taken as-is — confirm
	 * against the 5709 hardware documentation.
	 */
	if (PCI_FUNC(bp->pdev->devfn) == 0) {
		switch (strap) {
		case 0x4:
		case 0x5:
		case 0x6:
			bp->phy_flags |= PHY_SERDES_FLAG;
			return;
		}
	} else {
		switch (strap) {
		case 0x1:
		case 0x2:
		case 0x4:
			bp->phy_flags |= PHY_SERDES_FLAG;
			return;
		}
	}
}
6659
/* Decode the PCI/PCI-X mode, bus clock speed, and bus width from the
 * PCICFG status registers, recording them in bp->flags and
 * bp->bus_speed_mhz.
 */
static void __devinit
bnx2_get_pci_speed(struct bnx2 *bp)
{
	u32 reg;

	reg = REG_RD(bp, BNX2_PCICFG_MISC_STATUS);
	if (reg & BNX2_PCICFG_MISC_STATUS_PCIX_DET) {
		u32 clkreg;

		bp->flags |= PCIX_FLAG;

		clkreg = REG_RD(bp, BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS);

		/* Map the detected clock to a nominal speed in MHz.
		 * NOTE(review): an unrecognized value leaves
		 * bp->bus_speed_mhz unset here.
		 */
		clkreg &= BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET;
		switch (clkreg) {
		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_133MHZ:
			bp->bus_speed_mhz = 133;
			break;

		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_95MHZ:
			bp->bus_speed_mhz = 100;
			break;

		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_66MHZ:
		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_80MHZ:
			bp->bus_speed_mhz = 66;
			break;

		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_48MHZ:
		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_55MHZ:
			bp->bus_speed_mhz = 50;
			break;

		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_LOW:
		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_32MHZ:
		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_38MHZ:
			bp->bus_speed_mhz = 33;
			break;
		}
	}
	else {
		/* Conventional PCI: 66 or 33 MHz from the M66EN strap. */
		if (reg & BNX2_PCICFG_MISC_STATUS_M66EN)
			bp->bus_speed_mhz = 66;
		else
			bp->bus_speed_mhz = 33;
	}

	if (reg & BNX2_PCICFG_MISC_STATUS_32BIT_DET)
		bp->flags |= PCI_32BIT_FLAG;

}
6711
6712 static int __devinit
6713 bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
6714 {
6715         struct bnx2 *bp;
6716         unsigned long mem_len;
6717         int rc, i, j;
6718         u32 reg;
6719         u64 dma_mask, persist_dma_mask;
6720
6721         SET_NETDEV_DEV(dev, &pdev->dev);
6722         bp = netdev_priv(dev);
6723
6724         bp->flags = 0;
6725         bp->phy_flags = 0;
6726
6727         /* enable device (incl. PCI PM wakeup), and bus-mastering */
6728         rc = pci_enable_device(pdev);
6729         if (rc) {
6730                 dev_err(&pdev->dev, "Cannot enable PCI device, aborting.\n");
6731                 goto err_out;
6732         }
6733
6734         if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
6735                 dev_err(&pdev->dev,
6736                         "Cannot find PCI device base address, aborting.\n");
6737                 rc = -ENODEV;
6738                 goto err_out_disable;
6739         }
6740
6741         rc = pci_request_regions(pdev, DRV_MODULE_NAME);
6742         if (rc) {
6743                 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting.\n");
6744                 goto err_out_disable;
6745         }
6746
6747         pci_set_master(pdev);
6748
6749         bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
6750         if (bp->pm_cap == 0) {
6751                 dev_err(&pdev->dev,
6752                         "Cannot find power management capability, aborting.\n");
6753                 rc = -EIO;
6754                 goto err_out_release;
6755         }
6756
6757         bp->dev = dev;
6758         bp->pdev = pdev;
6759
6760         spin_lock_init(&bp->phy_lock);
6761         spin_lock_init(&bp->indirect_lock);
6762         INIT_WORK(&bp->reset_task, bnx2_reset_task);
6763
6764         dev->base_addr = dev->mem_start = pci_resource_start(pdev, 0);
6765         mem_len = MB_GET_CID_ADDR(TX_TSS_CID + 1);
6766         dev->mem_end = dev->mem_start + mem_len;
6767         dev->irq = pdev->irq;
6768
6769         bp->regview = ioremap_nocache(dev->base_addr, mem_len);
6770
6771         if (!bp->regview) {
6772                 dev_err(&pdev->dev, "Cannot map register space, aborting.\n");
6773                 rc = -ENOMEM;
6774                 goto err_out_release;
6775         }
6776
6777         /* Configure byte swap and enable write to the reg_window registers.
6778          * Rely on CPU to do target byte swapping on big endian systems
6779          * The chip's target access swapping will not swap all accesses
6780          */
6781         pci_write_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG,
6782                                BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
6783                                BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP);
6784
6785         bnx2_set_power_state(bp, PCI_D0);
6786
6787         bp->chip_id = REG_RD(bp, BNX2_MISC_ID);
6788
6789         if (CHIP_NUM(bp) == CHIP_NUM_5709) {
6790                 if (pci_find_capability(pdev, PCI_CAP_ID_EXP) == 0) {
6791                         dev_err(&pdev->dev,
6792                                 "Cannot find PCIE capability, aborting.\n");
6793                         rc = -EIO;
6794                         goto err_out_unmap;
6795                 }
6796                 bp->flags |= PCIE_FLAG;
6797         } else {
6798                 bp->pcix_cap = pci_find_capability(pdev, PCI_CAP_ID_PCIX);
6799                 if (bp->pcix_cap == 0) {
6800                         dev_err(&pdev->dev,
6801                                 "Cannot find PCIX capability, aborting.\n");
6802                         rc = -EIO;
6803                         goto err_out_unmap;
6804                 }
6805         }
6806
6807         if (CHIP_ID(bp) != CHIP_ID_5706_A0 && CHIP_ID(bp) != CHIP_ID_5706_A1) {
6808                 if (pci_find_capability(pdev, PCI_CAP_ID_MSI))
6809                         bp->flags |= MSI_CAP_FLAG;
6810         }
6811
6812         /* 5708 cannot support DMA addresses > 40-bit.  */
6813         if (CHIP_NUM(bp) == CHIP_NUM_5708)
6814                 persist_dma_mask = dma_mask = DMA_40BIT_MASK;
6815         else
6816                 persist_dma_mask = dma_mask = DMA_64BIT_MASK;
6817
6818         /* Configure DMA attributes. */
6819         if (pci_set_dma_mask(pdev, dma_mask) == 0) {
6820                 dev->features |= NETIF_F_HIGHDMA;
6821                 rc = pci_set_consistent_dma_mask(pdev, persist_dma_mask);
6822                 if (rc) {
6823                         dev_err(&pdev->dev,
6824                                 "pci_set_consistent_dma_mask failed, aborting.\n");
6825                         goto err_out_unmap;
6826                 }
6827         } else if ((rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK)) != 0) {
6828                 dev_err(&pdev->dev, "System does not support DMA, aborting.\n");
6829                 goto err_out_unmap;
6830         }
6831
6832         if (!(bp->flags & PCIE_FLAG))
6833                 bnx2_get_pci_speed(bp);
6834
6835         /* 5706A0 may falsely detect SERR and PERR. */
6836         if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
6837                 reg = REG_RD(bp, PCI_COMMAND);
6838                 reg &= ~(PCI_COMMAND_SERR | PCI_COMMAND_PARITY);
6839                 REG_WR(bp, PCI_COMMAND, reg);
6840         }
6841         else if ((CHIP_ID(bp) == CHIP_ID_5706_A1) &&
6842                 !(bp->flags & PCIX_FLAG)) {
6843
6844                 dev_err(&pdev->dev,
6845                         "5706 A1 can only be used in a PCIX bus, aborting.\n");
6846                 goto err_out_unmap;
6847         }
6848
6849         bnx2_init_nvram(bp);
6850
6851         reg = REG_RD_IND(bp, BNX2_SHM_HDR_SIGNATURE);
6852
6853         if ((reg & BNX2_SHM_HDR_SIGNATURE_SIG_MASK) ==
6854             BNX2_SHM_HDR_SIGNATURE_SIG) {
6855                 u32 off = PCI_FUNC(pdev->devfn) << 2;
6856
6857                 bp->shmem_base = REG_RD_IND(bp, BNX2_SHM_HDR_ADDR_0 + off);
6858         } else
6859                 bp->shmem_base = HOST_VIEW_SHMEM_BASE;
6860
6861         /* Get the permanent MAC address.  First we need to make sure the
6862          * firmware is actually running.
6863          */
6864         reg = REG_RD_IND(bp, bp->shmem_base + BNX2_DEV_INFO_SIGNATURE);
6865
6866         if ((reg & BNX2_DEV_INFO_SIGNATURE_MAGIC_MASK) !=
6867             BNX2_DEV_INFO_SIGNATURE_MAGIC) {
6868                 dev_err(&pdev->dev, "Firmware not running, aborting.\n");
6869                 rc = -ENODEV;
6870                 goto err_out_unmap;
6871         }
6872
6873         reg = REG_RD_IND(bp, bp->shmem_base + BNX2_DEV_INFO_BC_REV);
6874         for (i = 0, j = 0; i < 3; i++) {
6875                 u8 num, k, skip0;
6876
6877                 num = (u8) (reg >> (24 - (i * 8)));
6878                 for (k = 100, skip0 = 1; k >= 1; num %= k, k /= 10) {
6879                         if (num >= k || !skip0 || k == 1) {
6880                                 bp->fw_version[j++] = (num / k) + '0';
6881                                 skip0 = 0;
6882                         }
6883                 }
6884                 if (i != 2)
6885                         bp->fw_version[j++] = '.';
6886         }
6887         reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_FEATURE);
6888         if (reg & BNX2_PORT_FEATURE_WOL_ENABLED)
6889                 bp->wol = 1;
6890
6891         if (reg & BNX2_PORT_FEATURE_ASF_ENABLED) {
6892                 bp->flags |= ASF_ENABLE_FLAG;
6893
6894                 for (i = 0; i < 30; i++) {
6895                         reg = REG_RD_IND(bp, bp->shmem_base +
6896                                              BNX2_BC_STATE_CONDITION);
6897                         if (reg & BNX2_CONDITION_MFW_RUN_MASK)
6898                                 break;
6899                         msleep(10);
6900                 }
6901         }
6902         reg = REG_RD_IND(bp, bp->shmem_base + BNX2_BC_STATE_CONDITION);
6903         reg &= BNX2_CONDITION_MFW_RUN_MASK;
6904         if (reg != BNX2_CONDITION_MFW_RUN_UNKNOWN &&
6905             reg != BNX2_CONDITION_MFW_RUN_NONE) {
6906                 int i;
6907                 u32 addr = REG_RD_IND(bp, bp->shmem_base + BNX2_MFW_VER_PTR);
6908
6909                 bp->fw_version[j++] = ' ';
6910                 for (i = 0; i < 3; i++) {
6911                         reg = REG_RD_IND(bp, addr + i * 4);
6912                         reg = swab32(reg);
6913                         memcpy(&bp->fw_version[j], &reg, 4);
6914                         j += 4;
6915                 }
6916         }
6917
6918         reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_MAC_UPPER);
6919         bp->mac_addr[0] = (u8) (reg >> 8);
6920         bp->mac_addr[1] = (u8) reg;
6921
6922         reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_MAC_LOWER);
6923         bp->mac_addr[2] = (u8) (reg >> 24);
6924         bp->mac_addr[3] = (u8) (reg >> 16);
6925         bp->mac_addr[4] = (u8) (reg >> 8);
6926         bp->mac_addr[5] = (u8) reg;
6927
6928         bp->rx_offset = sizeof(struct l2_fhdr) + 2;
6929
6930         bp->tx_ring_size = MAX_TX_DESC_CNT;
6931         bnx2_set_rx_ring_size(bp, 255);
6932
6933         bp->rx_csum = 1;
6934
6935         bp->tx_quick_cons_trip_int = 20;
6936         bp->tx_quick_cons_trip = 20;
6937         bp->tx_ticks_int = 80;
6938         bp->tx_ticks = 80;
6939
6940         bp->rx_quick_cons_trip_int = 6;
6941         bp->rx_quick_cons_trip = 6;
6942         bp->rx_ticks_int = 18;
6943         bp->rx_ticks = 18;
6944
6945         bp->stats_ticks = USEC_PER_SEC & BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
6946
6947         bp->timer_interval =  HZ;
6948         bp->current_interval =  HZ;
6949
6950         bp->phy_addr = 1;
6951
6952         /* Disable WOL support if we are running on a SERDES chip. */
6953         if (CHIP_NUM(bp) == CHIP_NUM_5709)
6954                 bnx2_get_5709_media(bp);
6955         else if (CHIP_BOND_ID(bp) & CHIP_BOND_ID_SERDES_BIT)
6956                 bp->phy_flags |= PHY_SERDES_FLAG;
6957
6958         bp->phy_port = PORT_TP;
6959         if (bp->phy_flags & PHY_SERDES_FLAG) {
6960                 bp->phy_port = PORT_FIBRE;
6961                 reg = REG_RD_IND(bp, bp->shmem_base +
6962                                      BNX2_SHARED_HW_CFG_CONFIG);
6963                 if (!(reg & BNX2_SHARED_HW_CFG_GIG_LINK_ON_VAUX)) {
6964                         bp->flags |= NO_WOL_FLAG;
6965                         bp->wol = 0;
6966                 }
6967                 if (CHIP_NUM(bp) != CHIP_NUM_5706) {
6968                         bp->phy_addr = 2;
6969                         if (reg & BNX2_SHARED_HW_CFG_PHY_2_5G)
6970                                 bp->phy_flags |= PHY_2_5G_CAPABLE_FLAG;
6971                 }
6972                 bnx2_init_remote_phy(bp);
6973
6974         } else if (CHIP_NUM(bp) == CHIP_NUM_5706 ||
6975                    CHIP_NUM(bp) == CHIP_NUM_5708)
6976                 bp->phy_flags |= PHY_CRC_FIX_FLAG;
6977         else if (CHIP_NUM(bp) == CHIP_NUM_5709 &&
6978                  (CHIP_REV(bp) == CHIP_REV_Ax ||
6979                   CHIP_REV(bp) == CHIP_REV_Bx))
6980                 bp->phy_flags |= PHY_DIS_EARLY_DAC_FLAG;
6981
6982         if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
6983             (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
6984             (CHIP_ID(bp) == CHIP_ID_5708_B1)) {
6985                 bp->flags |= NO_WOL_FLAG;
6986                 bp->wol = 0;
6987         }
6988
6989         if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
6990                 bp->tx_quick_cons_trip_int =
6991                         bp->tx_quick_cons_trip;
6992                 bp->tx_ticks_int = bp->tx_ticks;
6993                 bp->rx_quick_cons_trip_int =
6994                         bp->rx_quick_cons_trip;
6995                 bp->rx_ticks_int = bp->rx_ticks;
6996                 bp->comp_prod_trip_int = bp->comp_prod_trip;
6997                 bp->com_ticks_int = bp->com_ticks;
6998                 bp->cmd_ticks_int = bp->cmd_ticks;
6999         }
7000
7001         /* Disable MSI on 5706 if AMD 8132 bridge is found.
7002          *
7003          * MSI is defined to be 32-bit write.  The 5706 does 64-bit MSI writes
7004          * with byte enables disabled on the unused 32-bit word.  This is legal
7005          * but causes problems on the AMD 8132 which will eventually stop
7006          * responding after a while.
7007          *
7008          * AMD believes this incompatibility is unique to the 5706, and
7009          * prefers to locally disable MSI rather than globally disabling it.
7010          */
7011         if (CHIP_NUM(bp) == CHIP_NUM_5706 && disable_msi == 0) {
7012                 struct pci_dev *amd_8132 = NULL;
7013
7014                 while ((amd_8132 = pci_get_device(PCI_VENDOR_ID_AMD,
7015                                                   PCI_DEVICE_ID_AMD_8132_BRIDGE,
7016                                                   amd_8132))) {
7017
7018                         if (amd_8132->revision >= 0x10 &&
7019                             amd_8132->revision <= 0x13) {
7020                                 disable_msi = 1;
7021                                 pci_dev_put(amd_8132);
7022                                 break;
7023                         }
7024                 }
7025         }
7026
7027         bnx2_set_default_link(bp);
7028         bp->req_flow_ctrl = FLOW_CTRL_RX | FLOW_CTRL_TX;
7029
7030         init_timer(&bp->timer);
7031         bp->timer.expires = RUN_AT(bp->timer_interval);
7032         bp->timer.data = (unsigned long) bp;
7033         bp->timer.function = bnx2_timer;
7034
7035         return 0;
7036
7037 err_out_unmap:
7038         if (bp->regview) {
7039                 iounmap(bp->regview);
7040                 bp->regview = NULL;
7041         }
7042
7043 err_out_release:
7044         pci_release_regions(pdev);
7045
7046 err_out_disable:
7047         pci_disable_device(pdev);
7048         pci_set_drvdata(pdev, NULL);
7049
7050 err_out:
7051         return rc;
7052 }
7053
7054 static char * __devinit
7055 bnx2_bus_string(struct bnx2 *bp, char *str)
7056 {
7057         char *s = str;
7058
7059         if (bp->flags & PCIE_FLAG) {
7060                 s += sprintf(s, "PCI Express");
7061         } else {
7062                 s += sprintf(s, "PCI");
7063                 if (bp->flags & PCIX_FLAG)
7064                         s += sprintf(s, "-X");
7065                 if (bp->flags & PCI_32BIT_FLAG)
7066                         s += sprintf(s, " 32-bit");
7067                 else
7068                         s += sprintf(s, " 64-bit");
7069                 s += sprintf(s, " %dMHz", bp->bus_speed_mhz);
7070         }
7071         return str;
7072 }
7073
/* PCI probe entry point: allocate the net_device, run the board
 * initialization, wire up the net_device operations and feature
 * flags, and register with the networking core.  Returns 0 on
 * success or a negative errno.
 */
static int __devinit
bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	static int version_printed = 0;
	struct net_device *dev = NULL;
	struct bnx2 *bp;
	int rc;
	char str[40];
	DECLARE_MAC_BUF(mac);

	/* Print the driver banner once, on the first probed device. */
	if (version_printed++ == 0)
		printk(KERN_INFO "%s", version);

	/* dev zeroed in init_etherdev */
	dev = alloc_etherdev(sizeof(*bp));

	if (!dev)
		return -ENOMEM;

	rc = bnx2_init_board(pdev, dev);
	if (rc < 0) {
		free_netdev(dev);
		return rc;
	}

	/* Hook up the net_device operations before registration. */
	dev->open = bnx2_open;
	dev->hard_start_xmit = bnx2_start_xmit;
	dev->stop = bnx2_close;
	dev->get_stats = bnx2_get_stats;
	dev->set_multicast_list = bnx2_set_rx_mode;
	dev->do_ioctl = bnx2_ioctl;
	dev->set_mac_address = bnx2_change_mac_addr;
	dev->change_mtu = bnx2_change_mtu;
	dev->tx_timeout = bnx2_tx_timeout;
	dev->watchdog_timeo = TX_TIMEOUT;
#ifdef BCM_VLAN
	dev->vlan_rx_register = bnx2_vlan_rx_register;
#endif
	dev->ethtool_ops = &bnx2_ethtool_ops;

	bp = netdev_priv(dev);
	netif_napi_add(dev, &bp->napi, bnx2_poll, 64);

#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
	dev->poll_controller = poll_bnx2;
#endif

	pci_set_drvdata(pdev, dev);

	/* Publish the permanent MAC address read by bnx2_init_board(). */
	memcpy(dev->dev_addr, bp->mac_addr, 6);
	memcpy(dev->perm_addr, bp->mac_addr, 6);
	bp->name = board_info[ent->driver_data].name;

	/* Checksum/TSO offload capabilities; 5709 additionally handles IPv6. */
	dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		dev->features |= NETIF_F_IPV6_CSUM;

#ifdef BCM_VLAN
	dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
#endif
	dev->features |= NETIF_F_TSO | NETIF_F_TSO_ECN;
	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		dev->features |= NETIF_F_TSO6;

	/* On registration failure, undo everything bnx2_init_board() set up. */
	if ((rc = register_netdev(dev))) {
		dev_err(&pdev->dev, "Cannot register net device\n");
		if (bp->regview)
			iounmap(bp->regview);
		pci_release_regions(pdev);
		pci_disable_device(pdev);
		pci_set_drvdata(pdev, NULL);
		free_netdev(dev);
		return rc;
	}

	printk(KERN_INFO "%s: %s (%c%d) %s found at mem %lx, "
		"IRQ %d, node addr %s\n",
		dev->name,
		bp->name,
		((CHIP_ID(bp) & 0xf000) >> 12) + 'A',
		((CHIP_ID(bp) & 0x0ff0) >> 4),
		bnx2_bus_string(bp, str),
		dev->base_addr,
		bp->pdev->irq, print_mac(mac, dev->dev_addr));

	return 0;
}
7161
7162 static void __devexit
7163 bnx2_remove_one(struct pci_dev *pdev)
7164 {
7165         struct net_device *dev = pci_get_drvdata(pdev);
7166         struct bnx2 *bp = netdev_priv(dev);
7167
7168         flush_scheduled_work();
7169
7170         unregister_netdev(dev);
7171
7172         if (bp->regview)
7173                 iounmap(bp->regview);
7174
7175         free_netdev(dev);
7176         pci_release_regions(pdev);
7177         pci_disable_device(pdev);
7178         pci_set_drvdata(pdev, NULL);
7179 }
7180
7181 static int
7182 bnx2_suspend(struct pci_dev *pdev, pm_message_t state)
7183 {
7184         struct net_device *dev = pci_get_drvdata(pdev);
7185         struct bnx2 *bp = netdev_priv(dev);
7186         u32 reset_code;
7187
7188         /* PCI register 4 needs to be saved whether netif_running() or not.
7189          * MSI address and data need to be saved if using MSI and
7190          * netif_running().
7191          */
7192         pci_save_state(pdev);
7193         if (!netif_running(dev))
7194                 return 0;
7195
7196         flush_scheduled_work();
7197         bnx2_netif_stop(bp);
7198         netif_device_detach(dev);
7199         del_timer_sync(&bp->timer);
7200         if (bp->flags & NO_WOL_FLAG)
7201                 reset_code = BNX2_DRV_MSG_CODE_UNLOAD_LNK_DN;
7202         else if (bp->wol)
7203                 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
7204         else
7205                 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
7206         bnx2_reset_chip(bp, reset_code);
7207         bnx2_free_skbs(bp);
7208         bnx2_set_power_state(bp, pci_choose_state(pdev, state));
7209         return 0;
7210 }
7211
7212 static int
7213 bnx2_resume(struct pci_dev *pdev)
7214 {
7215         struct net_device *dev = pci_get_drvdata(pdev);
7216         struct bnx2 *bp = netdev_priv(dev);
7217
7218         pci_restore_state(pdev);
7219         if (!netif_running(dev))
7220                 return 0;
7221
7222         bnx2_set_power_state(bp, PCI_D0);
7223         netif_device_attach(dev);
7224         bnx2_init_nic(bp);
7225         bnx2_netif_start(bp);
7226         return 0;
7227 }
7228
/* PCI driver glue: probe/remove plus power-management callbacks,
 * matched against the device table declared elsewhere in this file.
 */
static struct pci_driver bnx2_pci_driver = {
	.name		= DRV_MODULE_NAME,
	.id_table	= bnx2_pci_tbl,
	.probe		= bnx2_init_one,
	.remove		= __devexit_p(bnx2_remove_one),
	.suspend	= bnx2_suspend,
	.resume		= bnx2_resume,
};
7237
/* Module load: register the PCI driver with the PCI core. */
static int __init bnx2_init(void)
{
	return pci_register_driver(&bnx2_pci_driver);
}
7242
/* Module unload: unregister the PCI driver (removes all devices). */
static void __exit bnx2_cleanup(void)
{
	pci_unregister_driver(&bnx2_pci_driver);
}
7247
/* Register the module entry/exit points with the kernel. */
module_init(bnx2_init);
module_exit(bnx2_cleanup);
7250
7251
7252