[BNX2]: Fix occasional NETDEV WATCHDOG on 5709.
[sfrench/cifs-2.6.git] / drivers / net / bnx2.c
1 /* bnx2.c: Broadcom NX2 network driver.
2  *
3  * Copyright (c) 2004, 2005, 2006 Broadcom Corporation
4  *
5  * This program is free software; you can redistribute it and/or modify
6  * it under the terms of the GNU General Public License as published by
7  * the Free Software Foundation.
8  *
9  * Written by: Michael Chan  (mchan@broadcom.com)
10  */
11
12
13 #include <linux/module.h>
14 #include <linux/moduleparam.h>
15
16 #include <linux/kernel.h>
17 #include <linux/timer.h>
18 #include <linux/errno.h>
19 #include <linux/ioport.h>
20 #include <linux/slab.h>
21 #include <linux/vmalloc.h>
22 #include <linux/interrupt.h>
23 #include <linux/pci.h>
24 #include <linux/init.h>
25 #include <linux/netdevice.h>
26 #include <linux/etherdevice.h>
27 #include <linux/skbuff.h>
28 #include <linux/dma-mapping.h>
29 #include <asm/bitops.h>
30 #include <asm/io.h>
31 #include <asm/irq.h>
32 #include <linux/delay.h>
33 #include <asm/byteorder.h>
34 #include <asm/page.h>
35 #include <linux/time.h>
36 #include <linux/ethtool.h>
37 #include <linux/mii.h>
38 #ifdef NETIF_F_HW_VLAN_TX
39 #include <linux/if_vlan.h>
40 #define BCM_VLAN 1
41 #endif
42 #include <net/ip.h>
43 #include <net/tcp.h>
44 #include <net/checksum.h>
45 #include <linux/workqueue.h>
46 #include <linux/crc32.h>
47 #include <linux/prefetch.h>
48 #include <linux/cache.h>
49 #include <linux/zlib.h>
50
51 #include "bnx2.h"
52 #include "bnx2_fw.h"
53 #include "bnx2_fw2.h"
54
55 #define DRV_MODULE_NAME         "bnx2"
56 #define PFX DRV_MODULE_NAME     ": "
57 #define DRV_MODULE_VERSION      "1.5.8"
58 #define DRV_MODULE_RELDATE      "April 24, 2007"
59
60 #define RUN_AT(x) (jiffies + (x))
61
62 /* Time in jiffies before concluding the transmitter is hung. */
63 #define TX_TIMEOUT  (5*HZ)
64
65 static const char version[] __devinitdata =
66         "Broadcom NetXtreme II Gigabit Ethernet Driver " DRV_MODULE_NAME " v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
67
68 MODULE_AUTHOR("Michael Chan <mchan@broadcom.com>");
69 MODULE_DESCRIPTION("Broadcom NetXtreme II BCM5706/5708 Driver");
70 MODULE_LICENSE("GPL");
71 MODULE_VERSION(DRV_MODULE_VERSION);
72
73 static int disable_msi = 0;
74
75 module_param(disable_msi, int, 0);
76 MODULE_PARM_DESC(disable_msi, "Disable Message Signaled Interrupt (MSI)");
77
/* Board identifiers; used as the driver_data index into board_info[]
 * and bnx2_pci_tbl[] below.
 */
typedef enum {
        BCM5706 = 0,
        NC370T,
        NC370I,
        BCM5706S,
        NC370F,
        BCM5708,
        BCM5708S,
        BCM5709,
} board_t;
88
/* Human-readable board names, indexed by board_t above; order must
 * match the enum exactly.
 */
static const struct {
        char *name;
} board_info[] __devinitdata = {
        { "Broadcom NetXtreme II BCM5706 1000Base-T" },
        { "HP NC370T Multifunction Gigabit Server Adapter" },
        { "HP NC370i Multifunction Gigabit Server Adapter" },
        { "Broadcom NetXtreme II BCM5706 1000Base-SX" },
        { "HP NC370F Multifunction Gigabit Server Adapter" },
        { "Broadcom NetXtreme II BCM5708 1000Base-T" },
        { "Broadcom NetXtreme II BCM5708 1000Base-SX" },
        { "Broadcom NetXtreme II BCM5709 1000Base-T" },
        };
102
/* PCI ID table.  The HP OEM variants are matched by subsystem vendor/
 * device ID and must come before the PCI_ANY_ID catch-all entries for
 * the same chip, since the PCI core takes the first match.
 */
static struct pci_device_id bnx2_pci_tbl[] = {
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
          PCI_VENDOR_ID_HP, 0x3101, 0, 0, NC370T },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
          PCI_VENDOR_ID_HP, 0x3106, 0, 0, NC370I },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706 },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708 },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
          PCI_VENDOR_ID_HP, 0x3102, 0, 0, NC370F },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706S },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708S,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708S },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709 },
        { 0, }
};
122
/* NVRAM device table, selected at probe time by matching the chip's
 * flash strapping against the first field of each entry.
 * NOTE(review): field order appears to be { strapping, config1,
 * config2, config3, write1, buffered, page_bits, page_size,
 * addr_mask, total_size, name } per struct flash_spec in bnx2.h —
 * confirm against the header before relying on it.
 */
static struct flash_spec flash_table[] =
{
        /* Slow EEPROM */
        {0x00000000, 0x40830380, 0x009f0081, 0xa184a053, 0xaf000400,
         1, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
         SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
         "EEPROM - slow"},
        /* Expansion entry 0001 */
        {0x08000002, 0x4b808201, 0x00050081, 0x03840253, 0xaf020406,
         0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
         SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
         "Entry 0001"},
        /* Saifun SA25F010 (non-buffered flash) */
        /* strap, cfg1, & write1 need updates */
        {0x04000001, 0x47808201, 0x00050081, 0x03840253, 0xaf020406,
         0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
         SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*2,
         "Non-buffered flash (128kB)"},
        /* Saifun SA25F020 (non-buffered flash) */
        /* strap, cfg1, & write1 need updates */
        {0x0c000003, 0x4f808201, 0x00050081, 0x03840253, 0xaf020406,
         0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
         SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*4,
         "Non-buffered flash (256kB)"},
        /* Expansion entry 0100 */
        {0x11000000, 0x53808201, 0x00050081, 0x03840253, 0xaf020406,
         0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
         SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
         "Entry 0100"},
        /* Entry 0101: ST M45PE10 (non-buffered flash, TetonII B0) */
        {0x19000002, 0x5b808201, 0x000500db, 0x03840253, 0xaf020406,
         0, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
         ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*2,
         "Entry 0101: ST M45PE10 (128kB non-bufferred)"},
        /* Entry 0110: ST M45PE20 (non-buffered flash)*/
        {0x15000001, 0x57808201, 0x000500db, 0x03840253, 0xaf020406,
         0, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
         ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*4,
         "Entry 0110: ST M45PE20 (256kB non-bufferred)"},
        /* Saifun SA25F005 (non-buffered flash) */
        /* strap, cfg1, & write1 need updates */
        {0x1d000003, 0x5f808201, 0x00050081, 0x03840253, 0xaf020406,
         0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
         SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE,
         "Non-buffered flash (64kB)"},
        /* Fast EEPROM */
        {0x22000000, 0x62808380, 0x009f0081, 0xa184a053, 0xaf000400,
         1, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
         SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
         "EEPROM - fast"},
        /* Expansion entry 1001 */
        {0x2a000002, 0x6b808201, 0x00050081, 0x03840253, 0xaf020406,
         0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
         SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
         "Entry 1001"},
        /* Expansion entry 1010 */
        {0x26000001, 0x67808201, 0x00050081, 0x03840253, 0xaf020406,
         0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
         SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
         "Entry 1010"},
        /* ATMEL AT45DB011B (buffered flash) */
        {0x2e000003, 0x6e808273, 0x00570081, 0x68848353, 0xaf000400,
         1, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
         BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE,
         "Buffered flash (128kB)"},
        /* Expansion entry 1100 */
        {0x33000000, 0x73808201, 0x00050081, 0x03840253, 0xaf020406,
         0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
         SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
         "Entry 1100"},
        /* Expansion entry 1101 */
        {0x3b000002, 0x7b808201, 0x00050081, 0x03840253, 0xaf020406,
         0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
         SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
         "Entry 1101"},
        /* Ateml Expansion entry 1110 */
        {0x37000001, 0x76808273, 0x00570081, 0x68848353, 0xaf000400,
         1, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
         BUFFERED_FLASH_BYTE_ADDR_MASK, 0,
         "Entry 1110 (Atmel)"},
        /* ATMEL AT45DB021B (buffered flash) */
        {0x3f000003, 0x7e808273, 0x00570081, 0x68848353, 0xaf000400,
         1, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
         BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE*2,
         "Buffered flash (256kB)"},
};
209
210 MODULE_DEVICE_TABLE(pci, bnx2_pci_tbl);
211
/* Return the number of free tx descriptors.
 *
 * tx_prod and tx_cons are free-running indices updated by the xmit and
 * completion paths respectively; the difference can momentarily exceed
 * TX_DESC_CNT, hence the masking below.
 * NOTE(review): smp_mb() presumably pairs with a barrier in the tx
 * completion path so a fresh tx_cons is observed before deciding the
 * ring is full — confirm against bnx2_tx_int().
 */
static inline u32 bnx2_tx_avail(struct bnx2 *bp)
{
        u32 diff;

        smp_mb();

        /* The ring uses 256 indices for 255 entries, one of them
         * needs to be skipped.
         */
        diff = bp->tx_prod - bp->tx_cons;
        if (unlikely(diff >= TX_DESC_CNT)) {
                /* Indices wrap at 16 bits; renormalize and account for
                 * the one unusable ring entry. */
                diff &= 0xffff;
                if (diff == TX_DESC_CNT)
                        diff = MAX_TX_DESC_CNT;
        }
        return (bp->tx_ring_size - diff);
}
229
/* Indirect register read: select the target offset through the PCICFG
 * register window, then read the windowed data register.
 */
static u32
bnx2_reg_rd_ind(struct bnx2 *bp, u32 offset)
{
        REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
        return (REG_RD(bp, BNX2_PCICFG_REG_WINDOW));
}
236
/* Indirect register write: counterpart of bnx2_reg_rd_ind(); selects
 * the offset through the PCICFG window, then writes the value.
 */
static void
bnx2_reg_wr_ind(struct bnx2 *bp, u32 offset, u32 val)
{
        REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
        REG_WR(bp, BNX2_PCICFG_REG_WINDOW, val);
}
243
244 static void
245 bnx2_ctx_wr(struct bnx2 *bp, u32 cid_addr, u32 offset, u32 val)
246 {
247         offset += cid_addr;
248         if (CHIP_NUM(bp) == CHIP_NUM_5709) {
249                 int i;
250
251                 REG_WR(bp, BNX2_CTX_CTX_DATA, val);
252                 REG_WR(bp, BNX2_CTX_CTX_CTRL,
253                        offset | BNX2_CTX_CTX_CTRL_WRITE_REQ);
254                 for (i = 0; i < 5; i++) {
255                         u32 val;
256                         val = REG_RD(bp, BNX2_CTX_CTX_CTRL);
257                         if ((val & BNX2_CTX_CTX_CTRL_WRITE_REQ) == 0)
258                                 break;
259                         udelay(5);
260                 }
261         } else {
262                 REG_WR(bp, BNX2_CTX_DATA_ADR, offset);
263                 REG_WR(bp, BNX2_CTX_DATA, val);
264         }
265 }
266
/* Read PHY register 'reg' over the EMAC MDIO interface into *val.
 *
 * If the EMAC is auto-polling the PHY, auto-poll is disabled around the
 * access and re-enabled afterwards; the 40us delays let the MDIO state
 * machine quiesce.  The access is polled for up to ~500us (50 x 10us).
 *
 * Returns 0 on success, -EBUSY on timeout (*val is zeroed then).
 */
static int
bnx2_read_phy(struct bnx2 *bp, u32 reg, u32 *val)
{
        u32 val1;
        int i, ret;

        if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
                /* Temporarily take the MDIO bus away from the auto-poll
                 * hardware so this manual access doesn't collide. */
                val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
                val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;

                REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
                REG_RD(bp, BNX2_EMAC_MDIO_MODE);

                udelay(40);
        }

        /* Compose and fire the MDIO read command. */
        val1 = (bp->phy_addr << 21) | (reg << 16) |
                BNX2_EMAC_MDIO_COMM_COMMAND_READ | BNX2_EMAC_MDIO_COMM_DISEXT |
                BNX2_EMAC_MDIO_COMM_START_BUSY;
        REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);

        for (i = 0; i < 50; i++) {
                udelay(10);

                val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
                if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
                        udelay(5);

                        /* Re-read to fetch the returned data bits. */
                        val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
                        val1 &= BNX2_EMAC_MDIO_COMM_DATA;

                        break;
                }
        }

        if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY) {
                *val = 0x0;
                ret = -EBUSY;
        }
        else {
                *val = val1;
                ret = 0;
        }

        if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
                /* Hand the MDIO bus back to the auto-poll hardware. */
                val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
                val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;

                REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
                REG_RD(bp, BNX2_EMAC_MDIO_MODE);

                udelay(40);
        }

        return ret;
}
323
/* Write 'val' to PHY register 'reg' over the EMAC MDIO interface.
 *
 * Mirrors bnx2_read_phy(): auto-poll is suspended around the access if
 * active, and the command is polled for completion for up to ~500us.
 * Returns 0 on success, -EBUSY on timeout.
 */
static int
bnx2_write_phy(struct bnx2 *bp, u32 reg, u32 val)
{
        u32 val1;
        int i, ret;

        if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
                /* Take the MDIO bus away from the auto-poll hardware. */
                val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
                val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;

                REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
                REG_RD(bp, BNX2_EMAC_MDIO_MODE);

                udelay(40);
        }

        /* Compose and fire the MDIO write command (data in low bits). */
        val1 = (bp->phy_addr << 21) | (reg << 16) | val |
                BNX2_EMAC_MDIO_COMM_COMMAND_WRITE |
                BNX2_EMAC_MDIO_COMM_START_BUSY | BNX2_EMAC_MDIO_COMM_DISEXT;
        REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);

        for (i = 0; i < 50; i++) {
                udelay(10);

                val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
                if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
                        udelay(5);
                        break;
                }
        }

        if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)
                ret = -EBUSY;
        else
                ret = 0;

        if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
                /* Restore auto-polling. */
                val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
                val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;

                REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
                REG_RD(bp, BNX2_EMAC_MDIO_MODE);

                udelay(40);
        }

        return ret;
}
372
/* Mask device interrupts.  The read-back flushes the posted write so
 * the mask takes effect before the caller proceeds.
 */
static void
bnx2_disable_int(struct bnx2 *bp)
{
        REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
               BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
        REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD);
}
380
/* Unmask device interrupts and acknowledge up to last_status_idx.
 *
 * NOTE(review): the first write acks with interrupts still masked, the
 * second unmasks, and the final COAL_NOW command presumably forces the
 * host coalescing block to generate an interrupt for any events that
 * arrived while masked — confirm against the 5706/5708 programming
 * guide.
 */
static void
bnx2_enable_int(struct bnx2 *bp)
{
        REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
               BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
               BNX2_PCICFG_INT_ACK_CMD_MASK_INT | bp->last_status_idx);

        REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
               BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID | bp->last_status_idx);

        REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
}
393
/* Mask interrupts and wait for any in-flight handler to finish.
 * intr_sem is bumped first so the ISR/poll path sees the disable
 * request; it is decremented again in bnx2_netif_start().
 */
static void
bnx2_disable_int_sync(struct bnx2 *bp)
{
        atomic_inc(&bp->intr_sem);
        bnx2_disable_int(bp);
        synchronize_irq(bp->pdev->irq);
}
401
/* Quiesce the interface: disable interrupts synchronously, stop NAPI
 * polling and the tx queue.  trans_start is refreshed so the watchdog
 * doesn't fire while the device is deliberately stopped.
 */
static void
bnx2_netif_stop(struct bnx2 *bp)
{
        bnx2_disable_int_sync(bp);
        if (netif_running(bp->dev)) {
                netif_poll_disable(bp->dev);
                netif_tx_disable(bp->dev);
                bp->dev->trans_start = jiffies; /* prevent tx timeout */
        }
}
412
/* Undo bnx2_netif_stop().  Only the call that drops intr_sem to zero
 * actually restarts the queue, polling and interrupts, so nested
 * stop/start pairs balance correctly.
 */
static void
bnx2_netif_start(struct bnx2 *bp)
{
        if (atomic_dec_and_test(&bp->intr_sem)) {
                if (netif_running(bp->dev)) {
                        netif_wake_queue(bp->dev);
                        netif_poll_enable(bp->dev);
                        bnx2_enable_int(bp);
                }
        }
}
424
/* Release all DMA-coherent rings/blocks and the software shadow rings.
 * Safe to call on a partially allocated state (bnx2_alloc_mem's error
 * path relies on this); every pointer is checked and NULLed.
 */
static void
bnx2_free_mem(struct bnx2 *bp)
{
        int i;

        /* 5709-only context memory pages. */
        for (i = 0; i < bp->ctx_pages; i++) {
                if (bp->ctx_blk[i]) {
                        pci_free_consistent(bp->pdev, BCM_PAGE_SIZE,
                                            bp->ctx_blk[i],
                                            bp->ctx_blk_mapping[i]);
                        bp->ctx_blk[i] = NULL;
                }
        }
        /* Status + statistics blocks share one allocation; stats_blk
         * points into it and must not be freed separately. */
        if (bp->status_blk) {
                pci_free_consistent(bp->pdev, bp->status_stats_size,
                                    bp->status_blk, bp->status_blk_mapping);
                bp->status_blk = NULL;
                bp->stats_blk = NULL;
        }
        if (bp->tx_desc_ring) {
                pci_free_consistent(bp->pdev,
                                    sizeof(struct tx_bd) * TX_DESC_CNT,
                                    bp->tx_desc_ring, bp->tx_desc_mapping);
                bp->tx_desc_ring = NULL;
        }
        kfree(bp->tx_buf_ring);
        bp->tx_buf_ring = NULL;
        for (i = 0; i < bp->rx_max_ring; i++) {
                if (bp->rx_desc_ring[i])
                        pci_free_consistent(bp->pdev,
                                            sizeof(struct rx_bd) * RX_DESC_CNT,
                                            bp->rx_desc_ring[i],
                                            bp->rx_desc_mapping[i]);
                bp->rx_desc_ring[i] = NULL;
        }
        /* rx_buf_ring came from vmalloc, not kmalloc. */
        vfree(bp->rx_buf_ring);
        bp->rx_buf_ring = NULL;
}
463
/* Allocate all rings and DMA-coherent blocks used by the device:
 * tx shadow + descriptor rings, rx shadow + descriptor rings, the
 * combined status/statistics block, and (5709 only) context pages.
 *
 * Returns 0 on success or -ENOMEM; on failure everything allocated so
 * far is released via bnx2_free_mem().
 */
static int
bnx2_alloc_mem(struct bnx2 *bp)
{
        int i, status_blk_size;

        bp->tx_buf_ring = kzalloc(sizeof(struct sw_bd) * TX_DESC_CNT,
                                  GFP_KERNEL);
        if (bp->tx_buf_ring == NULL)
                return -ENOMEM;

        bp->tx_desc_ring = pci_alloc_consistent(bp->pdev,
                                                sizeof(struct tx_bd) *
                                                TX_DESC_CNT,
                                                &bp->tx_desc_mapping);
        if (bp->tx_desc_ring == NULL)
                goto alloc_mem_err;

        /* The rx shadow ring can span multiple pages, so vmalloc. */
        bp->rx_buf_ring = vmalloc(sizeof(struct sw_bd) * RX_DESC_CNT *
                                  bp->rx_max_ring);
        if (bp->rx_buf_ring == NULL)
                goto alloc_mem_err;

        memset(bp->rx_buf_ring, 0, sizeof(struct sw_bd) * RX_DESC_CNT *
                                   bp->rx_max_ring);

        for (i = 0; i < bp->rx_max_ring; i++) {
                bp->rx_desc_ring[i] =
                        pci_alloc_consistent(bp->pdev,
                                             sizeof(struct rx_bd) * RX_DESC_CNT,
                                             &bp->rx_desc_mapping[i]);
                if (bp->rx_desc_ring[i] == NULL)
                        goto alloc_mem_err;

        }

        /* Combine status and statistics blocks into one allocation. */
        status_blk_size = L1_CACHE_ALIGN(sizeof(struct status_block));
        bp->status_stats_size = status_blk_size +
                                sizeof(struct statistics_block);

        bp->status_blk = pci_alloc_consistent(bp->pdev, bp->status_stats_size,
                                              &bp->status_blk_mapping);
        if (bp->status_blk == NULL)
                goto alloc_mem_err;

        memset(bp->status_blk, 0, bp->status_stats_size);

        /* stats_blk lives immediately after the cache-aligned status
         * block inside the same DMA allocation. */
        bp->stats_blk = (void *) ((unsigned long) bp->status_blk +
                                  status_blk_size);

        bp->stats_blk_mapping = bp->status_blk_mapping + status_blk_size;

        if (CHIP_NUM(bp) == CHIP_NUM_5709) {
                /* 8KB of context memory, split into BCM_PAGE_SIZE pages. */
                bp->ctx_pages = 0x2000 / BCM_PAGE_SIZE;
                if (bp->ctx_pages == 0)
                        bp->ctx_pages = 1;
                for (i = 0; i < bp->ctx_pages; i++) {
                        bp->ctx_blk[i] = pci_alloc_consistent(bp->pdev,
                                                BCM_PAGE_SIZE,
                                                &bp->ctx_blk_mapping[i]);
                        if (bp->ctx_blk[i] == NULL)
                                goto alloc_mem_err;
                }
        }
        return 0;

alloc_mem_err:
        bnx2_free_mem(bp);
        return -ENOMEM;
}
534
/* Report the current link state to the bootcode/firmware by writing a
 * BNX2_LINK_STATUS_* word into shared memory.
 */
static void
bnx2_report_fw_link(struct bnx2 *bp)
{
        u32 fw_link_status = 0;

        if (bp->link_up) {
                u32 bmsr;

                switch (bp->line_speed) {
                case SPEED_10:
                        if (bp->duplex == DUPLEX_HALF)
                                fw_link_status = BNX2_LINK_STATUS_10HALF;
                        else
                                fw_link_status = BNX2_LINK_STATUS_10FULL;
                        break;
                case SPEED_100:
                        if (bp->duplex == DUPLEX_HALF)
                                fw_link_status = BNX2_LINK_STATUS_100HALF;
                        else
                                fw_link_status = BNX2_LINK_STATUS_100FULL;
                        break;
                case SPEED_1000:
                        if (bp->duplex == DUPLEX_HALF)
                                fw_link_status = BNX2_LINK_STATUS_1000HALF;
                        else
                                fw_link_status = BNX2_LINK_STATUS_1000FULL;
                        break;
                case SPEED_2500:
                        if (bp->duplex == DUPLEX_HALF)
                                fw_link_status = BNX2_LINK_STATUS_2500HALF;
                        else
                                fw_link_status = BNX2_LINK_STATUS_2500FULL;
                        break;
                }

                fw_link_status |= BNX2_LINK_STATUS_LINK_UP;

                if (bp->autoneg) {
                        fw_link_status |= BNX2_LINK_STATUS_AN_ENABLED;

                        /* BMSR latches link transitions; read twice so
                         * the second read reflects current status. */
                        bnx2_read_phy(bp, MII_BMSR, &bmsr);
                        bnx2_read_phy(bp, MII_BMSR, &bmsr);

                        if (!(bmsr & BMSR_ANEGCOMPLETE) ||
                            bp->phy_flags & PHY_PARALLEL_DETECT_FLAG)
                                fw_link_status |= BNX2_LINK_STATUS_PARALLEL_DET;
                        else
                                fw_link_status |= BNX2_LINK_STATUS_AN_COMPLETE;
                }
        }
        else
                fw_link_status = BNX2_LINK_STATUS_LINK_DOWN;

        REG_WR_IND(bp, bp->shmem_base + BNX2_LINK_STATUS, fw_link_status);
}
590
/* Log the link state, update the carrier flag, and forward the state
 * to the firmware.  The printk calls after the first are continuation
 * pieces of a single log line.
 */
static void
bnx2_report_link(struct bnx2 *bp)
{
        if (bp->link_up) {
                netif_carrier_on(bp->dev);
                printk(KERN_INFO PFX "%s NIC Link is Up, ", bp->dev->name);

                printk("%d Mbps ", bp->line_speed);

                if (bp->duplex == DUPLEX_FULL)
                        printk("full duplex");
                else
                        printk("half duplex");

                if (bp->flow_ctrl) {
                        if (bp->flow_ctrl & FLOW_CTRL_RX) {
                                printk(", receive ");
                                if (bp->flow_ctrl & FLOW_CTRL_TX)
                                        printk("& transmit ");
                        }
                        else {
                                printk(", transmit ");
                        }
                        printk("flow control ON");
                }
                printk("\n");
        }
        else {
                netif_carrier_off(bp->dev);
                printk(KERN_ERR PFX "%s NIC Link is Down\n", bp->dev->name);
        }

        bnx2_report_fw_link(bp);
}
625
/* Determine the negotiated tx/rx pause configuration and store it in
 * bp->flow_ctrl.
 *
 * If flow control is not being autonegotiated, the requested setting
 * is applied directly (full duplex only).  Otherwise the result is
 * resolved from the local and link-partner pause advertisements per
 * IEEE 802.3 Annex 28B, Table 28B-3; the 5708 SerDes reports the
 * resolved result directly in a status register instead.
 */
static void
bnx2_resolve_flow_ctrl(struct bnx2 *bp)
{
        u32 local_adv, remote_adv;

        bp->flow_ctrl = 0;
        if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
                (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {

                if (bp->duplex == DUPLEX_FULL) {
                        bp->flow_ctrl = bp->req_flow_ctrl;
                }
                return;
        }

        /* Pause is only defined for full duplex links. */
        if (bp->duplex != DUPLEX_FULL) {
                return;
        }

        if ((bp->phy_flags & PHY_SERDES_FLAG) &&
            (CHIP_NUM(bp) == CHIP_NUM_5708)) {
                u32 val;

                /* 5708 SerDes: hardware already resolved pause. */
                bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
                if (val & BCM5708S_1000X_STAT1_TX_PAUSE)
                        bp->flow_ctrl |= FLOW_CTRL_TX;
                if (val & BCM5708S_1000X_STAT1_RX_PAUSE)
                        bp->flow_ctrl |= FLOW_CTRL_RX;
                return;
        }

        bnx2_read_phy(bp, MII_ADVERTISE, &local_adv);
        bnx2_read_phy(bp, MII_LPA, &remote_adv);

        if (bp->phy_flags & PHY_SERDES_FLAG) {
                /* Map 1000BASE-X pause bits onto the copper bit
                 * positions so one resolution table below serves both. */
                u32 new_local_adv = 0;
                u32 new_remote_adv = 0;

                if (local_adv & ADVERTISE_1000XPAUSE)
                        new_local_adv |= ADVERTISE_PAUSE_CAP;
                if (local_adv & ADVERTISE_1000XPSE_ASYM)
                        new_local_adv |= ADVERTISE_PAUSE_ASYM;
                if (remote_adv & ADVERTISE_1000XPAUSE)
                        new_remote_adv |= ADVERTISE_PAUSE_CAP;
                if (remote_adv & ADVERTISE_1000XPSE_ASYM)
                        new_remote_adv |= ADVERTISE_PAUSE_ASYM;

                local_adv = new_local_adv;
                remote_adv = new_remote_adv;
        }

        /* See Table 28B-3 of 802.3ab-1999 spec. */
        if (local_adv & ADVERTISE_PAUSE_CAP) {
                if(local_adv & ADVERTISE_PAUSE_ASYM) {
                        if (remote_adv & ADVERTISE_PAUSE_CAP) {
                                bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
                        }
                        else if (remote_adv & ADVERTISE_PAUSE_ASYM) {
                                bp->flow_ctrl = FLOW_CTRL_RX;
                        }
                }
                else {
                        if (remote_adv & ADVERTISE_PAUSE_CAP) {
                                bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
                        }
                }
        }
        else if (local_adv & ADVERTISE_PAUSE_ASYM) {
                if ((remote_adv & ADVERTISE_PAUSE_CAP) &&
                        (remote_adv & ADVERTISE_PAUSE_ASYM)) {

                        bp->flow_ctrl = FLOW_CTRL_TX;
                }
        }
}
701
702 static int
703 bnx2_5708s_linkup(struct bnx2 *bp)
704 {
705         u32 val;
706
707         bp->link_up = 1;
708         bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
709         switch (val & BCM5708S_1000X_STAT1_SPEED_MASK) {
710                 case BCM5708S_1000X_STAT1_SPEED_10:
711                         bp->line_speed = SPEED_10;
712                         break;
713                 case BCM5708S_1000X_STAT1_SPEED_100:
714                         bp->line_speed = SPEED_100;
715                         break;
716                 case BCM5708S_1000X_STAT1_SPEED_1G:
717                         bp->line_speed = SPEED_1000;
718                         break;
719                 case BCM5708S_1000X_STAT1_SPEED_2G5:
720                         bp->line_speed = SPEED_2500;
721                         break;
722         }
723         if (val & BCM5708S_1000X_STAT1_FD)
724                 bp->duplex = DUPLEX_FULL;
725         else
726                 bp->duplex = DUPLEX_HALF;
727
728         return 0;
729 }
730
731 static int
732 bnx2_5706s_linkup(struct bnx2 *bp)
733 {
734         u32 bmcr, local_adv, remote_adv, common;
735
736         bp->link_up = 1;
737         bp->line_speed = SPEED_1000;
738
739         bnx2_read_phy(bp, MII_BMCR, &bmcr);
740         if (bmcr & BMCR_FULLDPLX) {
741                 bp->duplex = DUPLEX_FULL;
742         }
743         else {
744                 bp->duplex = DUPLEX_HALF;
745         }
746
747         if (!(bmcr & BMCR_ANENABLE)) {
748                 return 0;
749         }
750
751         bnx2_read_phy(bp, MII_ADVERTISE, &local_adv);
752         bnx2_read_phy(bp, MII_LPA, &remote_adv);
753
754         common = local_adv & remote_adv;
755         if (common & (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL)) {
756
757                 if (common & ADVERTISE_1000XFULL) {
758                         bp->duplex = DUPLEX_FULL;
759                 }
760                 else {
761                         bp->duplex = DUPLEX_HALF;
762                 }
763         }
764
765         return 0;
766 }
767
768 static int
769 bnx2_copper_linkup(struct bnx2 *bp)
770 {
771         u32 bmcr;
772
773         bnx2_read_phy(bp, MII_BMCR, &bmcr);
774         if (bmcr & BMCR_ANENABLE) {
775                 u32 local_adv, remote_adv, common;
776
777                 bnx2_read_phy(bp, MII_CTRL1000, &local_adv);
778                 bnx2_read_phy(bp, MII_STAT1000, &remote_adv);
779
780                 common = local_adv & (remote_adv >> 2);
781                 if (common & ADVERTISE_1000FULL) {
782                         bp->line_speed = SPEED_1000;
783                         bp->duplex = DUPLEX_FULL;
784                 }
785                 else if (common & ADVERTISE_1000HALF) {
786                         bp->line_speed = SPEED_1000;
787                         bp->duplex = DUPLEX_HALF;
788                 }
789                 else {
790                         bnx2_read_phy(bp, MII_ADVERTISE, &local_adv);
791                         bnx2_read_phy(bp, MII_LPA, &remote_adv);
792
793                         common = local_adv & remote_adv;
794                         if (common & ADVERTISE_100FULL) {
795                                 bp->line_speed = SPEED_100;
796                                 bp->duplex = DUPLEX_FULL;
797                         }
798                         else if (common & ADVERTISE_100HALF) {
799                                 bp->line_speed = SPEED_100;
800                                 bp->duplex = DUPLEX_HALF;
801                         }
802                         else if (common & ADVERTISE_10FULL) {
803                                 bp->line_speed = SPEED_10;
804                                 bp->duplex = DUPLEX_FULL;
805                         }
806                         else if (common & ADVERTISE_10HALF) {
807                                 bp->line_speed = SPEED_10;
808                                 bp->duplex = DUPLEX_HALF;
809                         }
810                         else {
811                                 bp->line_speed = 0;
812                                 bp->link_up = 0;
813                         }
814                 }
815         }
816         else {
817                 if (bmcr & BMCR_SPEED100) {
818                         bp->line_speed = SPEED_100;
819                 }
820                 else {
821                         bp->line_speed = SPEED_10;
822                 }
823                 if (bmcr & BMCR_FULLDPLX) {
824                         bp->duplex = DUPLEX_FULL;
825                 }
826                 else {
827                         bp->duplex = DUPLEX_HALF;
828                 }
829         }
830
831         return 0;
832 }
833
/* Reprogram the EMAC to match the link parameters already resolved in
 * *bp (line_speed, duplex, flow_ctrl, link_up), then acknowledge the
 * EMAC link-change attention.  Always returns 0.
 */
static int
bnx2_set_mac_link(struct bnx2 *bp)
{
	u32 val;

	/* TX lengths (IPG/slot time).  1000 Mbps half duplex needs the
	 * larger slot time; everything else uses the default.  The raw
	 * values are chip-specific -- see the NetXtreme II docs.
	 */
	REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x2620);
	if (bp->link_up && (bp->line_speed == SPEED_1000) &&
		(bp->duplex == DUPLEX_HALF)) {
		REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x26ff);
	}

	/* Configure the EMAC mode register. */
	val = REG_RD(bp, BNX2_EMAC_MODE);

	/* Clear all mode bits this function owns before re-deriving them. */
	val &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
		BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
		BNX2_EMAC_MODE_25G_MODE);

	if (bp->link_up) {
		switch (bp->line_speed) {
			case SPEED_10:
				/* 5706 has no dedicated 10M port mode;
				 * it falls through to plain MII.
				 */
				if (CHIP_NUM(bp) != CHIP_NUM_5706) {
					val |= BNX2_EMAC_MODE_PORT_MII_10M;
					break;
				}
				/* fall through */
			case SPEED_100:
				val |= BNX2_EMAC_MODE_PORT_MII;
				break;
			case SPEED_2500:
				/* 2.5G uses GMII port mode plus 25G bit. */
				val |= BNX2_EMAC_MODE_25G_MODE;
				/* fall through */
			case SPEED_1000:
				val |= BNX2_EMAC_MODE_PORT_GMII;
				break;
		}
	}
	else {
		/* No link: park the port in GMII mode. */
		val |= BNX2_EMAC_MODE_PORT_GMII;
	}

	/* Set the MAC to operate in the appropriate duplex mode. */
	if (bp->duplex == DUPLEX_HALF)
		val |= BNX2_EMAC_MODE_HALF_DUPLEX;
	REG_WR(bp, BNX2_EMAC_MODE, val);

	/* Enable/disable rx PAUSE.  The cached bp->rx_mode is kept in
	 * sync so later rx-mode updates preserve the flow-control bit.
	 */
	bp->rx_mode &= ~BNX2_EMAC_RX_MODE_FLOW_EN;

	if (bp->flow_ctrl & FLOW_CTRL_RX)
		bp->rx_mode |= BNX2_EMAC_RX_MODE_FLOW_EN;
	REG_WR(bp, BNX2_EMAC_RX_MODE, bp->rx_mode);

	/* Enable/disable tx PAUSE. */
	val = REG_RD(bp, BNX2_EMAC_TX_MODE);
	val &= ~BNX2_EMAC_TX_MODE_FLOW_EN;

	if (bp->flow_ctrl & FLOW_CTRL_TX)
		val |= BNX2_EMAC_TX_MODE_FLOW_EN;
	REG_WR(bp, BNX2_EMAC_TX_MODE, val);

	/* Acknowledge the interrupt. */
	REG_WR(bp, BNX2_EMAC_STATUS, BNX2_EMAC_STATUS_LINK_CHANGE);

	return 0;
}
900
/* Re-evaluate link state after a link attention: read the PHY, resolve
 * speed/duplex/flow-control on link-up, restore autoneg on link-down,
 * report any transition, and reprogram the MAC.  Always returns 0.
 */
static int
bnx2_set_link(struct bnx2 *bp)
{
	u32 bmsr;
	u8 link_up;

	/* In loopback modes the link is forced up; nothing to resolve. */
	if (bp->loopback == MAC_LOOPBACK || bp->loopback == PHY_LOOPBACK) {
		bp->link_up = 1;
		return 0;
	}

	link_up = bp->link_up;

	/* BMSR link status is latched-low; read twice so the second
	 * read reflects the current state.
	 */
	bnx2_read_phy(bp, MII_BMSR, &bmsr);
	bnx2_read_phy(bp, MII_BMSR, &bmsr);

	/* 5706 SerDes: override the BMSR link bit with the EMAC's own
	 * link indication.
	 */
	if ((bp->phy_flags & PHY_SERDES_FLAG) &&
	    (CHIP_NUM(bp) == CHIP_NUM_5706)) {
		u32 val;

		val = REG_RD(bp, BNX2_EMAC_STATUS);
		if (val & BNX2_EMAC_STATUS_LINK)
			bmsr |= BMSR_LSTATUS;
		else
			bmsr &= ~BMSR_LSTATUS;
	}

	if (bmsr & BMSR_LSTATUS) {
		bp->link_up = 1;

		/* Resolve speed/duplex with the chip-appropriate helper. */
		if (bp->phy_flags & PHY_SERDES_FLAG) {
			if (CHIP_NUM(bp) == CHIP_NUM_5706)
				bnx2_5706s_linkup(bp);
			else if (CHIP_NUM(bp) == CHIP_NUM_5708)
				bnx2_5708s_linkup(bp);
		}
		else {
			bnx2_copper_linkup(bp);
		}
		bnx2_resolve_flow_ctrl(bp);
	}
	else {
		/* Link down.  If a SerDes link was forced to 2.5G or had
		 * autoneg turned off, re-enable autoneg so the link can
		 * renegotiate and come back up.
		 */
		if ((bp->phy_flags & PHY_SERDES_FLAG) &&
			(bp->autoneg & AUTONEG_SPEED)) {

			u32 bmcr;

			bnx2_read_phy(bp, MII_BMCR, &bmcr);
			bmcr &= ~BCM5708S_BMCR_FORCE_2500;
			if (!(bmcr & BMCR_ANENABLE)) {
				bnx2_write_phy(bp, MII_BMCR, bmcr |
					BMCR_ANENABLE);
			}
		}
		bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG;
		bp->link_up = 0;
	}

	/* Only log when the state actually changed. */
	if (bp->link_up != link_up) {
		bnx2_report_link(bp);
	}

	bnx2_set_mac_link(bp);

	return 0;
}
967
/* Soft-reset the PHY and wait for the self-clearing BMCR_RESET bit to
 * drop.  Polls in 10us steps (up to ~1ms total).  Returns 0 on success
 * or -EBUSY if the PHY never comes out of reset.
 */
static int
bnx2_reset_phy(struct bnx2 *bp)
{
	int i;
	u32 reg;

	bnx2_write_phy(bp, MII_BMCR, BMCR_RESET);

#define PHY_RESET_MAX_WAIT 100
	for (i = 0; i < PHY_RESET_MAX_WAIT; i++) {
		udelay(10);

		bnx2_read_phy(bp, MII_BMCR, &reg);
		if (!(reg & BMCR_RESET)) {
			/* Extra settle time after reset completes. */
			udelay(20);
			break;
		}
	}
	if (i == PHY_RESET_MAX_WAIT) {
		return -EBUSY;
	}
	return 0;
}
991
992 static u32
993 bnx2_phy_get_pause_adv(struct bnx2 *bp)
994 {
995         u32 adv = 0;
996
997         if ((bp->req_flow_ctrl & (FLOW_CTRL_RX | FLOW_CTRL_TX)) ==
998                 (FLOW_CTRL_RX | FLOW_CTRL_TX)) {
999
1000                 if (bp->phy_flags & PHY_SERDES_FLAG) {
1001                         adv = ADVERTISE_1000XPAUSE;
1002                 }
1003                 else {
1004                         adv = ADVERTISE_PAUSE_CAP;
1005                 }
1006         }
1007         else if (bp->req_flow_ctrl & FLOW_CTRL_TX) {
1008                 if (bp->phy_flags & PHY_SERDES_FLAG) {
1009                         adv = ADVERTISE_1000XPSE_ASYM;
1010                 }
1011                 else {
1012                         adv = ADVERTISE_PAUSE_ASYM;
1013                 }
1014         }
1015         else if (bp->req_flow_ctrl & FLOW_CTRL_RX) {
1016                 if (bp->phy_flags & PHY_SERDES_FLAG) {
1017                         adv = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
1018                 }
1019                 else {
1020                         adv = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
1021                 }
1022         }
1023         return adv;
1024 }
1025
/* Program the SerDes PHY for the requested settings.  Two paths:
 * forced speed/duplex (autoneg disabled), or autonegotiation with a
 * freshly built advertisement.  Called with bp->phy_lock held (it is
 * dropped around the msleep below).  Always returns 0.
 */
static int
bnx2_setup_serdes_phy(struct bnx2 *bp)
{
	u32 adv, bmcr, up1;
	u32 new_adv = 0;

	if (!(bp->autoneg & AUTONEG_SPEED)) {
		/* Forced-speed path: program BMCR directly. */
		u32 new_bmcr;
		int force_link_down = 0;

		bnx2_read_phy(bp, MII_ADVERTISE, &adv);
		adv &= ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF);

		bnx2_read_phy(bp, MII_BMCR, &bmcr);
		new_bmcr = bmcr & ~(BMCR_ANENABLE | BCM5708S_BMCR_FORCE_2500);
		new_bmcr |= BMCR_SPEED1000;
		if (bp->req_line_speed == SPEED_2500) {
			/* Forced 2.5G: also enable 2.5G in the UP1 register.
			 * If it was off, the link must be bounced so the
			 * change takes effect.
			 */
			new_bmcr |= BCM5708S_BMCR_FORCE_2500;
			bnx2_read_phy(bp, BCM5708S_UP1, &up1);
			if (!(up1 & BCM5708S_UP1_2G5)) {
				up1 |= BCM5708S_UP1_2G5;
				bnx2_write_phy(bp, BCM5708S_UP1, up1);
				force_link_down = 1;
			}
		} else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
			/* Not 2.5G: make sure the 5708's 2.5G mode is off. */
			bnx2_read_phy(bp, BCM5708S_UP1, &up1);
			if (up1 & BCM5708S_UP1_2G5) {
				up1 &= ~BCM5708S_UP1_2G5;
				bnx2_write_phy(bp, BCM5708S_UP1, up1);
				force_link_down = 1;
			}
		}

		if (bp->req_duplex == DUPLEX_FULL) {
			adv |= ADVERTISE_1000XFULL;
			new_bmcr |= BMCR_FULLDPLX;
		}
		else {
			adv |= ADVERTISE_1000XHALF;
			new_bmcr &= ~BMCR_FULLDPLX;
		}
		if ((new_bmcr != bmcr) || (force_link_down)) {
			/* Force a link down visible on the other side */
			if (bp->link_up) {
				/* Briefly advertise nothing and restart
				 * autoneg so the partner sees link loss,
				 * then apply the forced settings.
				 */
				bnx2_write_phy(bp, MII_ADVERTISE, adv &
					       ~(ADVERTISE_1000XFULL |
						 ADVERTISE_1000XHALF));
				bnx2_write_phy(bp, MII_BMCR, bmcr |
					BMCR_ANRESTART | BMCR_ANENABLE);

				bp->link_up = 0;
				netif_carrier_off(bp->dev);
				bnx2_write_phy(bp, MII_BMCR, new_bmcr);
				bnx2_report_link(bp);
			}
			bnx2_write_phy(bp, MII_ADVERTISE, adv);
			bnx2_write_phy(bp, MII_BMCR, new_bmcr);
		}
		return 0;
	}

	/* Autoneg path.  2.5G-capable PHYs always advertise 2.5G. */
	if (bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) {
		bnx2_read_phy(bp, BCM5708S_UP1, &up1);
		up1 |= BCM5708S_UP1_2G5;
		bnx2_write_phy(bp, BCM5708S_UP1, up1);
	}

	if (bp->advertising & ADVERTISED_1000baseT_Full)
		new_adv |= ADVERTISE_1000XFULL;

	new_adv |= bnx2_phy_get_pause_adv(bp);

	bnx2_read_phy(bp, MII_ADVERTISE, &adv);
	bnx2_read_phy(bp, MII_BMCR, &bmcr);

	bp->serdes_an_pending = 0;
	/* Restart autoneg only if the advertisement changed or autoneg
	 * was disabled; otherwise leave the link alone.
	 */
	if ((adv != new_adv) || ((bmcr & BMCR_ANENABLE) == 0)) {
		/* Force a link down visible on the other side */
		if (bp->link_up) {
			bnx2_write_phy(bp, MII_BMCR, BMCR_LOOPBACK);
			/* phy_lock is held by the caller; drop it across
			 * the sleep.
			 */
			spin_unlock_bh(&bp->phy_lock);
			msleep(20);
			spin_lock_bh(&bp->phy_lock);
		}

		bnx2_write_phy(bp, MII_ADVERTISE, new_adv);
		bnx2_write_phy(bp, MII_BMCR, bmcr | BMCR_ANRESTART |
			BMCR_ANENABLE);
		/* Speed up link-up time when the link partner
		 * does not autonegotiate which is very common
		 * in blade servers. Some blade servers use
		 * IPMI for keyboard input and it's important
		 * to minimize link disruptions. Autoneg. involves
		 * exchanging base pages plus 3 next pages and
		 * normally completes in about 120 msec.
		 */
		bp->current_interval = SERDES_AN_TIMEOUT;
		bp->serdes_an_pending = 1;
		mod_timer(&bp->timer, jiffies + bp->current_interval);
	}

	return 0;
}
1129
1130 #define ETHTOOL_ALL_FIBRE_SPEED                                         \
1131         (ADVERTISED_1000baseT_Full)
1132
1133 #define ETHTOOL_ALL_COPPER_SPEED                                        \
1134         (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |            \
1135         ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |           \
1136         ADVERTISED_1000baseT_Full)
1137
1138 #define PHY_ALL_10_100_SPEED (ADVERTISE_10HALF | ADVERTISE_10FULL | \
1139         ADVERTISE_100HALF | ADVERTISE_100FULL | ADVERTISE_CSMA)
1140
1141 #define PHY_ALL_1000_SPEED (ADVERTISE_1000HALF | ADVERTISE_1000FULL)
1142
/* Program the copper PHY for the requested settings: either rebuild
 * the autoneg advertisement registers, or force speed/duplex via BMCR.
 * Called with bp->phy_lock held (dropped around the msleep below).
 * Always returns 0.
 */
static int
bnx2_setup_copper_phy(struct bnx2 *bp)
{
	u32 bmcr;
	u32 new_bmcr;

	bnx2_read_phy(bp, MII_BMCR, &bmcr);

	if (bp->autoneg & AUTONEG_SPEED) {
		u32 adv_reg, adv1000_reg;
		u32 new_adv_reg = 0;
		u32 new_adv1000_reg = 0;

		/* Keep only the bits we manage so the comparison below
		 * ignores unrelated advertisement bits.
		 */
		bnx2_read_phy(bp, MII_ADVERTISE, &adv_reg);
		adv_reg &= (PHY_ALL_10_100_SPEED | ADVERTISE_PAUSE_CAP |
			ADVERTISE_PAUSE_ASYM);

		bnx2_read_phy(bp, MII_CTRL1000, &adv1000_reg);
		adv1000_reg &= PHY_ALL_1000_SPEED;

		/* Build the new advertisement from ethtool-style flags. */
		if (bp->advertising & ADVERTISED_10baseT_Half)
			new_adv_reg |= ADVERTISE_10HALF;
		if (bp->advertising & ADVERTISED_10baseT_Full)
			new_adv_reg |= ADVERTISE_10FULL;
		if (bp->advertising & ADVERTISED_100baseT_Half)
			new_adv_reg |= ADVERTISE_100HALF;
		if (bp->advertising & ADVERTISED_100baseT_Full)
			new_adv_reg |= ADVERTISE_100FULL;
		if (bp->advertising & ADVERTISED_1000baseT_Full)
			new_adv1000_reg |= ADVERTISE_1000FULL;

		new_adv_reg |= ADVERTISE_CSMA;

		new_adv_reg |= bnx2_phy_get_pause_adv(bp);

		/* Restart autoneg only if something actually changed or
		 * autoneg was disabled.
		 */
		if ((adv1000_reg != new_adv1000_reg) ||
			(adv_reg != new_adv_reg) ||
			((bmcr & BMCR_ANENABLE) == 0)) {

			bnx2_write_phy(bp, MII_ADVERTISE, new_adv_reg);
			bnx2_write_phy(bp, MII_CTRL1000, new_adv1000_reg);
			bnx2_write_phy(bp, MII_BMCR, BMCR_ANRESTART |
				BMCR_ANENABLE);
		}
		else if (bp->link_up) {
			/* Flow ctrl may have changed from auto to forced */
			/* or vice-versa. */

			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);
		}
		return 0;
	}

	/* Forced-speed path: only 10/100 and duplex are forced here. */
	new_bmcr = 0;
	if (bp->req_line_speed == SPEED_100) {
		new_bmcr |= BMCR_SPEED100;
	}
	if (bp->req_duplex == DUPLEX_FULL) {
		new_bmcr |= BMCR_FULLDPLX;
	}
	if (new_bmcr != bmcr) {
		u32 bmsr;

		/* BMSR link status is latched-low; read twice. */
		bnx2_read_phy(bp, MII_BMSR, &bmsr);
		bnx2_read_phy(bp, MII_BMSR, &bmsr);

		if (bmsr & BMSR_LSTATUS) {
			/* Force link down */
			bnx2_write_phy(bp, MII_BMCR, BMCR_LOOPBACK);
			spin_unlock_bh(&bp->phy_lock);
			msleep(50);
			spin_lock_bh(&bp->phy_lock);

			bnx2_read_phy(bp, MII_BMSR, &bmsr);
			bnx2_read_phy(bp, MII_BMSR, &bmsr);
		}

		bnx2_write_phy(bp, MII_BMCR, new_bmcr);

		/* Normally, the new speed is setup after the link has
		 * gone down and up again. In some cases, link will not go
		 * down so we need to set up the new speed here.
		 */
		if (bmsr & BMSR_LSTATUS) {
			bp->line_speed = bp->req_line_speed;
			bp->duplex = bp->req_duplex;
			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);
		}
	}
	return 0;
}
1236
1237 static int
1238 bnx2_setup_phy(struct bnx2 *bp)
1239 {
1240         if (bp->loopback == MAC_LOOPBACK)
1241                 return 0;
1242
1243         if (bp->phy_flags & PHY_SERDES_FLAG) {
1244                 return (bnx2_setup_serdes_phy(bp));
1245         }
1246         else {
1247                 return (bnx2_setup_copper_phy(bp));
1248         }
1249 }
1250
/* One-time init of the 5708 SerDes PHY: select IEEE register mapping,
 * enable fiber mode with auto-detect, apply early-revision TX signal
 * workarounds, and program board-specific TX control from shared
 * hardware config.  Always returns 0.
 */
static int
bnx2_init_5708s_phy(struct bnx2 *bp)
{
	u32 val;

	/* Use the IEEE-style register set (DIG3 block). */
	bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG3);
	bnx2_write_phy(bp, BCM5708S_DIG_3_0, BCM5708S_DIG_3_0_USE_IEEE);
	bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);

	/* Fiber mode with signal auto-detection. */
	bnx2_read_phy(bp, BCM5708S_1000X_CTL1, &val);
	val |= BCM5708S_1000X_CTL1_FIBER_MODE | BCM5708S_1000X_CTL1_AUTODET_EN;
	bnx2_write_phy(bp, BCM5708S_1000X_CTL1, val);

	/* Enable parallel detection. */
	bnx2_read_phy(bp, BCM5708S_1000X_CTL2, &val);
	val |= BCM5708S_1000X_CTL2_PLLEL_DET_EN;
	bnx2_write_phy(bp, BCM5708S_1000X_CTL2, val);

	/* Advertise 2.5G where the hardware supports it. */
	if (bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) {
		bnx2_read_phy(bp, BCM5708S_UP1, &val);
		val |= BCM5708S_UP1_2G5;
		bnx2_write_phy(bp, BCM5708S_UP1, val);
	}

	if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_B1)) {
		/* increase tx signal amplitude */
		bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
			       BCM5708S_BLK_ADDR_TX_MISC);
		bnx2_read_phy(bp, BCM5708S_TX_ACTL1, &val);
		val &= ~BCM5708S_TX_ACTL1_DRIVER_VCM;
		bnx2_write_phy(bp, BCM5708S_TX_ACTL1, val);
		bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);
	}

	/* Board-specific TX control value from shared-memory config;
	 * applied only on backplane designs.
	 */
	val = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_CONFIG) &
	      BNX2_PORT_HW_CFG_CFG_TXCTL3_MASK;

	if (val) {
		u32 is_backplane;

		is_backplane = REG_RD_IND(bp, bp->shmem_base +
					  BNX2_SHARED_HW_CFG_CONFIG);
		if (is_backplane & BNX2_SHARED_HW_CFG_PHY_BACKPLANE) {
			bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
				       BCM5708S_BLK_ADDR_TX_MISC);
			bnx2_write_phy(bp, BCM5708S_TX_ACTL3, val);
			bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
				       BCM5708S_BLK_ADDR_DIG);
		}
	}
	return 0;
}
1304
/* One-time init of the 5706 SerDes PHY.  Programs jumbo/standard frame
 * settings via vendor shadow registers (0x18, 0x1c -- values are from
 * Broadcom; exact semantics undocumented here).  Always returns 0.
 */
static int
bnx2_init_5706s_phy(struct bnx2 *bp)
{
	bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG;

	if (CHIP_NUM(bp) == CHIP_NUM_5706)
		REG_WR(bp, BNX2_MISC_GP_HW_CTL0, 0x300);

	if (bp->dev->mtu > 1500) {
		u32 val;

		/* Set extended packet length bit */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, (val & 0xfff8) | 0x4000);

		bnx2_write_phy(bp, 0x1c, 0x6c00);
		bnx2_read_phy(bp, 0x1c, &val);
		bnx2_write_phy(bp, 0x1c, (val & 0x3ff) | 0xec02);
	}
	else {
		u32 val;

		/* Standard MTU: clear the extended length bits again. */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, val & ~0x4007);

		bnx2_write_phy(bp, 0x1c, 0x6c00);
		bnx2_read_phy(bp, 0x1c, &val);
		bnx2_write_phy(bp, 0x1c, (val & 0x3fd) | 0xec00);
	}

	return 0;
}
1339
/* One-time init of the copper PHY: apply CRC and early-DAC errata
 * workarounds, set extended packet length for jumbo MTU, and enable
 * ethernet@wirespeed.  Registers 0x10/0x15/0x17/0x18 are Broadcom
 * shadow/expansion registers; the magic values come from the vendor.
 * Always returns 0.
 */
static int
bnx2_init_copper_phy(struct bnx2 *bp)
{
	u32 val;

	/* CRC errata workaround: vendor-supplied expansion register
	 * write sequence.
	 */
	if (bp->phy_flags & PHY_CRC_FIX_FLAG) {
		bnx2_write_phy(bp, 0x18, 0x0c00);
		bnx2_write_phy(bp, 0x17, 0x000a);
		bnx2_write_phy(bp, 0x15, 0x310b);
		bnx2_write_phy(bp, 0x17, 0x201f);
		bnx2_write_phy(bp, 0x15, 0x9506);
		bnx2_write_phy(bp, 0x17, 0x401f);
		bnx2_write_phy(bp, 0x15, 0x14e2);
		bnx2_write_phy(bp, 0x18, 0x0400);
	}

	/* Disable early DAC (bit 8 of DSP expand register 8). */
	if (bp->phy_flags & PHY_DIS_EARLY_DAC_FLAG) {
		bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS,
			       MII_BNX2_DSP_EXPAND_REG | 0x8);
		bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &val);
		val &= ~(1 << 8);
		bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val);
	}

	if (bp->dev->mtu > 1500) {
		/* Set extended packet length bit */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, val | 0x4000);

		bnx2_read_phy(bp, 0x10, &val);
		bnx2_write_phy(bp, 0x10, val | 0x1);
	}
	else {
		/* Standard MTU: clear the extended length bits. */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, val & ~0x4007);

		bnx2_read_phy(bp, 0x10, &val);
		bnx2_write_phy(bp, 0x10, val & ~0x1);
	}

	/* ethernet@wirespeed */
	bnx2_write_phy(bp, 0x18, 0x7007);
	bnx2_read_phy(bp, 0x18, &val);
	bnx2_write_phy(bp, 0x18, val | (1 << 15) | (1 << 4));
	return 0;
}
1388
1389
/* Full PHY bring-up: set link-ready interrupt mode, reset the PHY,
 * read its ID, run the chip/media-specific init, then apply the
 * current link settings.  Returns the init routine's status (the
 * setup step's result is not propagated).
 */
static int
bnx2_init_phy(struct bnx2 *bp)
{
	u32 val;
	int rc = 0;

	bp->phy_flags &= ~PHY_INT_MODE_MASK_FLAG;
	bp->phy_flags |= PHY_INT_MODE_LINK_READY_FLAG;

	/* Only link attentions from the EMAC. */
	REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);

	bnx2_reset_phy(bp);

	/* PHY ID: high 16 bits from PHYSID1, low 16 from PHYSID2. */
	bnx2_read_phy(bp, MII_PHYSID1, &val);
	bp->phy_id = val << 16;
	bnx2_read_phy(bp, MII_PHYSID2, &val);
	bp->phy_id |= val & 0xffff;

	if (bp->phy_flags & PHY_SERDES_FLAG) {
		if (CHIP_NUM(bp) == CHIP_NUM_5706)
			rc = bnx2_init_5706s_phy(bp);
		else if (CHIP_NUM(bp) == CHIP_NUM_5708)
			rc = bnx2_init_5708s_phy(bp);
	}
	else {
		rc = bnx2_init_copper_phy(bp);
	}

	bnx2_setup_phy(bp);

	return rc;
}
1422
1423 static int
1424 bnx2_set_mac_loopback(struct bnx2 *bp)
1425 {
1426         u32 mac_mode;
1427
1428         mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
1429         mac_mode &= ~BNX2_EMAC_MODE_PORT;
1430         mac_mode |= BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK;
1431         REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
1432         bp->link_up = 1;
1433         return 0;
1434 }
1435
1436 static int bnx2_test_link(struct bnx2 *);
1437
/* Put the PHY into loopback at forced 1000/full, wait up to ~1s for
 * the link test to report up, then force the EMAC into GMII mode.
 * Returns 0 on success or the PHY-write error code.
 */
static int
bnx2_set_phy_loopback(struct bnx2 *bp)
{
	u32 mac_mode;
	int rc, i;

	/* PHY accesses require phy_lock. */
	spin_lock_bh(&bp->phy_lock);
	rc = bnx2_write_phy(bp, MII_BMCR, BMCR_LOOPBACK | BMCR_FULLDPLX |
			    BMCR_SPEED1000);
	spin_unlock_bh(&bp->phy_lock);
	if (rc)
		return rc;

	/* Poll for the loopback link (10 x 100ms); proceed regardless
	 * after the timeout.
	 */
	for (i = 0; i < 10; i++) {
		if (bnx2_test_link(bp) == 0)
			break;
		msleep(100);
	}

	/* Clear all loopback/force/port bits, then select GMII. */
	mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
	mac_mode &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
		      BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
		      BNX2_EMAC_MODE_25G_MODE);

	mac_mode |= BNX2_EMAC_MODE_PORT_GMII;
	REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
	bp->link_up = 1;
	return 0;
}
1467
/* Handshake a message with the bootcode firmware through the shared
 * driver/firmware mailboxes.  A rolling sequence number is embedded in
 * msg_data; the firmware echoes it in its ACK field.
 *
 * Returns 0 on success (or unconditionally for WAIT0-type messages),
 * -EBUSY if the firmware never acknowledges, -EIO if it acknowledges
 * with a non-OK status.  @silent suppresses the timeout log message.
 */
static int
bnx2_fw_sync(struct bnx2 *bp, u32 msg_data, int silent)
{
	int i;
	u32 val;

	bp->fw_wr_seq++;
	msg_data |= bp->fw_wr_seq;

	REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_MB, msg_data);

	/* wait for an acknowledgement. */
	for (i = 0; i < (FW_ACK_TIME_OUT_MS / 10); i++) {
		msleep(10);

		val = REG_RD_IND(bp, bp->shmem_base + BNX2_FW_MB);

		/* ACK field must echo our sequence number. */
		if ((val & BNX2_FW_MSG_ACK) == (msg_data & BNX2_DRV_MSG_SEQ))
			break;
	}
	/* WAIT0 messages do not require firmware completion status. */
	if ((msg_data & BNX2_DRV_MSG_DATA) == BNX2_DRV_MSG_DATA_WAIT0)
		return 0;

	/* If we timed out, inform the firmware that this is the case. */
	if ((val & BNX2_FW_MSG_ACK) != (msg_data & BNX2_DRV_MSG_SEQ)) {
		if (!silent)
			printk(KERN_ERR PFX "fw sync timeout, reset code = "
					    "%x\n", msg_data);

		msg_data &= ~BNX2_DRV_MSG_CODE;
		msg_data |= BNX2_DRV_MSG_CODE_FW_TIMEOUT;

		REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_MB, msg_data);

		return -EBUSY;
	}

	if ((val & BNX2_FW_MSG_STATUS_MASK) != BNX2_FW_MSG_STATUS_OK)
		return -EIO;

	return 0;
}
1510
/* 5709: point the chip's context-memory page table at the host DMA
 * pages in bp->ctx_blk_mapping[].  Each entry write is posted via
 * WRITE_REQ and polled until the hardware clears it.  Returns 0 on
 * success or -EBUSY if an entry write never completes.
 */
static int
bnx2_init_5709_context(struct bnx2 *bp)
{
	int i, ret = 0;
	u32 val;

	/* Enable the context engine and start memory init.  Page size
	 * is encoded relative to 256 bytes in bits 16+.  (The (1 << 12)
	 * bit is a chip-specific command flag -- see chipset docs.)
	 */
	val = BNX2_CTX_COMMAND_ENABLED | BNX2_CTX_COMMAND_MEM_INIT | (1 << 12);
	val |= (BCM_PAGE_BITS - 8) << 16;
	REG_WR(bp, BNX2_CTX_COMMAND, val);
	for (i = 0; i < bp->ctx_pages; i++) {
		int j;

		/* Low 32 bits of the DMA address plus the valid bit. */
		REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA0,
		       (bp->ctx_blk_mapping[i] & 0xffffffff) |
		       BNX2_CTX_HOST_PAGE_TBL_DATA0_VALID);
		/* High 32 bits of the DMA address. */
		REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA1,
		       (u64) bp->ctx_blk_mapping[i] >> 32);
		/* Commit entry i. */
		REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL, i |
		       BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ);
		/* WRITE_REQ is self-clearing; poll up to 10 x 5us. */
		for (j = 0; j < 10; j++) {

			val = REG_RD(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL);
			if (!(val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ))
				break;
			udelay(5);
		}
		if (val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ) {
			ret = -EBUSY;
			break;
		}
	}
	return ret;
}
1544
/* Zero all 96 on-chip connection contexts (pre-5709 chips).  On the
 * 5706 A0, context IDs with bit 3 set map to a different physical
 * context block, so the physical CID is remapped before zeroing.
 */
static void
bnx2_init_context(struct bnx2 *bp)
{
	u32 vcid;

	vcid = 96;
	while (vcid) {
		u32 vcid_addr, pcid_addr, offset;

		vcid--;

		if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
			u32 new_vcid;

			vcid_addr = GET_PCID_ADDR(vcid);
			/* A0 erratum: remap vcids with bit 3 set into
			 * the 0x60+ physical range.
			 */
			if (vcid & 0x8) {
				new_vcid = 0x60 + (vcid & 0xf0) + (vcid & 0x7);
			}
			else {
				new_vcid = vcid;
			}
			pcid_addr = GET_PCID_ADDR(new_vcid);
		}
		else {
			vcid_addr = GET_CID_ADDR(vcid);
			pcid_addr = vcid_addr;
		}

		/* Map the context window at virtual address 0. */
		REG_WR(bp, BNX2_CTX_VIRT_ADDR, 0x00);
		REG_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr);

		/* Zero out the context. */
		for (offset = 0; offset < PHY_CTX_SIZE; offset += 4) {
			CTX_WR(bp, 0x00, offset, 0);
		}

		/* Restore the mapping at the context's real address. */
		REG_WR(bp, BNX2_CTX_VIRT_ADDR, vcid_addr);
		REG_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr);
	}
}
1585
1586 static int
1587 bnx2_alloc_bad_rbuf(struct bnx2 *bp)
1588 {
1589         u16 *good_mbuf;
1590         u32 good_mbuf_cnt;
1591         u32 val;
1592
1593         good_mbuf = kmalloc(512 * sizeof(u16), GFP_KERNEL);
1594         if (good_mbuf == NULL) {
1595                 printk(KERN_ERR PFX "Failed to allocate memory in "
1596                                     "bnx2_alloc_bad_rbuf\n");
1597                 return -ENOMEM;
1598         }
1599
1600         REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
1601                 BNX2_MISC_ENABLE_SET_BITS_RX_MBUF_ENABLE);
1602
1603         good_mbuf_cnt = 0;
1604
1605         /* Allocate a bunch of mbufs and save the good ones in an array. */
1606         val = REG_RD_IND(bp, BNX2_RBUF_STATUS1);
1607         while (val & BNX2_RBUF_STATUS1_FREE_COUNT) {
1608                 REG_WR_IND(bp, BNX2_RBUF_COMMAND, BNX2_RBUF_COMMAND_ALLOC_REQ);
1609
1610                 val = REG_RD_IND(bp, BNX2_RBUF_FW_BUF_ALLOC);
1611
1612                 val &= BNX2_RBUF_FW_BUF_ALLOC_VALUE;
1613
1614                 /* The addresses with Bit 9 set are bad memory blocks. */
1615                 if (!(val & (1 << 9))) {
1616                         good_mbuf[good_mbuf_cnt] = (u16) val;
1617                         good_mbuf_cnt++;
1618                 }
1619
1620                 val = REG_RD_IND(bp, BNX2_RBUF_STATUS1);
1621         }
1622
1623         /* Free the good ones back to the mbuf pool thus discarding
1624          * all the bad ones. */
1625         while (good_mbuf_cnt) {
1626                 good_mbuf_cnt--;
1627
1628                 val = good_mbuf[good_mbuf_cnt];
1629                 val = (val << 9) | val | 1;
1630
1631                 REG_WR_IND(bp, BNX2_RBUF_FW_BUF_FREE, val);
1632         }
1633         kfree(good_mbuf);
1634         return 0;
1635 }
1636
1637 static void
1638 bnx2_set_mac_addr(struct bnx2 *bp)
1639 {
1640         u32 val;
1641         u8 *mac_addr = bp->dev->dev_addr;
1642
1643         val = (mac_addr[0] << 8) | mac_addr[1];
1644
1645         REG_WR(bp, BNX2_EMAC_MAC_MATCH0, val);
1646
1647         val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
1648                 (mac_addr[4] << 8) | mac_addr[5];
1649
1650         REG_WR(bp, BNX2_EMAC_MAC_MATCH1, val);
1651 }
1652
/* Allocate and DMA-map a fresh RX skb and install it at ring slot
 * @index, filling in the hardware rx_bd and advancing the producer
 * byte-sequence counter.  Returns 0 on success or -ENOMEM if skb
 * allocation fails.
 */
static inline int
bnx2_alloc_rx_skb(struct bnx2 *bp, u16 index)
{
	struct sk_buff *skb;
	struct sw_bd *rx_buf = &bp->rx_buf_ring[index];
	dma_addr_t mapping;
	struct rx_bd *rxbd = &bp->rx_desc_ring[RX_RING(index)][RX_IDX(index)];
	unsigned long align;

	skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
	if (skb == NULL) {
		return -ENOMEM;
	}

	/* Align skb->data to BNX2_RX_ALIGN for the hardware. */
	if (unlikely((align = (unsigned long) skb->data & (BNX2_RX_ALIGN - 1))))
		skb_reserve(skb, BNX2_RX_ALIGN - align);

	/* NOTE(review): the mapping result is not checked for DMA
	 * mapping errors -- confirm whether this kernel's DMA API
	 * requires it on the supported platforms.
	 */
	mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_use_size,
		PCI_DMA_FROMDEVICE);

	rx_buf->skb = skb;
	pci_unmap_addr_set(rx_buf, mapping, mapping);

	/* 64-bit DMA address split across the two rx_bd words. */
	rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
	rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;

	bp->rx_prod_bseq += bp->rx_buf_use_size;

	return 0;
}
1683
/* Handle a link-state attention from the status block.  The current
 * attention bit is compared against the acked copy; on a change, the
 * ack state is updated via the set/clear command registers and the
 * link is re-evaluated.
 */
static void
bnx2_phy_int(struct bnx2 *bp)
{
	u32 new_link_state, old_link_state;

	new_link_state = bp->status_blk->status_attn_bits &
		STATUS_ATTN_BITS_LINK_STATE;
	old_link_state = bp->status_blk->status_attn_bits_ack &
		STATUS_ATTN_BITS_LINK_STATE;
	if (new_link_state != old_link_state) {
		/* Mirror the new state into the ack bits so this
		 * attention is not processed again.
		 */
		if (new_link_state) {
			REG_WR(bp, BNX2_PCICFG_STATUS_BIT_SET_CMD,
				STATUS_ATTN_BITS_LINK_STATE);
		}
		else {
			REG_WR(bp, BNX2_PCICFG_STATUS_BIT_CLEAR_CMD,
				STATUS_ATTN_BITS_LINK_STATE);
		}
		bnx2_set_link(bp);
	}
}
1705
/* Reclaim tx BDs that the chip has completed, unmap and free their
 * skbs, and wake the tx queue if it was stopped and enough ring space
 * is now available.  Runs from NAPI poll context.
 */
static void
bnx2_tx_int(struct bnx2 *bp)
{
	struct status_block *sblk = bp->status_blk;
	u16 hw_cons, sw_cons, sw_ring_cons;
	int tx_free_bd = 0;

	/* The last entry of each ring page holds a next-page pointer,
	 * not a real BD; the hardware index skips it, so bump hw_cons
	 * past it to keep it comparable with sw_cons.
	 */
	hw_cons = bp->hw_tx_cons = sblk->status_tx_quick_consumer_index0;
	if ((hw_cons & MAX_TX_DESC_CNT) == MAX_TX_DESC_CNT) {
		hw_cons++;
	}
	sw_cons = bp->tx_cons;

	while (sw_cons != hw_cons) {
		struct sw_bd *tx_buf;
		struct sk_buff *skb;
		int i, last;

		sw_ring_cons = TX_RING_IDX(sw_cons);

		tx_buf = &bp->tx_buf_ring[sw_ring_cons];
		skb = tx_buf->skb;

		/* partial BD completions possible with TSO packets */
		if (skb_is_gso(skb)) {
			u16 last_idx, last_ring_idx;

			/* Index of this packet's final BD; account for
			 * the skipped next-page entry when the packet
			 * wraps a ring page.
			 */
			last_idx = sw_cons +
				skb_shinfo(skb)->nr_frags + 1;
			last_ring_idx = sw_ring_cons +
				skb_shinfo(skb)->nr_frags + 1;
			if (unlikely(last_ring_idx >= MAX_TX_DESC_CNT)) {
				last_idx++;
			}
			/* Defer the whole packet until the hardware has
			 * consumed all of its BDs (wrap-safe signed
			 * comparison).
			 */
			if (((s16) ((s16) last_idx - (s16) hw_cons)) > 0) {
				break;
			}
		}

		pci_unmap_single(bp->pdev, pci_unmap_addr(tx_buf, mapping),
			skb_headlen(skb), PCI_DMA_TODEVICE);

		tx_buf->skb = NULL;
		last = skb_shinfo(skb)->nr_frags;

		/* Unmap each fragment page from the following BDs. */
		for (i = 0; i < last; i++) {
			sw_cons = NEXT_TX_BD(sw_cons);

			pci_unmap_page(bp->pdev,
				pci_unmap_addr(
					&bp->tx_buf_ring[TX_RING_IDX(sw_cons)],
					mapping),
				skb_shinfo(skb)->frags[i].size,
				PCI_DMA_TODEVICE);
		}

		sw_cons = NEXT_TX_BD(sw_cons);

		tx_free_bd += last + 1;

		dev_kfree_skb(skb);

		/* Re-read the status block in case more completions
		 * arrived while we were working; same next-page-entry
		 * adjustment as above.
		 */
		hw_cons = bp->hw_tx_cons =
			sblk->status_tx_quick_consumer_index0;

		if ((hw_cons & MAX_TX_DESC_CNT) == MAX_TX_DESC_CNT) {
			hw_cons++;
		}
	}

	bp->tx_cons = sw_cons;
	/* Need to make the tx_cons update visible to bnx2_start_xmit()
	 * before checking for netif_queue_stopped().  Without the
	 * memory barrier, there is a small possibility that bnx2_start_xmit()
	 * will miss it and cause the queue to be stopped forever.
	 */
	smp_mb();

	if (unlikely(netif_queue_stopped(bp->dev)) &&
		     (bnx2_tx_avail(bp) > bp->tx_wake_thresh)) {
		netif_tx_lock(bp->dev);
		/* Re-check under the tx lock to avoid racing with a
		 * concurrent bnx2_start_xmit() stopping the queue.
		 */
		if ((netif_queue_stopped(bp->dev)) &&
		    (bnx2_tx_avail(bp) > bp->tx_wake_thresh))
			netif_wake_queue(bp->dev);
		netif_tx_unlock(bp->dev);
	}
}
1793
/* Recycle an rx buffer whose packet was dropped or copied out:
 * reinstall @skb at the producer slot so the ring never runs empty.
 * The DMA mapping is handed from the consumer slot to the producer
 * slot; the buffer contents are not touched.
 */
static inline void
bnx2_reuse_rx_skb(struct bnx2 *bp, struct sk_buff *skb,
	u16 cons, u16 prod)
{
	struct sw_bd *cons_rx_buf, *prod_rx_buf;
	struct rx_bd *cons_bd, *prod_bd;

	cons_rx_buf = &bp->rx_buf_ring[cons];
	prod_rx_buf = &bp->rx_buf_ring[prod];

	/* Give the buffer back to the device; bnx2_rx_int() only
	 * synced the header area to the CPU.
	 */
	pci_dma_sync_single_for_device(bp->pdev,
		pci_unmap_addr(cons_rx_buf, mapping),
		bp->rx_offset + RX_COPY_THRESH, PCI_DMA_FROMDEVICE);

	bp->rx_prod_bseq += bp->rx_buf_use_size;

	prod_rx_buf->skb = skb;

	/* Same slot: the mapping and BD address are already correct. */
	if (cons == prod)
		return;

	pci_unmap_addr_set(prod_rx_buf, mapping,
			pci_unmap_addr(cons_rx_buf, mapping));

	cons_bd = &bp->rx_desc_ring[RX_RING(cons)][RX_IDX(cons)];
	prod_bd = &bp->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
	prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
	prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;
}
1823
/* Receive up to @budget packets from the rx ring.  Error frames are
 * recycled without being passed up; small packets (when mtu > 1500)
 * are copied into a fresh skb so the large buffer can be reused.
 * Returns the number of packets delivered to the stack.
 */
static int
bnx2_rx_int(struct bnx2 *bp, int budget)
{
	struct status_block *sblk = bp->status_blk;
	u16 hw_cons, sw_cons, sw_ring_cons, sw_prod, sw_ring_prod;
	struct l2_fhdr *rx_hdr;
	int rx_pkt = 0;

	/* Skip the next-page entry at the end of each ring page so
	 * hw_cons stays comparable with the software indices.
	 */
	hw_cons = bp->hw_rx_cons = sblk->status_rx_quick_consumer_index0;
	if ((hw_cons & MAX_RX_DESC_CNT) == MAX_RX_DESC_CNT) {
		hw_cons++;
	}
	sw_cons = bp->rx_cons;
	sw_prod = bp->rx_prod;

	/* Memory barrier necessary as speculative reads of the rx
	 * buffer can be ahead of the index in the status block
	 */
	rmb();
	while (sw_cons != hw_cons) {
		unsigned int len;
		u32 status;
		struct sw_bd *rx_buf;
		struct sk_buff *skb;
		dma_addr_t dma_addr;

		sw_ring_cons = RX_RING_IDX(sw_cons);
		sw_ring_prod = RX_RING_IDX(sw_prod);

		rx_buf = &bp->rx_buf_ring[sw_ring_cons];
		skb = rx_buf->skb;

		rx_buf->skb = NULL;

		dma_addr = pci_unmap_addr(rx_buf, mapping);

		/* Only the l2_fhdr plus the copy-threshold header area
		 * needs to be visible to the CPU at this point.
		 */
		pci_dma_sync_single_for_cpu(bp->pdev, dma_addr,
			bp->rx_offset + RX_COPY_THRESH, PCI_DMA_FROMDEVICE);

		/* The chip prepends an l2_fhdr to the frame data;
		 * pkt_len includes the 4-byte FCS, which is stripped.
		 */
		rx_hdr = (struct l2_fhdr *) skb->data;
		len = rx_hdr->l2_fhdr_pkt_len - 4;

		if ((status = rx_hdr->l2_fhdr_status) &
			(L2_FHDR_ERRORS_BAD_CRC |
			L2_FHDR_ERRORS_PHY_DECODE |
			L2_FHDR_ERRORS_ALIGNMENT |
			L2_FHDR_ERRORS_TOO_SHORT |
			L2_FHDR_ERRORS_GIANT_FRAME)) {

			goto reuse_rx;
		}

		/* Since we don't have a jumbo ring, copy small packets
		 * if mtu > 1500
		 */
		if ((bp->dev->mtu > 1500) && (len <= RX_COPY_THRESH)) {
			struct sk_buff *new_skb;

			new_skb = netdev_alloc_skb(bp->dev, len + 2);
			if (new_skb == NULL)
				goto reuse_rx;

			/* aligned copy */
			memcpy(new_skb->data,
				skb->data + bp->rx_offset - 2,
				len + 2);

			skb_reserve(new_skb, 2);
			skb_put(new_skb, len);

			/* The original buffer goes back on the ring. */
			bnx2_reuse_rx_skb(bp, skb,
				sw_ring_cons, sw_ring_prod);

			skb = new_skb;
		}
		else if (bnx2_alloc_rx_skb(bp, sw_ring_prod) == 0) {
			/* A replacement buffer is now on the ring, so
			 * this one can be handed up the stack.
			 */
			pci_unmap_single(bp->pdev, dma_addr,
				bp->rx_buf_use_size, PCI_DMA_FROMDEVICE);

			skb_reserve(skb, bp->rx_offset);
			skb_put(skb, len);
		}
		else {
reuse_rx:
			/* Error frame or allocation failure: recycle
			 * the buffer and drop the packet.
			 */
			bnx2_reuse_rx_skb(bp, skb,
				sw_ring_cons, sw_ring_prod);
			goto next_rx;
		}

		skb->protocol = eth_type_trans(skb, bp->dev);

		/* Drop oversized frames unless VLAN-tagged (0x8100). */
		if ((len > (bp->dev->mtu + ETH_HLEN)) &&
			(ntohs(skb->protocol) != 0x8100)) {

			dev_kfree_skb(skb);
			goto next_rx;

		}

		/* Trust the hardware checksum only when rx_csum is on,
		 * the frame is TCP/UDP, and no checksum error flagged.
		 */
		skb->ip_summed = CHECKSUM_NONE;
		if (bp->rx_csum &&
			(status & (L2_FHDR_STATUS_TCP_SEGMENT |
			L2_FHDR_STATUS_UDP_DATAGRAM))) {

			if (likely((status & (L2_FHDR_ERRORS_TCP_XSUM |
					      L2_FHDR_ERRORS_UDP_XSUM)) == 0))
				skb->ip_summed = CHECKSUM_UNNECESSARY;
		}

#ifdef BCM_VLAN
		if ((status & L2_FHDR_STATUS_L2_VLAN_TAG) && (bp->vlgrp != 0)) {
			vlan_hwaccel_receive_skb(skb, bp->vlgrp,
				rx_hdr->l2_fhdr_vlan_tag);
		}
		else
#endif
			netif_receive_skb(skb);

		bp->dev->last_rx = jiffies;
		rx_pkt++;

next_rx:
		sw_cons = NEXT_RX_BD(sw_cons);
		sw_prod = NEXT_RX_BD(sw_prod);

		if ((rx_pkt == budget))
			break;

		/* Refresh hw_cons to see if there is new work */
		if (sw_cons == hw_cons) {
			hw_cons = bp->hw_rx_cons =
				sblk->status_rx_quick_consumer_index0;
			if ((hw_cons & MAX_RX_DESC_CNT) == MAX_RX_DESC_CNT)
				hw_cons++;
			rmb();
		}
	}
	bp->rx_cons = sw_cons;
	bp->rx_prod = sw_prod;

	/* Tell the chip the new producer index and byte sequence. */
	REG_WR16(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BDIDX, sw_prod);

	REG_WR(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BSEQ, bp->rx_prod_bseq);

	mmiowb();

	return rx_pkt;

}
1973
1974 /* MSI ISR - The only difference between this and the INTx ISR
1975  * is that the MSI interrupt is always serviced.
1976  */
1977 static irqreturn_t
1978 bnx2_msi(int irq, void *dev_instance)
1979 {
1980         struct net_device *dev = dev_instance;
1981         struct bnx2 *bp = netdev_priv(dev);
1982
1983         prefetch(bp->status_blk);
1984         REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
1985                 BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
1986                 BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
1987
1988         /* Return here if interrupt is disabled. */
1989         if (unlikely(atomic_read(&bp->intr_sem) != 0))
1990                 return IRQ_HANDLED;
1991
1992         netif_rx_schedule(dev);
1993
1994         return IRQ_HANDLED;
1995 }
1996
/* INTx interrupt handler.  The line may be shared, so return IRQ_NONE
 * when the interrupt is not ours.
 */
static irqreturn_t
bnx2_interrupt(int irq, void *dev_instance)
{
	struct net_device *dev = dev_instance;
	struct bnx2 *bp = netdev_priv(dev);

	/* When using INTx, it is possible for the interrupt to arrive
	 * at the CPU before the status block posted prior to the
	 * interrupt. Reading a register will flush the status block.
	 * When using MSI, the MSI message will always complete after
	 * the status block write.
	 */
	if ((bp->status_blk->status_idx == bp->last_status_idx) &&
	    (REG_RD(bp, BNX2_PCICFG_MISC_STATUS) &
	     BNX2_PCICFG_MISC_STATUS_INTA_VALUE))
		return IRQ_NONE;

	/* Mask further interrupts; NAPI polling re-enables them. */
	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
		BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
		BNX2_PCICFG_INT_ACK_CMD_MASK_INT);

	/* Return here if interrupt is shared and is disabled. */
	if (unlikely(atomic_read(&bp->intr_sem) != 0))
		return IRQ_HANDLED;

	netif_rx_schedule(dev);

	return IRQ_HANDLED;
}
2026
2027 static inline int
2028 bnx2_has_work(struct bnx2 *bp)
2029 {
2030         struct status_block *sblk = bp->status_blk;
2031
2032         if ((sblk->status_rx_quick_consumer_index0 != bp->hw_rx_cons) ||
2033             (sblk->status_tx_quick_consumer_index0 != bp->hw_tx_cons))
2034                 return 1;
2035
2036         if ((sblk->status_attn_bits & STATUS_ATTN_BITS_LINK_STATE) !=
2037             (sblk->status_attn_bits_ack & STATUS_ATTN_BITS_LINK_STATE))
2038                 return 1;
2039
2040         return 0;
2041 }
2042
/* NAPI poll routine: service link attentions, reclaim tx completions,
 * receive up to the quota of packets, and re-enable interrupts when
 * all work is done.  Returns 1 if more work remains, 0 otherwise.
 */
static int
bnx2_poll(struct net_device *dev, int *budget)
{
	struct bnx2 *bp = netdev_priv(dev);

	/* Link attention pending when the bit differs from its ack. */
	if ((bp->status_blk->status_attn_bits &
		STATUS_ATTN_BITS_LINK_STATE) !=
		(bp->status_blk->status_attn_bits_ack &
		STATUS_ATTN_BITS_LINK_STATE)) {

		spin_lock(&bp->phy_lock);
		bnx2_phy_int(bp);
		spin_unlock(&bp->phy_lock);

		/* This is needed to take care of transient status
		 * during link changes.
		 */
		REG_WR(bp, BNX2_HC_COMMAND,
		       bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
		REG_RD(bp, BNX2_HC_COMMAND);	/* flush the posted write */
	}

	if (bp->status_blk->status_tx_quick_consumer_index0 != bp->hw_tx_cons)
		bnx2_tx_int(bp);

	if (bp->status_blk->status_rx_quick_consumer_index0 != bp->hw_rx_cons) {
		int orig_budget = *budget;
		int work_done;

		/* Never exceed the per-device quota. */
		if (orig_budget > dev->quota)
			orig_budget = dev->quota;

		work_done = bnx2_rx_int(bp, orig_budget);
		*budget -= work_done;
		dev->quota -= work_done;
	}

	/* Record the processed status index before the final has-work
	 * check; it is what the INTx ISR compares against to decide
	 * ownership of the next interrupt.
	 */
	bp->last_status_idx = bp->status_blk->status_idx;
	rmb();

	if (!bnx2_has_work(bp)) {
		netif_rx_complete(dev);
		if (likely(bp->flags & USING_MSI_FLAG)) {
			REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
			       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
			       bp->last_status_idx);
			return 0;
		}
		/* For INTx: first ack with MASK_INT still set, then
		 * write again without it to unmask.
		 */
		REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
		       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
		       BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
		       bp->last_status_idx);

		REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
		       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
		       bp->last_status_idx);
		return 0;
	}

	return 1;
}
2104
/* Called with rtnl_lock from vlan functions and also netif_tx_lock
 * from set_multicast.
 *
 * Reprogram the EMAC rx mode (promiscuous / VLAN tag handling) and the
 * RPM sort engine (broadcast / multicast filtering) from dev->flags
 * and the multicast list.
 */
static void
bnx2_set_rx_mode(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	u32 rx_mode, sort_mode;
	int i;

	spin_lock_bh(&bp->phy_lock);

	/* Start from the current mode with the promiscuous and
	 * keep-VLAN-tag bits cleared; they are re-added below.
	 */
	rx_mode = bp->rx_mode & ~(BNX2_EMAC_RX_MODE_PROMISCUOUS |
				  BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG);
	sort_mode = 1 | BNX2_RPM_SORT_USER0_BC_EN;
#ifdef BCM_VLAN
	/* Keep VLAN tags on received frames only when no VLAN group is
	 * registered and ASF management firmware is not enabled.
	 */
	if (!bp->vlgrp && !(bp->flags & ASF_ENABLE_FLAG))
		rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
#else
	if (!(bp->flags & ASF_ENABLE_FLAG))
		rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
#endif
	if (dev->flags & IFF_PROMISC) {
		/* Promiscuous mode. */
		rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
		sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN |
			     BNX2_RPM_SORT_USER0_PROM_VLAN;
	}
	else if (dev->flags & IFF_ALLMULTI) {
		/* Accept all multicast: set every hash filter bit. */
		for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
			REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
			       0xffffffff);
		}
		sort_mode |= BNX2_RPM_SORT_USER0_MC_EN;
	}
	else {
		/* Accept one or more multicast(s). */
		struct dev_mc_list *mclist;
		u32 mc_filter[NUM_MC_HASH_REGISTERS];
		u32 regidx;
		u32 bit;
		u32 crc;

		memset(mc_filter, 0, 4 * NUM_MC_HASH_REGISTERS);

		/* Hash each address with CRC32; the low 8 bits select
		 * one of 256 filter bits (8 registers x 32 bits).
		 */
		for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
		     i++, mclist = mclist->next) {

			crc = ether_crc_le(ETH_ALEN, mclist->dmi_addr);
			bit = crc & 0xff;
			regidx = (bit & 0xe0) >> 5;
			bit &= 0x1f;
			mc_filter[regidx] |= (1 << bit);
		}

		for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
			REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
			       mc_filter[i]);
		}

		sort_mode |= BNX2_RPM_SORT_USER0_MC_HSH_EN;
	}

	/* Avoid a register write when the mode is unchanged. */
	if (rx_mode != bp->rx_mode) {
		bp->rx_mode = rx_mode;
		REG_WR(bp, BNX2_EMAC_RX_MODE, rx_mode);
	}

	/* Disable, reprogram, then re-enable the sort engine. */
	REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
	REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode);
	REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode | BNX2_RPM_SORT_USER0_ENA);

	spin_unlock_bh(&bp->phy_lock);
}
2179
2180 #define FW_BUF_SIZE     0x8000
2181
2182 static int
2183 bnx2_gunzip_init(struct bnx2 *bp)
2184 {
2185         if ((bp->gunzip_buf = vmalloc(FW_BUF_SIZE)) == NULL)
2186                 goto gunzip_nomem1;
2187
2188         if ((bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL)) == NULL)
2189                 goto gunzip_nomem2;
2190
2191         bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(), GFP_KERNEL);
2192         if (bp->strm->workspace == NULL)
2193                 goto gunzip_nomem3;
2194
2195         return 0;
2196
2197 gunzip_nomem3:
2198         kfree(bp->strm);
2199         bp->strm = NULL;
2200
2201 gunzip_nomem2:
2202         vfree(bp->gunzip_buf);
2203         bp->gunzip_buf = NULL;
2204
2205 gunzip_nomem1:
2206         printk(KERN_ERR PFX "%s: Cannot allocate firmware buffer for "
2207                             "uncompression.\n", bp->dev->name);
2208         return -ENOMEM;
2209 }
2210
2211 static void
2212 bnx2_gunzip_end(struct bnx2 *bp)
2213 {
2214         kfree(bp->strm->workspace);
2215
2216         kfree(bp->strm);
2217         bp->strm = NULL;
2218
2219         if (bp->gunzip_buf) {
2220                 vfree(bp->gunzip_buf);
2221                 bp->gunzip_buf = NULL;
2222         }
2223 }
2224
2225 static int
2226 bnx2_gunzip(struct bnx2 *bp, u8 *zbuf, int len, void **outbuf, int *outlen)
2227 {
2228         int n, rc;
2229
2230         /* check gzip header */
2231         if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED))
2232                 return -EINVAL;
2233
2234         n = 10;
2235
2236 #define FNAME   0x8
2237         if (zbuf[3] & FNAME)
2238                 while ((zbuf[n++] != 0) && (n < len));
2239
2240         bp->strm->next_in = zbuf + n;
2241         bp->strm->avail_in = len - n;
2242         bp->strm->next_out = bp->gunzip_buf;
2243         bp->strm->avail_out = FW_BUF_SIZE;
2244
2245         rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
2246         if (rc != Z_OK)
2247                 return rc;
2248
2249         rc = zlib_inflate(bp->strm, Z_FINISH);
2250
2251         *outlen = FW_BUF_SIZE - bp->strm->avail_out;
2252         *outbuf = bp->gunzip_buf;
2253
2254         if ((rc != Z_OK) && (rc != Z_STREAM_END))
2255                 printk(KERN_ERR PFX "%s: Firmware decompression error: %s\n",
2256                        bp->dev->name, bp->strm->msg);
2257
2258         zlib_inflateEnd(bp->strm);
2259
2260         if (rc == Z_STREAM_END)
2261                 return 0;
2262
2263         return rc;
2264 }
2265
2266 static void
2267 load_rv2p_fw(struct bnx2 *bp, u32 *rv2p_code, u32 rv2p_code_len,
2268         u32 rv2p_proc)
2269 {
2270         int i;
2271         u32 val;
2272
2273
2274         for (i = 0; i < rv2p_code_len; i += 8) {
2275                 REG_WR(bp, BNX2_RV2P_INSTR_HIGH, cpu_to_le32(*rv2p_code));
2276                 rv2p_code++;
2277                 REG_WR(bp, BNX2_RV2P_INSTR_LOW, cpu_to_le32(*rv2p_code));
2278                 rv2p_code++;
2279
2280                 if (rv2p_proc == RV2P_PROC1) {
2281                         val = (i / 8) | BNX2_RV2P_PROC1_ADDR_CMD_RDWR;
2282                         REG_WR(bp, BNX2_RV2P_PROC1_ADDR_CMD, val);
2283                 }
2284                 else {
2285                         val = (i / 8) | BNX2_RV2P_PROC2_ADDR_CMD_RDWR;
2286                         REG_WR(bp, BNX2_RV2P_PROC2_ADDR_CMD, val);
2287                 }
2288         }
2289
2290         /* Reset the processor, un-stall is done later. */
2291         if (rv2p_proc == RV2P_PROC1) {
2292                 REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC1_RESET);
2293         }
2294         else {
2295                 REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC2_RESET);
2296         }
2297 }
2298
2299 static int
2300 load_cpu_fw(struct bnx2 *bp, struct cpu_reg *cpu_reg, struct fw_info *fw)
2301 {
2302         u32 offset;
2303         u32 val;
2304         int rc;
2305
2306         /* Halt the CPU. */
2307         val = REG_RD_IND(bp, cpu_reg->mode);
2308         val |= cpu_reg->mode_value_halt;
2309         REG_WR_IND(bp, cpu_reg->mode, val);
2310         REG_WR_IND(bp, cpu_reg->state, cpu_reg->state_value_clear);
2311
2312         /* Load the Text area. */
2313         offset = cpu_reg->spad_base + (fw->text_addr - cpu_reg->mips_view_base);
2314         if (fw->gz_text) {
2315                 u32 text_len;
2316                 void *text;
2317
2318                 rc = bnx2_gunzip(bp, fw->gz_text, fw->gz_text_len, &text,
2319                                  &text_len);
2320                 if (rc)
2321                         return rc;
2322
2323                 fw->text = text;
2324         }
2325         if (fw->gz_text) {
2326                 int j;
2327
2328                 for (j = 0; j < (fw->text_len / 4); j++, offset += 4) {
2329                         REG_WR_IND(bp, offset, cpu_to_le32(fw->text[j]));
2330                 }
2331         }
2332
2333         /* Load the Data area. */
2334         offset = cpu_reg->spad_base + (fw->data_addr - cpu_reg->mips_view_base);
2335         if (fw->data) {
2336                 int j;
2337
2338                 for (j = 0; j < (fw->data_len / 4); j++, offset += 4) {
2339                         REG_WR_IND(bp, offset, fw->data[j]);
2340                 }
2341         }
2342
2343         /* Load the SBSS area. */
2344         offset = cpu_reg->spad_base + (fw->sbss_addr - cpu_reg->mips_view_base);
2345         if (fw->sbss) {
2346                 int j;
2347
2348                 for (j = 0; j < (fw->sbss_len / 4); j++, offset += 4) {
2349                         REG_WR_IND(bp, offset, fw->sbss[j]);
2350                 }
2351         }
2352
2353         /* Load the BSS area. */
2354         offset = cpu_reg->spad_base + (fw->bss_addr - cpu_reg->mips_view_base);
2355         if (fw->bss) {
2356                 int j;
2357
2358                 for (j = 0; j < (fw->bss_len/4); j++, offset += 4) {
2359                         REG_WR_IND(bp, offset, fw->bss[j]);
2360                 }
2361         }
2362
2363         /* Load the Read-Only area. */
2364         offset = cpu_reg->spad_base +
2365                 (fw->rodata_addr - cpu_reg->mips_view_base);
2366         if (fw->rodata) {
2367                 int j;
2368
2369                 for (j = 0; j < (fw->rodata_len / 4); j++, offset += 4) {
2370                         REG_WR_IND(bp, offset, fw->rodata[j]);
2371                 }
2372         }
2373
2374         /* Clear the pre-fetch instruction. */
2375         REG_WR_IND(bp, cpu_reg->inst, 0);
2376         REG_WR_IND(bp, cpu_reg->pc, fw->start_addr);
2377
2378         /* Start the CPU. */
2379         val = REG_RD_IND(bp, cpu_reg->mode);
2380         val &= ~cpu_reg->mode_value_halt;
2381         REG_WR_IND(bp, cpu_reg->state, cpu_reg->state_value_clear);
2382         REG_WR_IND(bp, cpu_reg->mode, val);
2383
2384         return 0;
2385 }
2386
/* Load firmware into each of the on-chip processors (RV2P, RXP, TXP,
 * TPAT, COM and -- on the 5709 -- CP), selecting the 09 or 06 images
 * based on the chip generation.  Returns 0 or a negative error; the
 * gunzip scratch state is always released before returning.
 */
static int
bnx2_init_cpus(struct bnx2 *bp)
{
	struct cpu_reg cpu_reg;
	struct fw_info *fw;
	int rc = 0;
	void *text;
	u32 text_len;

	if ((rc = bnx2_gunzip_init(bp)) != 0)
		return rc;

	/* Initialize the RV2P processor. */
	rc = bnx2_gunzip(bp, bnx2_rv2p_proc1, sizeof(bnx2_rv2p_proc1), &text,
			 &text_len);
	if (rc)
		goto init_cpu_err;

	load_rv2p_fw(bp, text, text_len, RV2P_PROC1);

	rc = bnx2_gunzip(bp, bnx2_rv2p_proc2, sizeof(bnx2_rv2p_proc2), &text,
			 &text_len);
	if (rc)
		goto init_cpu_err;

	load_rv2p_fw(bp, text, text_len, RV2P_PROC2);

	/* Initialize the RX Processor.  cpu_reg is refilled below for
	 * each processor with that processor's register addresses.
	 */
	cpu_reg.mode = BNX2_RXP_CPU_MODE;
	cpu_reg.mode_value_halt = BNX2_RXP_CPU_MODE_SOFT_HALT;
	cpu_reg.mode_value_sstep = BNX2_RXP_CPU_MODE_STEP_ENA;
	cpu_reg.state = BNX2_RXP_CPU_STATE;
	cpu_reg.state_value_clear = 0xffffff;
	cpu_reg.gpr0 = BNX2_RXP_CPU_REG_FILE;
	cpu_reg.evmask = BNX2_RXP_CPU_EVENT_MASK;
	cpu_reg.pc = BNX2_RXP_CPU_PROGRAM_COUNTER;
	cpu_reg.inst = BNX2_RXP_CPU_INSTRUCTION;
	cpu_reg.bp = BNX2_RXP_CPU_HW_BREAKPOINT;
	cpu_reg.spad_base = BNX2_RXP_SCRATCH;
	cpu_reg.mips_view_base = 0x8000000;

	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		fw = &bnx2_rxp_fw_09;
	else
		fw = &bnx2_rxp_fw_06;

	rc = load_cpu_fw(bp, &cpu_reg, fw);
	if (rc)
		goto init_cpu_err;

	/* Initialize the TX Processor. */
	cpu_reg.mode = BNX2_TXP_CPU_MODE;
	cpu_reg.mode_value_halt = BNX2_TXP_CPU_MODE_SOFT_HALT;
	cpu_reg.mode_value_sstep = BNX2_TXP_CPU_MODE_STEP_ENA;
	cpu_reg.state = BNX2_TXP_CPU_STATE;
	cpu_reg.state_value_clear = 0xffffff;
	cpu_reg.gpr0 = BNX2_TXP_CPU_REG_FILE;
	cpu_reg.evmask = BNX2_TXP_CPU_EVENT_MASK;
	cpu_reg.pc = BNX2_TXP_CPU_PROGRAM_COUNTER;
	cpu_reg.inst = BNX2_TXP_CPU_INSTRUCTION;
	cpu_reg.bp = BNX2_TXP_CPU_HW_BREAKPOINT;
	cpu_reg.spad_base = BNX2_TXP_SCRATCH;
	cpu_reg.mips_view_base = 0x8000000;

	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		fw = &bnx2_txp_fw_09;
	else
		fw = &bnx2_txp_fw_06;

	rc = load_cpu_fw(bp, &cpu_reg, fw);
	if (rc)
		goto init_cpu_err;

	/* Initialize the TX Patch-up Processor. */
	cpu_reg.mode = BNX2_TPAT_CPU_MODE;
	cpu_reg.mode_value_halt = BNX2_TPAT_CPU_MODE_SOFT_HALT;
	cpu_reg.mode_value_sstep = BNX2_TPAT_CPU_MODE_STEP_ENA;
	cpu_reg.state = BNX2_TPAT_CPU_STATE;
	cpu_reg.state_value_clear = 0xffffff;
	cpu_reg.gpr0 = BNX2_TPAT_CPU_REG_FILE;
	cpu_reg.evmask = BNX2_TPAT_CPU_EVENT_MASK;
	cpu_reg.pc = BNX2_TPAT_CPU_PROGRAM_COUNTER;
	cpu_reg.inst = BNX2_TPAT_CPU_INSTRUCTION;
	cpu_reg.bp = BNX2_TPAT_CPU_HW_BREAKPOINT;
	cpu_reg.spad_base = BNX2_TPAT_SCRATCH;
	cpu_reg.mips_view_base = 0x8000000;

	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		fw = &bnx2_tpat_fw_09;
	else
		fw = &bnx2_tpat_fw_06;

	rc = load_cpu_fw(bp, &cpu_reg, fw);
	if (rc)
		goto init_cpu_err;

	/* Initialize the Completion Processor. */
	cpu_reg.mode = BNX2_COM_CPU_MODE;
	cpu_reg.mode_value_halt = BNX2_COM_CPU_MODE_SOFT_HALT;
	cpu_reg.mode_value_sstep = BNX2_COM_CPU_MODE_STEP_ENA;
	cpu_reg.state = BNX2_COM_CPU_STATE;
	cpu_reg.state_value_clear = 0xffffff;
	cpu_reg.gpr0 = BNX2_COM_CPU_REG_FILE;
	cpu_reg.evmask = BNX2_COM_CPU_EVENT_MASK;
	cpu_reg.pc = BNX2_COM_CPU_PROGRAM_COUNTER;
	cpu_reg.inst = BNX2_COM_CPU_INSTRUCTION;
	cpu_reg.bp = BNX2_COM_CPU_HW_BREAKPOINT;
	cpu_reg.spad_base = BNX2_COM_SCRATCH;
	cpu_reg.mips_view_base = 0x8000000;

	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		fw = &bnx2_com_fw_09;
	else
		fw = &bnx2_com_fw_06;

	rc = load_cpu_fw(bp, &cpu_reg, fw);
	if (rc)
		goto init_cpu_err;

	/* Initialize the Command Processor (firmware exists only for
	 * the 5709).
	 */
	cpu_reg.mode = BNX2_CP_CPU_MODE;
	cpu_reg.mode_value_halt = BNX2_CP_CPU_MODE_SOFT_HALT;
	cpu_reg.mode_value_sstep = BNX2_CP_CPU_MODE_STEP_ENA;
	cpu_reg.state = BNX2_CP_CPU_STATE;
	cpu_reg.state_value_clear = 0xffffff;
	cpu_reg.gpr0 = BNX2_CP_CPU_REG_FILE;
	cpu_reg.evmask = BNX2_CP_CPU_EVENT_MASK;
	cpu_reg.pc = BNX2_CP_CPU_PROGRAM_COUNTER;
	cpu_reg.inst = BNX2_CP_CPU_INSTRUCTION;
	cpu_reg.bp = BNX2_CP_CPU_HW_BREAKPOINT;
	cpu_reg.spad_base = BNX2_CP_SCRATCH;
	cpu_reg.mips_view_base = 0x8000000;

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		fw = &bnx2_cp_fw_09;

		rc = load_cpu_fw(bp, &cpu_reg, fw);
		if (rc)
			goto init_cpu_err;
	}
init_cpu_err:
	bnx2_gunzip_end(bp);
	return rc;
}
2531
/* Transition the device between PCI power states.
 *
 * PCI_D0:    bring the chip back to full power — clear the PM state field,
 *            clear any pending PME status, and undo the magic-packet/ACPI
 *            wake configuration in the EMAC and RPM blocks.
 * PCI_D3hot: prepare for suspend.  If WOL is enabled, force a 10/100
 *            copper link, program the MAC for magic-packet reception with
 *            all-multicast sorting, and keep EMAC/RPM powered; then notify
 *            firmware and write the D3hot state (with PME enable when
 *            waking is armed) into PMCSR.
 *
 * Returns 0 on success, -EINVAL for any other requested state.
 */
static int
bnx2_set_power_state(struct bnx2 *bp, pci_power_t state)
{
	u16 pmcsr;

	/* Current PCI power-management control/status register. */
	pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);

	switch (state) {
	case PCI_D0: {
		u32 val;

		/* Clear the state bits (-> D0) and write 1 to clear any
		 * latched PME status.
		 */
		pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
			(pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
			PCI_PM_CTRL_PME_STATUS);

		if (pmcsr & PCI_PM_CTRL_STATE_MASK)
			/* delay required during transition out of D3hot */
			msleep(20);

		/* Ack any received wake frames and stop looking for
		 * magic packets now that we are awake.
		 */
		val = REG_RD(bp, BNX2_EMAC_MODE);
		val |= BNX2_EMAC_MODE_MPKT_RCVD | BNX2_EMAC_MODE_ACPI_RCVD;
		val &= ~BNX2_EMAC_MODE_MPKT;
		REG_WR(bp, BNX2_EMAC_MODE, val);

		/* Disable ACPI pattern-match wake in the receive path. */
		val = REG_RD(bp, BNX2_RPM_CONFIG);
		val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
		REG_WR(bp, BNX2_RPM_CONFIG, val);
		break;
	}
	case PCI_D3hot: {
		int i;
		u32 val, wol_msg;

		if (bp->wol) {
			u32 advertising;
			u8 autoneg;

			/* Temporarily renegotiate the link down to 10/100
			 * for low-power wake operation, then restore the
			 * user's autoneg/advertising settings.
			 */
			autoneg = bp->autoneg;
			advertising = bp->advertising;

			bp->autoneg = AUTONEG_SPEED;
			bp->advertising = ADVERTISED_10baseT_Half |
				ADVERTISED_10baseT_Full |
				ADVERTISED_100baseT_Half |
				ADVERTISED_100baseT_Full |
				ADVERTISED_Autoneg;

			bnx2_setup_copper_phy(bp);

			bp->autoneg = autoneg;
			bp->advertising = advertising;

			/* Re-program the MAC address used for wake matching. */
			bnx2_set_mac_addr(bp);

			val = REG_RD(bp, BNX2_EMAC_MODE);

			/* Enable port mode. */
			val &= ~BNX2_EMAC_MODE_PORT;
			val |= BNX2_EMAC_MODE_PORT_MII |
			       BNX2_EMAC_MODE_MPKT_RCVD |
			       BNX2_EMAC_MODE_ACPI_RCVD |
			       BNX2_EMAC_MODE_MPKT;

			REG_WR(bp, BNX2_EMAC_MODE, val);

			/* receive all multicast */
			for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
				REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
				       0xffffffff);
			}
			REG_WR(bp, BNX2_EMAC_RX_MODE,
			       BNX2_EMAC_RX_MODE_SORT_MODE);

			/* Sort rule 0: accept broadcast + multicast.  The
			 * register is cleared, loaded, then armed with ENA.
			 */
			val = 1 | BNX2_RPM_SORT_USER0_BC_EN |
			      BNX2_RPM_SORT_USER0_MC_EN;
			REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
			REG_WR(bp, BNX2_RPM_SORT_USER0, val);
			REG_WR(bp, BNX2_RPM_SORT_USER0, val |
			       BNX2_RPM_SORT_USER0_ENA);

			/* Need to enable EMAC and RPM for WOL. */
			REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
			       BNX2_MISC_ENABLE_SET_BITS_RX_PARSER_MAC_ENABLE |
			       BNX2_MISC_ENABLE_SET_BITS_TX_HEADER_Q_ENABLE |
			       BNX2_MISC_ENABLE_SET_BITS_EMAC_ENABLE);

			val = REG_RD(bp, BNX2_RPM_CONFIG);
			val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
			REG_WR(bp, BNX2_RPM_CONFIG, val);

			wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
		}
		else {
			wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
		}

		/* Tell bootcode we are suspending, unless WOL is
		 * unsupported on this board.
		 */
		if (!(bp->flags & NO_WOL_FLAG))
			bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT3 | wol_msg, 0);

		pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
		/* NOTE(review): on 5706 A0/A1 the D3hot state bits (3) are
		 * only written when WOL is armed — presumably a chip errata
		 * workaround; confirm against Broadcom errata docs.
		 */
		if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
		    (CHIP_ID(bp) == CHIP_ID_5706_A1)) {

			if (bp->wol)
				pmcsr |= 3;
		}
		else {
			pmcsr |= 3;	/* PCI PM state field: D3hot */
		}
		if (bp->wol) {
			pmcsr |= PCI_PM_CTRL_PME_ENABLE;
		}
		pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
				      pmcsr);

		/* No more memory access after this point until
		 * device is brought back to D0.
		 */
		udelay(50);
		break;
	}
	default:
		return -EINVAL;
	}
	return 0;
}
2658
2659 static int
2660 bnx2_acquire_nvram_lock(struct bnx2 *bp)
2661 {
2662         u32 val;
2663         int j;
2664
2665         /* Request access to the flash interface. */
2666         REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_SET2);
2667         for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
2668                 val = REG_RD(bp, BNX2_NVM_SW_ARB);
2669                 if (val & BNX2_NVM_SW_ARB_ARB_ARB2)
2670                         break;
2671
2672                 udelay(5);
2673         }
2674
2675         if (j >= NVRAM_TIMEOUT_COUNT)
2676                 return -EBUSY;
2677
2678         return 0;
2679 }
2680
2681 static int
2682 bnx2_release_nvram_lock(struct bnx2 *bp)
2683 {
2684         int j;
2685         u32 val;
2686
2687         /* Relinquish nvram interface. */
2688         REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_CLR2);
2689
2690         for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
2691                 val = REG_RD(bp, BNX2_NVM_SW_ARB);
2692                 if (!(val & BNX2_NVM_SW_ARB_ARB_ARB2))
2693                         break;
2694
2695                 udelay(5);
2696         }
2697
2698         if (j >= NVRAM_TIMEOUT_COUNT)
2699                 return -EBUSY;
2700
2701         return 0;
2702 }
2703
2704
2705 static int
2706 bnx2_enable_nvram_write(struct bnx2 *bp)
2707 {
2708         u32 val;
2709
2710         val = REG_RD(bp, BNX2_MISC_CFG);
2711         REG_WR(bp, BNX2_MISC_CFG, val | BNX2_MISC_CFG_NVM_WR_EN_PCI);
2712
2713         if (!bp->flash_info->buffered) {
2714                 int j;
2715
2716                 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
2717                 REG_WR(bp, BNX2_NVM_COMMAND,
2718                        BNX2_NVM_COMMAND_WREN | BNX2_NVM_COMMAND_DOIT);
2719
2720                 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
2721                         udelay(5);
2722
2723                         val = REG_RD(bp, BNX2_NVM_COMMAND);
2724                         if (val & BNX2_NVM_COMMAND_DONE)
2725                                 break;
2726                 }
2727
2728                 if (j >= NVRAM_TIMEOUT_COUNT)
2729                         return -EBUSY;
2730         }
2731         return 0;
2732 }
2733
2734 static void
2735 bnx2_disable_nvram_write(struct bnx2 *bp)
2736 {
2737         u32 val;
2738
2739         val = REG_RD(bp, BNX2_MISC_CFG);
2740         REG_WR(bp, BNX2_MISC_CFG, val & ~BNX2_MISC_CFG_NVM_WR_EN);
2741 }
2742
2743
2744 static void
2745 bnx2_enable_nvram_access(struct bnx2 *bp)
2746 {
2747         u32 val;
2748
2749         val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
2750         /* Enable both bits, even on read. */
2751         REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
2752                val | BNX2_NVM_ACCESS_ENABLE_EN | BNX2_NVM_ACCESS_ENABLE_WR_EN);
2753 }
2754
2755 static void
2756 bnx2_disable_nvram_access(struct bnx2 *bp)
2757 {
2758         u32 val;
2759
2760         val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
2761         /* Disable both bits, even after read. */
2762         REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
2763                 val & ~(BNX2_NVM_ACCESS_ENABLE_EN |
2764                         BNX2_NVM_ACCESS_ENABLE_WR_EN));
2765 }
2766
2767 static int
2768 bnx2_nvram_erase_page(struct bnx2 *bp, u32 offset)
2769 {
2770         u32 cmd;
2771         int j;
2772
2773         if (bp->flash_info->buffered)
2774                 /* Buffered flash, no erase needed */
2775                 return 0;
2776
2777         /* Build an erase command */
2778         cmd = BNX2_NVM_COMMAND_ERASE | BNX2_NVM_COMMAND_WR |
2779               BNX2_NVM_COMMAND_DOIT;
2780
2781         /* Need to clear DONE bit separately. */
2782         REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
2783
2784         /* Address of the NVRAM to read from. */
2785         REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
2786
2787         /* Issue an erase command. */
2788         REG_WR(bp, BNX2_NVM_COMMAND, cmd);
2789
2790         /* Wait for completion. */
2791         for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
2792                 u32 val;
2793
2794                 udelay(5);
2795
2796                 val = REG_RD(bp, BNX2_NVM_COMMAND);
2797                 if (val & BNX2_NVM_COMMAND_DONE)
2798                         break;
2799         }
2800
2801         if (j >= NVRAM_TIMEOUT_COUNT)
2802                 return -EBUSY;
2803
2804         return 0;
2805 }
2806
2807 static int
2808 bnx2_nvram_read_dword(struct bnx2 *bp, u32 offset, u8 *ret_val, u32 cmd_flags)
2809 {
2810         u32 cmd;
2811         int j;
2812
2813         /* Build the command word. */
2814         cmd = BNX2_NVM_COMMAND_DOIT | cmd_flags;
2815
2816         /* Calculate an offset of a buffered flash. */
2817         if (bp->flash_info->buffered) {
2818                 offset = ((offset / bp->flash_info->page_size) <<
2819                            bp->flash_info->page_bits) +
2820                           (offset % bp->flash_info->page_size);
2821         }
2822
2823         /* Need to clear DONE bit separately. */
2824         REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
2825
2826         /* Address of the NVRAM to read from. */
2827         REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
2828
2829         /* Issue a read command. */
2830         REG_WR(bp, BNX2_NVM_COMMAND, cmd);
2831
2832         /* Wait for completion. */
2833         for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
2834                 u32 val;
2835
2836                 udelay(5);
2837
2838                 val = REG_RD(bp, BNX2_NVM_COMMAND);
2839                 if (val & BNX2_NVM_COMMAND_DONE) {
2840                         val = REG_RD(bp, BNX2_NVM_READ);
2841
2842                         val = be32_to_cpu(val);
2843                         memcpy(ret_val, &val, 4);
2844                         break;
2845                 }
2846         }
2847         if (j >= NVRAM_TIMEOUT_COUNT)
2848                 return -EBUSY;
2849
2850         return 0;
2851 }
2852
2853
2854 static int
2855 bnx2_nvram_write_dword(struct bnx2 *bp, u32 offset, u8 *val, u32 cmd_flags)
2856 {
2857         u32 cmd, val32;
2858         int j;
2859
2860         /* Build the command word. */
2861         cmd = BNX2_NVM_COMMAND_DOIT | BNX2_NVM_COMMAND_WR | cmd_flags;
2862
2863         /* Calculate an offset of a buffered flash. */
2864         if (bp->flash_info->buffered) {
2865                 offset = ((offset / bp->flash_info->page_size) <<
2866                           bp->flash_info->page_bits) +
2867                          (offset % bp->flash_info->page_size);
2868         }
2869
2870         /* Need to clear DONE bit separately. */
2871         REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
2872
2873         memcpy(&val32, val, 4);
2874         val32 = cpu_to_be32(val32);
2875
2876         /* Write the data. */
2877         REG_WR(bp, BNX2_NVM_WRITE, val32);
2878
2879         /* Address of the NVRAM to write to. */
2880         REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
2881
2882         /* Issue the write command. */
2883         REG_WR(bp, BNX2_NVM_COMMAND, cmd);
2884
2885         /* Wait for completion. */
2886         for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
2887                 udelay(5);
2888
2889                 if (REG_RD(bp, BNX2_NVM_COMMAND) & BNX2_NVM_COMMAND_DONE)
2890                         break;
2891         }
2892         if (j >= NVRAM_TIMEOUT_COUNT)
2893                 return -EBUSY;
2894
2895         return 0;
2896 }
2897
2898 static int
2899 bnx2_init_nvram(struct bnx2 *bp)
2900 {
2901         u32 val;
2902         int j, entry_count, rc;
2903         struct flash_spec *flash;
2904
2905         /* Determine the selected interface. */
2906         val = REG_RD(bp, BNX2_NVM_CFG1);
2907
2908         entry_count = sizeof(flash_table) / sizeof(struct flash_spec);
2909
2910         rc = 0;
2911         if (val & 0x40000000) {
2912
2913                 /* Flash interface has been reconfigured */
2914                 for (j = 0, flash = &flash_table[0]; j < entry_count;
2915                      j++, flash++) {
2916                         if ((val & FLASH_BACKUP_STRAP_MASK) ==
2917                             (flash->config1 & FLASH_BACKUP_STRAP_MASK)) {
2918                                 bp->flash_info = flash;
2919                                 break;
2920                         }
2921                 }
2922         }
2923         else {
2924                 u32 mask;
2925                 /* Not yet been reconfigured */
2926
2927                 if (val & (1 << 23))
2928                         mask = FLASH_BACKUP_STRAP_MASK;
2929                 else
2930                         mask = FLASH_STRAP_MASK;
2931
2932                 for (j = 0, flash = &flash_table[0]; j < entry_count;
2933                         j++, flash++) {
2934
2935                         if ((val & mask) == (flash->strapping & mask)) {
2936                                 bp->flash_info = flash;
2937
2938                                 /* Request access to the flash interface. */
2939                                 if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
2940                                         return rc;
2941
2942                                 /* Enable access to flash interface */
2943                                 bnx2_enable_nvram_access(bp);
2944
2945                                 /* Reconfigure the flash interface */
2946                                 REG_WR(bp, BNX2_NVM_CFG1, flash->config1);
2947                                 REG_WR(bp, BNX2_NVM_CFG2, flash->config2);
2948                                 REG_WR(bp, BNX2_NVM_CFG3, flash->config3);
2949                                 REG_WR(bp, BNX2_NVM_WRITE1, flash->write1);
2950
2951                                 /* Disable access to flash interface */
2952                                 bnx2_disable_nvram_access(bp);
2953                                 bnx2_release_nvram_lock(bp);
2954
2955                                 break;
2956                         }
2957                 }
2958         } /* if (val & 0x40000000) */
2959
2960         if (j == entry_count) {
2961                 bp->flash_info = NULL;
2962                 printk(KERN_ALERT PFX "Unknown flash/EEPROM type.\n");
2963                 return -ENODEV;
2964         }
2965
2966         val = REG_RD_IND(bp, bp->shmem_base + BNX2_SHARED_HW_CFG_CONFIG2);
2967         val &= BNX2_SHARED_HW_CFG2_NVM_SIZE_MASK;
2968         if (val)
2969                 bp->flash_size = val;
2970         else
2971                 bp->flash_size = bp->flash_info->total_size;
2972
2973         return rc;
2974 }
2975
2976 static int
2977 bnx2_nvram_read(struct bnx2 *bp, u32 offset, u8 *ret_buf,
2978                 int buf_size)
2979 {
2980         int rc = 0;
2981         u32 cmd_flags, offset32, len32, extra;
2982
2983         if (buf_size == 0)
2984                 return 0;
2985
2986         /* Request access to the flash interface. */
2987         if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
2988                 return rc;
2989
2990         /* Enable access to flash interface */
2991         bnx2_enable_nvram_access(bp);
2992
2993         len32 = buf_size;
2994         offset32 = offset;
2995         extra = 0;
2996
2997         cmd_flags = 0;
2998
2999         if (offset32 & 3) {
3000                 u8 buf[4];
3001                 u32 pre_len;
3002
3003                 offset32 &= ~3;
3004                 pre_len = 4 - (offset & 3);
3005
3006                 if (pre_len >= len32) {
3007                         pre_len = len32;
3008                         cmd_flags = BNX2_NVM_COMMAND_FIRST |
3009                                     BNX2_NVM_COMMAND_LAST;
3010                 }
3011                 else {
3012                         cmd_flags = BNX2_NVM_COMMAND_FIRST;
3013                 }
3014
3015                 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
3016
3017                 if (rc)
3018                         return rc;
3019
3020                 memcpy(ret_buf, buf + (offset & 3), pre_len);
3021
3022                 offset32 += 4;
3023                 ret_buf += pre_len;
3024                 len32 -= pre_len;
3025         }
3026         if (len32 & 3) {
3027                 extra = 4 - (len32 & 3);
3028                 len32 = (len32 + 4) & ~3;
3029         }
3030
3031         if (len32 == 4) {
3032                 u8 buf[4];
3033
3034                 if (cmd_flags)
3035                         cmd_flags = BNX2_NVM_COMMAND_LAST;
3036                 else
3037                         cmd_flags = BNX2_NVM_COMMAND_FIRST |
3038                                     BNX2_NVM_COMMAND_LAST;
3039
3040                 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
3041
3042                 memcpy(ret_buf, buf, 4 - extra);
3043         }
3044         else if (len32 > 0) {
3045                 u8 buf[4];
3046
3047                 /* Read the first word. */
3048                 if (cmd_flags)
3049                         cmd_flags = 0;
3050                 else
3051                         cmd_flags = BNX2_NVM_COMMAND_FIRST;
3052
3053                 rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, cmd_flags);
3054
3055                 /* Advance to the next dword. */
3056                 offset32 += 4;
3057                 ret_buf += 4;
3058                 len32 -= 4;
3059
3060                 while (len32 > 4 && rc == 0) {
3061                         rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, 0);
3062
3063                         /* Advance to the next dword. */
3064                         offset32 += 4;
3065                         ret_buf += 4;
3066                         len32 -= 4;
3067                 }
3068
3069                 if (rc)
3070                         return rc;
3071
3072                 cmd_flags = BNX2_NVM_COMMAND_LAST;
3073                 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
3074
3075                 memcpy(ret_buf, buf, 4 - extra);
3076         }
3077
3078         /* Disable access to flash interface */
3079         bnx2_disable_nvram_access(bp);
3080
3081         bnx2_release_nvram_lock(bp);
3082
3083         return rc;
3084 }
3085
3086 static int
3087 bnx2_nvram_write(struct bnx2 *bp, u32 offset, u8 *data_buf,
3088                 int buf_size)
3089 {
3090         u32 written, offset32, len32;
3091         u8 *buf, start[4], end[4], *align_buf = NULL, *flash_buffer = NULL;
3092         int rc = 0;
3093         int align_start, align_end;
3094
3095         buf = data_buf;
3096         offset32 = offset;
3097         len32 = buf_size;
3098         align_start = align_end = 0;
3099
3100         if ((align_start = (offset32 & 3))) {
3101                 offset32 &= ~3;
3102                 len32 += align_start;
3103                 if (len32 < 4)
3104                         len32 = 4;
3105                 if ((rc = bnx2_nvram_read(bp, offset32, start, 4)))
3106                         return rc;
3107         }
3108
3109         if (len32 & 3) {
3110                 align_end = 4 - (len32 & 3);
3111                 len32 += align_end;
3112                 if ((rc = bnx2_nvram_read(bp, offset32 + len32 - 4, end, 4)))
3113                         return rc;
3114         }
3115
3116         if (align_start || align_end) {
3117                 align_buf = kmalloc(len32, GFP_KERNEL);
3118                 if (align_buf == NULL)
3119                         return -ENOMEM;
3120                 if (align_start) {
3121                         memcpy(align_buf, start, 4);
3122                 }
3123                 if (align_end) {
3124                         memcpy(align_buf + len32 - 4, end, 4);
3125                 }
3126                 memcpy(align_buf + align_start, data_buf, buf_size);
3127                 buf = align_buf;
3128         }
3129
3130         if (bp->flash_info->buffered == 0) {
3131                 flash_buffer = kmalloc(264, GFP_KERNEL);
3132                 if (flash_buffer == NULL) {
3133                         rc = -ENOMEM;
3134                         goto nvram_write_end;
3135                 }
3136         }
3137
3138         written = 0;
3139         while ((written < len32) && (rc == 0)) {
3140                 u32 page_start, page_end, data_start, data_end;
3141                 u32 addr, cmd_flags;
3142                 int i;
3143
3144                 /* Find the page_start addr */
3145                 page_start = offset32 + written;
3146                 page_start -= (page_start % bp->flash_info->page_size);
3147                 /* Find the page_end addr */
3148                 page_end = page_start + bp->flash_info->page_size;
3149                 /* Find the data_start addr */
3150                 data_start = (written == 0) ? offset32 : page_start;
3151                 /* Find the data_end addr */
3152                 data_end = (page_end > offset32 + len32) ?
3153                         (offset32 + len32) : page_end;
3154
3155                 /* Request access to the flash interface. */
3156                 if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
3157                         goto nvram_write_end;
3158
3159                 /* Enable access to flash interface */
3160                 bnx2_enable_nvram_access(bp);
3161
3162                 cmd_flags = BNX2_NVM_COMMAND_FIRST;
3163                 if (bp->flash_info->buffered == 0) {
3164                         int j;
3165
3166                         /* Read the whole page into the buffer
3167                          * (non-buffer flash only) */
3168                         for (j = 0; j < bp->flash_info->page_size; j += 4) {
3169                                 if (j == (bp->flash_info->page_size - 4)) {
3170                                         cmd_flags |= BNX2_NVM_COMMAND_LAST;
3171                                 }
3172                                 rc = bnx2_nvram_read_dword(bp,
3173                                         page_start + j,
3174                                         &flash_buffer[j],
3175                                         cmd_flags);
3176
3177                                 if (rc)
3178                                         goto nvram_write_end;
3179
3180                                 cmd_flags = 0;
3181                         }
3182                 }
3183
3184                 /* Enable writes to flash interface (unlock write-protect) */
3185                 if ((rc = bnx2_enable_nvram_write(bp)) != 0)
3186                         goto nvram_write_end;
3187
3188                 /* Loop to write back the buffer data from page_start to
3189                  * data_start */
3190                 i = 0;
3191                 if (bp->flash_info->buffered == 0) {
3192                         /* Erase the page */
3193                         if ((rc = bnx2_nvram_erase_page(bp, page_start)) != 0)
3194                                 goto nvram_write_end;
3195
3196                         /* Re-enable the write again for the actual write */
3197                         bnx2_enable_nvram_write(bp);
3198
3199                         for (addr = page_start; addr < data_start;
3200                                 addr += 4, i += 4) {
3201
3202                                 rc = bnx2_nvram_write_dword(bp, addr,
3203                                         &flash_buffer[i], cmd_flags);
3204
3205                                 if (rc != 0)
3206                                         goto nvram_write_end;
3207
3208                                 cmd_flags = 0;
3209                         }
3210                 }
3211
3212                 /* Loop to write the new data from data_start to data_end */
3213                 for (addr = data_start; addr < data_end; addr += 4, i += 4) {
3214                         if ((addr == page_end - 4) ||
3215                                 ((bp->flash_info->buffered) &&
3216                                  (addr == data_end - 4))) {
3217
3218                                 cmd_flags |= BNX2_NVM_COMMAND_LAST;
3219                         }
3220                         rc = bnx2_nvram_write_dword(bp, addr, buf,
3221                                 cmd_flags);
3222
3223                         if (rc != 0)
3224                                 goto nvram_write_end;
3225
3226                         cmd_flags = 0;
3227                         buf += 4;
3228                 }
3229
3230                 /* Loop to write back the buffer data from data_end
3231                  * to page_end */
3232                 if (bp->flash_info->buffered == 0) {
3233                         for (addr = data_end; addr < page_end;
3234                                 addr += 4, i += 4) {
3235
3236                                 if (addr == page_end-4) {
3237                                         cmd_flags = BNX2_NVM_COMMAND_LAST;
3238                                 }
3239                                 rc = bnx2_nvram_write_dword(bp, addr,
3240                                         &flash_buffer[i], cmd_flags);
3241
3242                                 if (rc != 0)
3243                                         goto nvram_write_end;
3244