Merge master.kernel.org:/pub/scm/linux/kernel/git/davem/bnx2-2.6
[sfrench/cifs-2.6.git] / drivers / net / bnx2.c
1 /* bnx2.c: Broadcom NX2 network driver.
2  *
3  * Copyright (c) 2004, 2005, 2006 Broadcom Corporation
4  *
5  * This program is free software; you can redistribute it and/or modify
6  * it under the terms of the GNU General Public License as published by
7  * the Free Software Foundation.
8  *
9  * Written by: Michael Chan  (mchan@broadcom.com)
10  */
11
12
13 #include <linux/module.h>
14 #include <linux/moduleparam.h>
15
16 #include <linux/kernel.h>
17 #include <linux/timer.h>
18 #include <linux/errno.h>
19 #include <linux/ioport.h>
20 #include <linux/slab.h>
21 #include <linux/vmalloc.h>
22 #include <linux/interrupt.h>
23 #include <linux/pci.h>
24 #include <linux/init.h>
25 #include <linux/netdevice.h>
26 #include <linux/etherdevice.h>
27 #include <linux/skbuff.h>
28 #include <linux/dma-mapping.h>
29 #include <asm/bitops.h>
30 #include <asm/io.h>
31 #include <asm/irq.h>
32 #include <linux/delay.h>
33 #include <asm/byteorder.h>
34 #include <asm/page.h>
35 #include <linux/time.h>
36 #include <linux/ethtool.h>
37 #include <linux/mii.h>
38 #ifdef NETIF_F_HW_VLAN_TX
39 #include <linux/if_vlan.h>
40 #define BCM_VLAN 1
41 #endif
42 #ifdef NETIF_F_TSO
43 #include <net/ip.h>
44 #include <net/tcp.h>
45 #include <net/checksum.h>
46 #define BCM_TSO 1
47 #endif
48 #include <linux/workqueue.h>
49 #include <linux/crc32.h>
50 #include <linux/prefetch.h>
51 #include <linux/cache.h>
52 #include <linux/zlib.h>
53
54 #include "bnx2.h"
55 #include "bnx2_fw.h"
56 #include "bnx2_fw2.h"
57
/* Driver identification strings used in the banner, ethtool output and
 * log messages.
 */
#define DRV_MODULE_NAME         "bnx2"
#define PFX DRV_MODULE_NAME     ": "
#define DRV_MODULE_VERSION      "1.5.5"
#define DRV_MODULE_RELDATE      "February 1, 2007"

/* Absolute jiffies deadline x ticks from now. */
#define RUN_AT(x) (jiffies + (x))

/* Time in jiffies before concluding the transmitter is hung. */
#define TX_TIMEOUT  (5*HZ)

static const char version[] __devinitdata =
        "Broadcom NetXtreme II Gigabit Ethernet Driver " DRV_MODULE_NAME " v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Michael Chan <mchan@broadcom.com>");
MODULE_DESCRIPTION("Broadcom NetXtreme II BCM5706/5708 Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);

/* Module parameter: set disable_msi=1 to force legacy INTx interrupts
 * instead of Message Signaled Interrupts.
 */
static int disable_msi = 0;

module_param(disable_msi, int, 0);
MODULE_PARM_DESC(disable_msi, "Disable Message Signaled Interrupt (MSI)");
80
/* Board type index.  Stored in the driver_data field of bnx2_pci_tbl[]
 * entries and used to index board_info[] for the printable board name.
 */
typedef enum {
        BCM5706 = 0,
        NC370T,
        NC370I,
        BCM5706S,
        NC370F,
        BCM5708,
        BCM5708S,
        BCM5709,
} board_t;
91
92 /* indexed by board_t, above */
/* Printable board names, indexed by board_t above; the two lists must
 * stay in the same order.
 */
static const struct {
        char *name;
} board_info[] __devinitdata = {
        { "Broadcom NetXtreme II BCM5706 1000Base-T" },
        { "HP NC370T Multifunction Gigabit Server Adapter" },
        { "HP NC370i Multifunction Gigabit Server Adapter" },
        { "Broadcom NetXtreme II BCM5706 1000Base-SX" },
        { "HP NC370F Multifunction Gigabit Server Adapter" },
        { "Broadcom NetXtreme II BCM5708 1000Base-T" },
        { "Broadcom NetXtreme II BCM5708 1000Base-SX" },
        { "Broadcom NetXtreme II BCM5709 1000Base-T" },
        };
105
/* PCI IDs this driver binds to.  The HP NC370x entries are listed
 * before the generic Broadcom entries so their specific subsystem IDs
 * match first; the generic entries use PCI_ANY_ID subsystem wildcards.
 * driver_data (last field) carries the board_t index into board_info[].
 */
static struct pci_device_id bnx2_pci_tbl[] = {
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
          PCI_VENDOR_ID_HP, 0x3101, 0, 0, NC370T },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
          PCI_VENDOR_ID_HP, 0x3106, 0, 0, NC370I },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706 },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708 },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
          PCI_VENDOR_ID_HP, 0x3102, 0, 0, NC370F },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706S },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708S,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708S },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709 },
        { 0, }
};
125
/* NVRAM device descriptors, selected at probe time by matching the
 * device's strapping value (first field of each entry).
 * NOTE(review): per-field meanings (strapping, config regs, write cmd,
 * buffered flag, page bits/size, byte-address mask, total size, name)
 * are inferred from struct flash_spec in bnx2.h -- confirm against
 * that header before relying on them.
 */
static struct flash_spec flash_table[] =
{
        /* Slow EEPROM */
        {0x00000000, 0x40830380, 0x009f0081, 0xa184a053, 0xaf000400,
         1, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
         SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
         "EEPROM - slow"},
        /* Expansion entry 0001 */
        {0x08000002, 0x4b808201, 0x00050081, 0x03840253, 0xaf020406,
         0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
         SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
         "Entry 0001"},
        /* Saifun SA25F010 (non-buffered flash) */
        /* strap, cfg1, & write1 need updates */
        {0x04000001, 0x47808201, 0x00050081, 0x03840253, 0xaf020406,
         0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
         SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*2,
         "Non-buffered flash (128kB)"},
        /* Saifun SA25F020 (non-buffered flash) */
        /* strap, cfg1, & write1 need updates */
        {0x0c000003, 0x4f808201, 0x00050081, 0x03840253, 0xaf020406,
         0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
         SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*4,
         "Non-buffered flash (256kB)"},
        /* Expansion entry 0100 */
        {0x11000000, 0x53808201, 0x00050081, 0x03840253, 0xaf020406,
         0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
         SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
         "Entry 0100"},
        /* Entry 0101: ST M45PE10 (non-buffered flash, TetonII B0) */
        {0x19000002, 0x5b808201, 0x000500db, 0x03840253, 0xaf020406,
         0, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
         ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*2,
         "Entry 0101: ST M45PE10 (128kB non-bufferred)"},
        /* Entry 0110: ST M45PE20 (non-buffered flash)*/
        {0x15000001, 0x57808201, 0x000500db, 0x03840253, 0xaf020406,
         0, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
         ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*4,
         "Entry 0110: ST M45PE20 (256kB non-bufferred)"},
        /* Saifun SA25F005 (non-buffered flash) */
        /* strap, cfg1, & write1 need updates */
        {0x1d000003, 0x5f808201, 0x00050081, 0x03840253, 0xaf020406,
         0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
         SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE,
         "Non-buffered flash (64kB)"},
        /* Fast EEPROM */
        {0x22000000, 0x62808380, 0x009f0081, 0xa184a053, 0xaf000400,
         1, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
         SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
         "EEPROM - fast"},
        /* Expansion entry 1001 */
        {0x2a000002, 0x6b808201, 0x00050081, 0x03840253, 0xaf020406,
         0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
         SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
         "Entry 1001"},
        /* Expansion entry 1010 */
        {0x26000001, 0x67808201, 0x00050081, 0x03840253, 0xaf020406,
         0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
         SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
         "Entry 1010"},
        /* ATMEL AT45DB011B (buffered flash) */
        {0x2e000003, 0x6e808273, 0x00570081, 0x68848353, 0xaf000400,
         1, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
         BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE,
         "Buffered flash (128kB)"},
        /* Expansion entry 1100 */
        {0x33000000, 0x73808201, 0x00050081, 0x03840253, 0xaf020406,
         0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
         SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
         "Entry 1100"},
        /* Expansion entry 1101 */
        {0x3b000002, 0x7b808201, 0x00050081, 0x03840253, 0xaf020406,
         0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
         SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
         "Entry 1101"},
        /* Ateml Expansion entry 1110 */
        {0x37000001, 0x76808273, 0x00570081, 0x68848353, 0xaf000400,
         1, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
         BUFFERED_FLASH_BYTE_ADDR_MASK, 0,
         "Entry 1110 (Atmel)"},
        /* ATMEL AT45DB021B (buffered flash) */
        {0x3f000003, 0x7e808273, 0x00570081, 0x68848353, 0xaf000400,
         1, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
         BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE*2,
         "Buffered flash (256kB)"},
};
212
213 MODULE_DEVICE_TABLE(pci, bnx2_pci_tbl);
214
215 static inline u32 bnx2_tx_avail(struct bnx2 *bp)
216 {
217         u32 diff;
218
219         smp_mb();
220
221         /* The ring uses 256 indices for 255 entries, one of them
222          * needs to be skipped.
223          */
224         diff = bp->tx_prod - bp->tx_cons;
225         if (unlikely(diff >= TX_DESC_CNT)) {
226                 diff &= 0xffff;
227                 if (diff == TX_DESC_CNT)
228                         diff = MAX_TX_DESC_CNT;
229         }
230         return (bp->tx_ring_size - diff);
231 }
232
233 static u32
234 bnx2_reg_rd_ind(struct bnx2 *bp, u32 offset)
235 {
236         REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
237         return (REG_RD(bp, BNX2_PCICFG_REG_WINDOW));
238 }
239
/* Indirect register write: select @offset through the PCI config window
 * address register, then write @val to the windowed data register.
 * The two writes must stay in this order.
 */
static void
bnx2_reg_wr_ind(struct bnx2 *bp, u32 offset, u32 val)
{
        REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
        REG_WR(bp, BNX2_PCICFG_REG_WINDOW, val);
}
246
247 static void
248 bnx2_ctx_wr(struct bnx2 *bp, u32 cid_addr, u32 offset, u32 val)
249 {
250         offset += cid_addr;
251         if (CHIP_NUM(bp) == CHIP_NUM_5709) {
252                 int i;
253
254                 REG_WR(bp, BNX2_CTX_CTX_DATA, val);
255                 REG_WR(bp, BNX2_CTX_CTX_CTRL,
256                        offset | BNX2_CTX_CTX_CTRL_WRITE_REQ);
257                 for (i = 0; i < 5; i++) {
258                         u32 val;
259                         val = REG_RD(bp, BNX2_CTX_CTX_CTRL);
260                         if ((val & BNX2_CTX_CTX_CTRL_WRITE_REQ) == 0)
261                                 break;
262                         udelay(5);
263                 }
264         } else {
265                 REG_WR(bp, BNX2_CTX_DATA_ADR, offset);
266                 REG_WR(bp, BNX2_CTX_DATA, val);
267         }
268 }
269
/* Read PHY register @reg via the EMAC MDIO interface into *@val.
 *
 * If the PHY is in auto-polling mode, polling is disabled around the
 * access (the MDIO bus cannot be used while the auto-poll engine owns
 * it) and re-enabled afterwards.
 *
 * Returns 0 on success, or -EBUSY (with *@val set to 0) if the MDIO
 * transaction does not complete within the ~500us polling window.
 */
static int
bnx2_read_phy(struct bnx2 *bp, u32 reg, u32 *val)
{
        u32 val1;
        int i, ret;

        if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
                /* Temporarily turn off hardware auto-polling of the PHY. */
                val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
                val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;

                REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
                REG_RD(bp, BNX2_EMAC_MDIO_MODE);  /* read back, presumably to flush the posted write */

                udelay(40);
        }

        /* Compose the MDIO read command: PHY address, register number,
         * read opcode, and the start/busy trigger bit.
         */
        val1 = (bp->phy_addr << 21) | (reg << 16) |
                BNX2_EMAC_MDIO_COMM_COMMAND_READ | BNX2_EMAC_MDIO_COMM_DISEXT |
                BNX2_EMAC_MDIO_COMM_START_BUSY;
        REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);

        /* Poll up to 50 x 10us for the transaction to complete. */
        for (i = 0; i < 50; i++) {
                udelay(10);

                val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
                if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
                        udelay(5);

                        /* Re-read to pick up the returned data bits. */
                        val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
                        val1 &= BNX2_EMAC_MDIO_COMM_DATA;

                        break;
                }
        }

        if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY) {
                /* Still busy after the full polling window: timed out. */
                *val = 0x0;
                ret = -EBUSY;
        }
        else {
                *val = val1;
                ret = 0;
        }

        if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
                /* Restore the auto-polling that was disabled above. */
                val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
                val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;

                REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
                REG_RD(bp, BNX2_EMAC_MDIO_MODE);

                udelay(40);
        }

        return ret;
}
326
/* Write @val to PHY register @reg via the EMAC MDIO interface.
 *
 * As in bnx2_read_phy(), hardware auto-polling is suspended around the
 * access and restored afterwards.
 *
 * Returns 0 on success, or -EBUSY if the MDIO transaction does not
 * complete within the ~500us polling window.
 */
static int
bnx2_write_phy(struct bnx2 *bp, u32 reg, u32 val)
{
        u32 val1;
        int i, ret;

        if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
                /* Temporarily turn off hardware auto-polling of the PHY. */
                val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
                val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;

                REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
                REG_RD(bp, BNX2_EMAC_MDIO_MODE);  /* read back, presumably to flush the posted write */

                udelay(40);
        }

        /* Compose the MDIO write command: PHY address, register number,
         * the 16-bit data in the low bits, write opcode and start bit.
         */
        val1 = (bp->phy_addr << 21) | (reg << 16) | val |
                BNX2_EMAC_MDIO_COMM_COMMAND_WRITE |
                BNX2_EMAC_MDIO_COMM_START_BUSY | BNX2_EMAC_MDIO_COMM_DISEXT;
        REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);

        /* Poll up to 50 x 10us for the transaction to complete. */
        for (i = 0; i < 50; i++) {
                udelay(10);

                val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
                if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
                        udelay(5);
                        break;
                }
        }

        if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)
                ret = -EBUSY;
        else
                ret = 0;

        if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
                /* Restore the auto-polling that was disabled above. */
                val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
                val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;

                REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
                REG_RD(bp, BNX2_EMAC_MDIO_MODE);

                udelay(40);
        }

        return ret;
}
375
/* Mask the device interrupt via the INT_ACK_CMD register; the read back
 * is presumably to flush the posted write before returning -- confirm
 * against the NetXtreme II programming docs.
 */
static void
bnx2_disable_int(struct bnx2 *bp)
{
        REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
               BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
        REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD);
}
383
/* Unmask the device interrupt.
 *
 * Two INT_ACK_CMD writes are issued: the first acknowledges up to the
 * last seen status index with the interrupt still masked, the second
 * unmasks.  The final HC_COMMAND write with COAL_NOW asks the host
 * coalescing block to generate an immediate status update so no event
 * that arrived while masked is lost.
 */
static void
bnx2_enable_int(struct bnx2 *bp)
{
        REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
               BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
               BNX2_PCICFG_INT_ACK_CMD_MASK_INT | bp->last_status_idx);

        REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
               BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID | bp->last_status_idx);

        REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
}
396
/* Disable device interrupts and wait for any in-flight handler to
 * finish.  intr_sem is bumped first so the irq handler (which checks
 * it) will ignore any interrupt that races with the disable; paired
 * with the atomic_dec_and_test() in bnx2_netif_start().
 */
static void
bnx2_disable_int_sync(struct bnx2 *bp)
{
        atomic_inc(&bp->intr_sem);
        bnx2_disable_int(bp);
        synchronize_irq(bp->pdev->irq);
}
404
/* Quiesce the interface: interrupts are disabled and synchronized
 * first, then NAPI polling and the tx queue are stopped.  trans_start
 * is refreshed so the stopped queue does not trigger a spurious tx
 * watchdog timeout.
 */
static void
bnx2_netif_stop(struct bnx2 *bp)
{
        bnx2_disable_int_sync(bp);
        if (netif_running(bp->dev)) {
                netif_poll_disable(bp->dev);
                netif_tx_disable(bp->dev);
                bp->dev->trans_start = jiffies; /* prevent tx timeout */
        }
}
415
416 static void
417 bnx2_netif_start(struct bnx2 *bp)
418 {
419         if (atomic_dec_and_test(&bp->intr_sem)) {
420                 if (netif_running(bp->dev)) {
421                         netif_wake_queue(bp->dev);
422                         netif_poll_enable(bp->dev);
423                         bnx2_enable_int(bp);
424                 }
425         }
426 }
427
/* Release everything allocated by bnx2_alloc_mem().  Safe to call with
 * a partially populated bp (it is the error-path cleanup for
 * bnx2_alloc_mem): every pointer is checked and NULLed so a second
 * call is harmless.
 */
static void
bnx2_free_mem(struct bnx2 *bp)
{
        int i;

        /* 5709-only context pages (ctx_pages is 0 on other chips). */
        for (i = 0; i < bp->ctx_pages; i++) {
                if (bp->ctx_blk[i]) {
                        pci_free_consistent(bp->pdev, BCM_PAGE_SIZE,
                                            bp->ctx_blk[i],
                                            bp->ctx_blk_mapping[i]);
                        bp->ctx_blk[i] = NULL;
                }
        }
        if (bp->status_blk) {
                /* status and statistics blocks share one allocation;
                 * stats_blk points into it and must not be freed itself.
                 */
                pci_free_consistent(bp->pdev, bp->status_stats_size,
                                    bp->status_blk, bp->status_blk_mapping);
                bp->status_blk = NULL;
                bp->stats_blk = NULL;
        }
        if (bp->tx_desc_ring) {
                pci_free_consistent(bp->pdev,
                                    sizeof(struct tx_bd) * TX_DESC_CNT,
                                    bp->tx_desc_ring, bp->tx_desc_mapping);
                bp->tx_desc_ring = NULL;
        }
        kfree(bp->tx_buf_ring);         /* kfree(NULL) is a no-op */
        bp->tx_buf_ring = NULL;
        for (i = 0; i < bp->rx_max_ring; i++) {
                if (bp->rx_desc_ring[i])
                        pci_free_consistent(bp->pdev,
                                            sizeof(struct rx_bd) * RX_DESC_CNT,
                                            bp->rx_desc_ring[i],
                                            bp->rx_desc_mapping[i]);
                bp->rx_desc_ring[i] = NULL;
        }
        vfree(bp->rx_buf_ring);         /* vfree(NULL) is a no-op */
        bp->rx_buf_ring = NULL;
}
466
/* Allocate all host memory the device needs: tx/rx shadow rings, DMA
 * descriptor rings, the combined status+statistics block, and (5709
 * only) the context pages.
 *
 * Returns 0 on success or -ENOMEM; on failure everything allocated so
 * far is released via bnx2_free_mem() (goto-cleanup pattern).
 */
static int
bnx2_alloc_mem(struct bnx2 *bp)
{
        int i, status_blk_size;

        /* Host-side shadow of the tx ring (skb bookkeeping). */
        bp->tx_buf_ring = kzalloc(sizeof(struct sw_bd) * TX_DESC_CNT,
                                  GFP_KERNEL);
        if (bp->tx_buf_ring == NULL)
                return -ENOMEM;         /* nothing allocated yet, plain return */

        /* DMA-coherent tx descriptor ring shared with the chip. */
        bp->tx_desc_ring = pci_alloc_consistent(bp->pdev,
                                                sizeof(struct tx_bd) *
                                                TX_DESC_CNT,
                                                &bp->tx_desc_mapping);
        if (bp->tx_desc_ring == NULL)
                goto alloc_mem_err;

        /* Host-side rx shadow can be large (rx_max_ring pages worth),
         * so it comes from vmalloc rather than kmalloc.
         */
        bp->rx_buf_ring = vmalloc(sizeof(struct sw_bd) * RX_DESC_CNT *
                                  bp->rx_max_ring);
        if (bp->rx_buf_ring == NULL)
                goto alloc_mem_err;

        memset(bp->rx_buf_ring, 0, sizeof(struct sw_bd) * RX_DESC_CNT *
                                   bp->rx_max_ring);

        for (i = 0; i < bp->rx_max_ring; i++) {
                bp->rx_desc_ring[i] =
                        pci_alloc_consistent(bp->pdev,
                                             sizeof(struct rx_bd) * RX_DESC_CNT,
                                             &bp->rx_desc_mapping[i]);
                if (bp->rx_desc_ring[i] == NULL)
                        goto alloc_mem_err;

        }

        /* Combine status and statistics blocks into one allocation. */
        status_blk_size = L1_CACHE_ALIGN(sizeof(struct status_block));
        bp->status_stats_size = status_blk_size +
                                sizeof(struct statistics_block);

        bp->status_blk = pci_alloc_consistent(bp->pdev, bp->status_stats_size,
                                              &bp->status_blk_mapping);
        if (bp->status_blk == NULL)
                goto alloc_mem_err;

        memset(bp->status_blk, 0, bp->status_stats_size);

        /* stats block lives right after the cache-aligned status block. */
        bp->stats_blk = (void *) ((unsigned long) bp->status_blk +
                                  status_blk_size);

        bp->stats_blk_mapping = bp->status_blk_mapping + status_blk_size;

        if (CHIP_NUM(bp) == CHIP_NUM_5709) {
                /* 8KB of context memory, split into BCM_PAGE_SIZE pages. */
                bp->ctx_pages = 0x2000 / BCM_PAGE_SIZE;
                if (bp->ctx_pages == 0)
                        bp->ctx_pages = 1;
                for (i = 0; i < bp->ctx_pages; i++) {
                        bp->ctx_blk[i] = pci_alloc_consistent(bp->pdev,
                                                BCM_PAGE_SIZE,
                                                &bp->ctx_blk_mapping[i]);
                        if (bp->ctx_blk[i] == NULL)
                                goto alloc_mem_err;
                }
        }
        return 0;

alloc_mem_err:
        bnx2_free_mem(bp);
        return -ENOMEM;
}
537
/* Encode the current link state (speed/duplex/autoneg) into the
 * BNX2_LINK_STATUS word in shared memory so the bootcode/management
 * firmware can see it.
 */
static void
bnx2_report_fw_link(struct bnx2 *bp)
{
        u32 fw_link_status = 0;

        if (bp->link_up) {
                u32 bmsr;

                switch (bp->line_speed) {
                case SPEED_10:
                        if (bp->duplex == DUPLEX_HALF)
                                fw_link_status = BNX2_LINK_STATUS_10HALF;
                        else
                                fw_link_status = BNX2_LINK_STATUS_10FULL;
                        break;
                case SPEED_100:
                        if (bp->duplex == DUPLEX_HALF)
                                fw_link_status = BNX2_LINK_STATUS_100HALF;
                        else
                                fw_link_status = BNX2_LINK_STATUS_100FULL;
                        break;
                case SPEED_1000:
                        if (bp->duplex == DUPLEX_HALF)
                                fw_link_status = BNX2_LINK_STATUS_1000HALF;
                        else
                                fw_link_status = BNX2_LINK_STATUS_1000FULL;
                        break;
                case SPEED_2500:
                        if (bp->duplex == DUPLEX_HALF)
                                fw_link_status = BNX2_LINK_STATUS_2500HALF;
                        else
                                fw_link_status = BNX2_LINK_STATUS_2500FULL;
                        break;
                }

                fw_link_status |= BNX2_LINK_STATUS_LINK_UP;

                if (bp->autoneg) {
                        fw_link_status |= BNX2_LINK_STATUS_AN_ENABLED;

                        /* Read BMSR twice: its link/autoneg bits are
                         * latched, so the second read reflects current
                         * state.
                         */
                        bnx2_read_phy(bp, MII_BMSR, &bmsr);
                        bnx2_read_phy(bp, MII_BMSR, &bmsr);

                        if (!(bmsr & BMSR_ANEGCOMPLETE) ||
                            bp->phy_flags & PHY_PARALLEL_DETECT_FLAG)
                                fw_link_status |= BNX2_LINK_STATUS_PARALLEL_DET;
                        else
                                fw_link_status |= BNX2_LINK_STATUS_AN_COMPLETE;
                }
        }
        else
                fw_link_status = BNX2_LINK_STATUS_LINK_DOWN;

        REG_WR_IND(bp, bp->shmem_base + BNX2_LINK_STATUS, fw_link_status);
}
593
594 static void
595 bnx2_report_link(struct bnx2 *bp)
596 {
597         if (bp->link_up) {
598                 netif_carrier_on(bp->dev);
599                 printk(KERN_INFO PFX "%s NIC Link is Up, ", bp->dev->name);
600
601                 printk("%d Mbps ", bp->line_speed);
602
603                 if (bp->duplex == DUPLEX_FULL)
604                         printk("full duplex");
605                 else
606                         printk("half duplex");
607
608                 if (bp->flow_ctrl) {
609                         if (bp->flow_ctrl & FLOW_CTRL_RX) {
610                                 printk(", receive ");
611                                 if (bp->flow_ctrl & FLOW_CTRL_TX)
612                                         printk("& transmit ");
613                         }
614                         else {
615                                 printk(", transmit ");
616                         }
617                         printk("flow control ON");
618                 }
619                 printk("\n");
620         }
621         else {
622                 netif_carrier_off(bp->dev);
623                 printk(KERN_ERR PFX "%s NIC Link is Down\n", bp->dev->name);
624         }
625
626         bnx2_report_fw_link(bp);
627 }
628
/* Resolve the effective flow control settings into bp->flow_ctrl
 * (FLOW_CTRL_TX / FLOW_CTRL_RX bits).
 *
 * If speed or flow control was forced rather than autonegotiated, the
 * requested setting is applied directly (full duplex only).  Otherwise
 * the local and link-partner pause advertisements are combined per the
 * 802.3ab-1999 Table 28B-3 resolution rules.  On 5708 SerDes the
 * already-resolved result is read straight from the PHY instead.
 */
static void
bnx2_resolve_flow_ctrl(struct bnx2 *bp)
{
        u32 local_adv, remote_adv;

        bp->flow_ctrl = 0;
        if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
                (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {

                /* Not fully autonegotiated: honor the forced request. */
                if (bp->duplex == DUPLEX_FULL) {
                        bp->flow_ctrl = bp->req_flow_ctrl;
                }
                return;
        }

        /* Pause is only meaningful in full duplex. */
        if (bp->duplex != DUPLEX_FULL) {
                return;
        }

        if ((bp->phy_flags & PHY_SERDES_FLAG) &&
            (CHIP_NUM(bp) == CHIP_NUM_5708)) {
                /* 5708 SerDes PHY reports the resolved pause state
                 * directly in its 1000X status register.
                 */
                u32 val;

                bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
                if (val & BCM5708S_1000X_STAT1_TX_PAUSE)
                        bp->flow_ctrl |= FLOW_CTRL_TX;
                if (val & BCM5708S_1000X_STAT1_RX_PAUSE)
                        bp->flow_ctrl |= FLOW_CTRL_RX;
                return;
        }

        bnx2_read_phy(bp, MII_ADVERTISE, &local_adv);
        bnx2_read_phy(bp, MII_LPA, &remote_adv);

        if (bp->phy_flags & PHY_SERDES_FLAG) {
                /* Translate the 1000Base-X pause bits into the copper
                 * ADVERTISE_PAUSE_* bit positions so the resolution
                 * logic below can be shared.
                 */
                u32 new_local_adv = 0;
                u32 new_remote_adv = 0;

                if (local_adv & ADVERTISE_1000XPAUSE)
                        new_local_adv |= ADVERTISE_PAUSE_CAP;
                if (local_adv & ADVERTISE_1000XPSE_ASYM)
                        new_local_adv |= ADVERTISE_PAUSE_ASYM;
                if (remote_adv & ADVERTISE_1000XPAUSE)
                        new_remote_adv |= ADVERTISE_PAUSE_CAP;
                if (remote_adv & ADVERTISE_1000XPSE_ASYM)
                        new_remote_adv |= ADVERTISE_PAUSE_ASYM;

                local_adv = new_local_adv;
                remote_adv = new_remote_adv;
        }

        /* See Table 28B-3 of 802.3ab-1999 spec. */
        if (local_adv & ADVERTISE_PAUSE_CAP) {
                if(local_adv & ADVERTISE_PAUSE_ASYM) {
                        if (remote_adv & ADVERTISE_PAUSE_CAP) {
                                bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
                        }
                        else if (remote_adv & ADVERTISE_PAUSE_ASYM) {
                                bp->flow_ctrl = FLOW_CTRL_RX;
                        }
                }
                else {
                        if (remote_adv & ADVERTISE_PAUSE_CAP) {
                                bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
                        }
                }
        }
        else if (local_adv & ADVERTISE_PAUSE_ASYM) {
                if ((remote_adv & ADVERTISE_PAUSE_CAP) &&
                        (remote_adv & ADVERTISE_PAUSE_ASYM)) {

                        bp->flow_ctrl = FLOW_CTRL_TX;
                }
        }
}
704
705 static int
706 bnx2_5708s_linkup(struct bnx2 *bp)
707 {
708         u32 val;
709
710         bp->link_up = 1;
711         bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
712         switch (val & BCM5708S_1000X_STAT1_SPEED_MASK) {
713                 case BCM5708S_1000X_STAT1_SPEED_10:
714                         bp->line_speed = SPEED_10;
715                         break;
716                 case BCM5708S_1000X_STAT1_SPEED_100:
717                         bp->line_speed = SPEED_100;
718                         break;
719                 case BCM5708S_1000X_STAT1_SPEED_1G:
720                         bp->line_speed = SPEED_1000;
721                         break;
722                 case BCM5708S_1000X_STAT1_SPEED_2G5:
723                         bp->line_speed = SPEED_2500;
724                         break;
725         }
726         if (val & BCM5708S_1000X_STAT1_FD)
727                 bp->duplex = DUPLEX_FULL;
728         else
729                 bp->duplex = DUPLEX_HALF;
730
731         return 0;
732 }
733
734 static int
735 bnx2_5706s_linkup(struct bnx2 *bp)
736 {
737         u32 bmcr, local_adv, remote_adv, common;
738
739         bp->link_up = 1;
740         bp->line_speed = SPEED_1000;
741
742         bnx2_read_phy(bp, MII_BMCR, &bmcr);
743         if (bmcr & BMCR_FULLDPLX) {
744                 bp->duplex = DUPLEX_FULL;
745         }
746         else {
747                 bp->duplex = DUPLEX_HALF;
748         }
749
750         if (!(bmcr & BMCR_ANENABLE)) {
751                 return 0;
752         }
753
754         bnx2_read_phy(bp, MII_ADVERTISE, &local_adv);
755         bnx2_read_phy(bp, MII_LPA, &remote_adv);
756
757         common = local_adv & remote_adv;
758         if (common & (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL)) {
759
760                 if (common & ADVERTISE_1000XFULL) {
761                         bp->duplex = DUPLEX_FULL;
762                 }
763                 else {
764                         bp->duplex = DUPLEX_HALF;
765                 }
766         }
767
768         return 0;
769 }
770
/* Record link-up speed/duplex for a copper PHY.
 *
 * With autoneg enabled, the highest common capability wins: 1000 first
 * (via CTRL1000/STAT1000), then 100/10 (via ADVERTISE/LPA).  If no
 * common capability is found, the link is declared down.  With autoneg
 * disabled, speed/duplex come straight from BMCR.  Always returns 0.
 */
static int
bnx2_copper_linkup(struct bnx2 *bp)
{
        u32 bmcr;

        bnx2_read_phy(bp, MII_BMCR, &bmcr);
        if (bmcr & BMCR_ANENABLE) {
                u32 local_adv, remote_adv, common;

                bnx2_read_phy(bp, MII_CTRL1000, &local_adv);
                bnx2_read_phy(bp, MII_STAT1000, &remote_adv);

                /* STAT1000's partner-capability bits sit two positions
                 * above CTRL1000's advertise bits; shifting aligns them
                 * so they can be ANDed.
                 */
                common = local_adv & (remote_adv >> 2);
                if (common & ADVERTISE_1000FULL) {
                        bp->line_speed = SPEED_1000;
                        bp->duplex = DUPLEX_FULL;
                }
                else if (common & ADVERTISE_1000HALF) {
                        bp->line_speed = SPEED_1000;
                        bp->duplex = DUPLEX_HALF;
                }
                else {
                        /* No gigabit match; fall back to 100/10. */
                        bnx2_read_phy(bp, MII_ADVERTISE, &local_adv);
                        bnx2_read_phy(bp, MII_LPA, &remote_adv);

                        common = local_adv & remote_adv;
                        if (common & ADVERTISE_100FULL) {
                                bp->line_speed = SPEED_100;
                                bp->duplex = DUPLEX_FULL;
                        }
                        else if (common & ADVERTISE_100HALF) {
                                bp->line_speed = SPEED_100;
                                bp->duplex = DUPLEX_HALF;
                        }
                        else if (common & ADVERTISE_10FULL) {
                                bp->line_speed = SPEED_10;
                                bp->duplex = DUPLEX_FULL;
                        }
                        else if (common & ADVERTISE_10HALF) {
                                bp->line_speed = SPEED_10;
                                bp->duplex = DUPLEX_HALF;
                        }
                        else {
                                /* Nothing in common: treat as link down. */
                                bp->line_speed = 0;
                                bp->link_up = 0;
                        }
                }
        }
        else {
                /* Autoneg disabled: forced speed/duplex from BMCR. */
                if (bmcr & BMCR_SPEED100) {
                        bp->line_speed = SPEED_100;
                }
                else {
                        bp->line_speed = SPEED_10;
                }
                if (bmcr & BMCR_FULLDPLX) {
                        bp->duplex = DUPLEX_FULL;
                }
                else {
                        bp->duplex = DUPLEX_HALF;
                }
        }

        return 0;
}
836
/* Reprogram the EMAC to match the current link state (bp->link_up,
 * bp->line_speed, bp->duplex, bp->flow_ctrl): TX lengths, port mode,
 * duplex and RX/TX PAUSE, then acknowledge the EMAC link-change
 * status.  Callers must have resolved speed/duplex/flow control
 * first (e.g. via bnx2_resolve_flow_ctrl()).  Always returns 0.
 */
837 static int
838 bnx2_set_mac_link(struct bnx2 *bp)
839 {
840         u32 val;
841
            /* Default TX lengths; 1000 Mbps half duplex gets the
             * larger 0x26ff value below.
             */
842         REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x2620);
843         if (bp->link_up && (bp->line_speed == SPEED_1000) &&
844                 (bp->duplex == DUPLEX_HALF)) {
845                 REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x26ff);
846         }
847
848         /* Configure the EMAC mode register. */
849         val = REG_RD(bp, BNX2_EMAC_MODE);
850
            /* Clear all port/duplex/loopback/force-link bits before
             * selecting the mode for the current speed.
             */
851         val &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
852                 BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
853                 BNX2_EMAC_MODE_25G_MODE);
854
855         if (bp->link_up) {
856                 switch (bp->line_speed) {
857                         case SPEED_10:
                                /* Only chips other than the 5706 have a
                                 * dedicated 10M MII port mode; the 5706
                                 * falls through to plain MII.
                                 */
858                                 if (CHIP_NUM(bp) != CHIP_NUM_5706) {
859                                         val |= BNX2_EMAC_MODE_PORT_MII_10M;
860                                         break;
861                                 }
862                                 /* fall through */
863                         case SPEED_100:
864                                 val |= BNX2_EMAC_MODE_PORT_MII;
865                                 break;
866                         case SPEED_2500:
867                                 val |= BNX2_EMAC_MODE_25G_MODE;
868                                 /* fall through */
869                         case SPEED_1000:
870                                 val |= BNX2_EMAC_MODE_PORT_GMII;
871                                 break;
872                 }
873         }
874         else {
                /* Link down: leave the MAC in GMII mode. */
875                 val |= BNX2_EMAC_MODE_PORT_GMII;
876         }
877
878         /* Set the MAC to operate in the appropriate duplex mode. */
879         if (bp->duplex == DUPLEX_HALF)
880                 val |= BNX2_EMAC_MODE_HALF_DUPLEX;
881         REG_WR(bp, BNX2_EMAC_MODE, val);
882
883         /* Enable/disable rx PAUSE. */
884         bp->rx_mode &= ~BNX2_EMAC_RX_MODE_FLOW_EN;
885
886         if (bp->flow_ctrl & FLOW_CTRL_RX)
887                 bp->rx_mode |= BNX2_EMAC_RX_MODE_FLOW_EN;
888         REG_WR(bp, BNX2_EMAC_RX_MODE, bp->rx_mode);
889
890         /* Enable/disable tx PAUSE. */
891         val = REG_RD(bp, BNX2_EMAC_TX_MODE);
892         val &= ~BNX2_EMAC_TX_MODE_FLOW_EN;
893
894         if (bp->flow_ctrl & FLOW_CTRL_TX)
895                 val |= BNX2_EMAC_TX_MODE_FLOW_EN;
896         REG_WR(bp, BNX2_EMAC_TX_MODE, val);
897
898         /* Acknowledge the interrupt. */
899         REG_WR(bp, BNX2_EMAC_STATUS, BNX2_EMAC_STATUS_LINK_CHANGE);
900
901         return 0;
902 }
903
/* Re-evaluate the link after a link event: sample BMSR, run the
 * chip-specific linkup helper (SerDes 5706/5708 or copper), resolve
 * flow control, report a state change, and reprogram the MAC.
 * In MAC/PHY loopback mode the link is simply forced up.
 * Always returns 0.
 */
904 static int
905 bnx2_set_link(struct bnx2 *bp)
906 {
907         u32 bmsr;
908         u8 link_up;
909
910         if (bp->loopback == MAC_LOOPBACK || bp->loopback == PHY_LOOPBACK) {
911                 bp->link_up = 1;
912                 return 0;
913         }
914
            /* Remember the previous state so we only report changes. */
915         link_up = bp->link_up;
916
            /* Read BMSR twice: the MII link-status bit is latched, so
             * the second read reflects the current state.
             */
917         bnx2_read_phy(bp, MII_BMSR, &bmsr);
918         bnx2_read_phy(bp, MII_BMSR, &bmsr);
919
            /* On 5706 SerDes, trust the EMAC link status over the PHY's
             * BMSR and override the latter accordingly.
             */
920         if ((bp->phy_flags & PHY_SERDES_FLAG) &&
921             (CHIP_NUM(bp) == CHIP_NUM_5706)) {
922                 u32 val;
923
924                 val = REG_RD(bp, BNX2_EMAC_STATUS);
925                 if (val & BNX2_EMAC_STATUS_LINK)
926                         bmsr |= BMSR_LSTATUS;
927                 else
928                         bmsr &= ~BMSR_LSTATUS;
929         }
930
931         if (bmsr & BMSR_LSTATUS) {
932                 bp->link_up = 1;
933
934                 if (bp->phy_flags & PHY_SERDES_FLAG) {
935                         if (CHIP_NUM(bp) == CHIP_NUM_5706)
936                                 bnx2_5706s_linkup(bp);
937                         else if (CHIP_NUM(bp) == CHIP_NUM_5708)
938                                 bnx2_5708s_linkup(bp);
939                 }
940                 else {
941                         bnx2_copper_linkup(bp);
942                 }
943                 bnx2_resolve_flow_ctrl(bp);
944         }
945         else {
                /* Link is down.  For autoneg SerDes, drop any forced
                 * 2.5G setting and make sure autonegotiation is
                 * re-enabled so the link can come back.
                 */
946                 if ((bp->phy_flags & PHY_SERDES_FLAG) &&
947                         (bp->autoneg & AUTONEG_SPEED)) {
948
949                         u32 bmcr;
950
951                         bnx2_read_phy(bp, MII_BMCR, &bmcr);
952                         bmcr &= ~BCM5708S_BMCR_FORCE_2500;
953                         if (!(bmcr & BMCR_ANENABLE)) {
954                                 bnx2_write_phy(bp, MII_BMCR, bmcr |
955                                         BMCR_ANENABLE);
956                         }
957                 }
958                 bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG;
959                 bp->link_up = 0;
960         }
961
962         if (bp->link_up != link_up) {
963                 bnx2_report_link(bp);
964         }
965
966         bnx2_set_mac_link(bp);
967
968         return 0;
969 }
970
/* Soft-reset the PHY by setting BMCR_RESET and polling (up to
 * 100 x 10us) for the bit to self-clear.  Returns 0 on success,
 * -EBUSY if the PHY never comes out of reset.
 */
971 static int
972 bnx2_reset_phy(struct bnx2 *bp)
973 {
974         int i;
975         u32 reg;
976
977         bnx2_write_phy(bp, MII_BMCR, BMCR_RESET);
978
979 #define PHY_RESET_MAX_WAIT 100
980         for (i = 0; i < PHY_RESET_MAX_WAIT; i++) {
981                 udelay(10);
982
983                 bnx2_read_phy(bp, MII_BMCR, &reg);
984                 if (!(reg & BMCR_RESET)) {
                        /* Reset completed; small settling delay. */
985                         udelay(20);
986                         break;
987                 }
988         }
989         if (i == PHY_RESET_MAX_WAIT) {
990                 return -EBUSY;
991         }
992         return 0;
993 }
994
/* Translate the requested flow control (bp->req_flow_ctrl) into the
 * pause bits to advertise in the ADVERTISE register: the 1000X pause
 * encodings for SerDes PHYs, the standard PAUSE_CAP/PAUSE_ASYM
 * encodings for copper.  Returns 0 when no flow control is requested.
 */
995 static u32
996 bnx2_phy_get_pause_adv(struct bnx2 *bp)
997 {
998         u32 adv = 0;
999
            /* Both directions requested -> symmetric pause. */
1000         if ((bp->req_flow_ctrl & (FLOW_CTRL_RX | FLOW_CTRL_TX)) ==
1001                 (FLOW_CTRL_RX | FLOW_CTRL_TX)) {
1002
1003                 if (bp->phy_flags & PHY_SERDES_FLAG) {
1004                         adv = ADVERTISE_1000XPAUSE;
1005                 }
1006                 else {
1007                         adv = ADVERTISE_PAUSE_CAP;
1008                 }
1009         }
             /* TX only -> asymmetric pause. */
1010         else if (bp->req_flow_ctrl & FLOW_CTRL_TX) {
1011                 if (bp->phy_flags & PHY_SERDES_FLAG) {
1012                         adv = ADVERTISE_1000XPSE_ASYM;
1013                 }
1014                 else {
1015                         adv = ADVERTISE_PAUSE_ASYM;
1016                 }
1017         }
             /* RX only -> advertise both bits (pause + asym). */
1018         else if (bp->req_flow_ctrl & FLOW_CTRL_RX) {
1019                 if (bp->phy_flags & PHY_SERDES_FLAG) {
1020                         adv = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
1021                 }
1022                 else {
1023                         adv = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
1024                 }
1025         }
1026         return adv;
1027 }
1028
/* Configure the SerDes PHY according to bp->autoneg and bp->req_*:
 * either force speed/duplex (including the 5708's 2.5G mode via the
 * UP1 register) or program the advertisement and restart
 * autonegotiation.  Caller holds bp->phy_lock; the lock is dropped
 * briefly around the msleep() used to force the link down.
 * Always returns 0.
 */
1029 static int
1030 bnx2_setup_serdes_phy(struct bnx2 *bp)
1031 {
1032         u32 adv, bmcr, up1;
1033         u32 new_adv = 0;
1034
             /* Forced speed/duplex path. */
1035         if (!(bp->autoneg & AUTONEG_SPEED)) {
1036                 u32 new_bmcr;
1037                 int force_link_down = 0;
1038
1039                 bnx2_read_phy(bp, MII_ADVERTISE, &adv);
1040                 adv &= ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF);
1041
1042                 bnx2_read_phy(bp, MII_BMCR, &bmcr);
1043                 new_bmcr = bmcr & ~(BMCR_ANENABLE | BCM5708S_BMCR_FORCE_2500);
1044                 new_bmcr |= BMCR_SPEED1000;
                     /* Toggling the 2.5G enable bit in UP1 requires
                      * cycling the link so the partner renegotiates.
                      */
1045                 if (bp->req_line_speed == SPEED_2500) {
1046                         new_bmcr |= BCM5708S_BMCR_FORCE_2500;
1047                         bnx2_read_phy(bp, BCM5708S_UP1, &up1);
1048                         if (!(up1 & BCM5708S_UP1_2G5)) {
1049                                 up1 |= BCM5708S_UP1_2G5;
1050                                 bnx2_write_phy(bp, BCM5708S_UP1, up1);
1051                                 force_link_down = 1;
1052                         }
1053                 } else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
1054                         bnx2_read_phy(bp, BCM5708S_UP1, &up1);
1055                         if (up1 & BCM5708S_UP1_2G5) {
1056                                 up1 &= ~BCM5708S_UP1_2G5;
1057                                 bnx2_write_phy(bp, BCM5708S_UP1, up1);
1058                                 force_link_down = 1;
1059                         }
1060                 }
1061
1062                 if (bp->req_duplex == DUPLEX_FULL) {
1063                         adv |= ADVERTISE_1000XFULL;
1064                         new_bmcr |= BMCR_FULLDPLX;
1065                 }
1066                 else {
1067                         adv |= ADVERTISE_1000XHALF;
1068                         new_bmcr &= ~BMCR_FULLDPLX;
1069                 }
1070                 if ((new_bmcr != bmcr) || (force_link_down)) {
1071                         /* Force a link down visible on the other side */
1072                         if (bp->link_up) {
                                 /* Clear the advertisement and restart
                                  * autoneg so the partner sees link loss,
                                  * then apply the forced settings.
                                  */
1073                                 bnx2_write_phy(bp, MII_ADVERTISE, adv &
1074                                                ~(ADVERTISE_1000XFULL |
1075                                                  ADVERTISE_1000XHALF));
1076                                 bnx2_write_phy(bp, MII_BMCR, bmcr |
1077                                         BMCR_ANRESTART | BMCR_ANENABLE);
1078
1079                                 bp->link_up = 0;
1080                                 netif_carrier_off(bp->dev);
1081                                 bnx2_write_phy(bp, MII_BMCR, new_bmcr);
1082                                 bnx2_report_link(bp);
1083                         }
1084                         bnx2_write_phy(bp, MII_ADVERTISE, adv);
1085                         bnx2_write_phy(bp, MII_BMCR, new_bmcr);
1086                 }
1087                 return 0;
1088         }
1089
             /* Autonegotiation path.  Enable 2.5G capability first if
              * the PHY supports it.
              */
1090         if (bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) {
1091                 bnx2_read_phy(bp, BCM5708S_UP1, &up1);
1092                 up1 |= BCM5708S_UP1_2G5;
1093                 bnx2_write_phy(bp, BCM5708S_UP1, up1);
1094         }
1095
1096         if (bp->advertising & ADVERTISED_1000baseT_Full)
1097                 new_adv |= ADVERTISE_1000XFULL;
1098
1099         new_adv |= bnx2_phy_get_pause_adv(bp);
1100
1101         bnx2_read_phy(bp, MII_ADVERTISE, &adv);
1102         bnx2_read_phy(bp, MII_BMCR, &bmcr);
1103
             /* Only restart autoneg if the advertisement changed or
              * autoneg is currently disabled.
              */
1104         bp->serdes_an_pending = 0;
1105         if ((adv != new_adv) || ((bmcr & BMCR_ANENABLE) == 0)) {
1106                 /* Force a link down visible on the other side */
1107                 if (bp->link_up) {
1108                         bnx2_write_phy(bp, MII_BMCR, BMCR_LOOPBACK);
                             /* Drop phy_lock across the sleep. */
1109                         spin_unlock_bh(&bp->phy_lock);
1110                         msleep(20);
1111                         spin_lock_bh(&bp->phy_lock);
1112                 }
1113
1114                 bnx2_write_phy(bp, MII_ADVERTISE, new_adv);
1115                 bnx2_write_phy(bp, MII_BMCR, bmcr | BMCR_ANRESTART |
1116                         BMCR_ANENABLE);
1117                 /* Speed up link-up time when the link partner
1118                  * does not autonegotiate which is very common
1119                  * in blade servers. Some blade servers use
1120                  * IPMI for keyboard input and it's important
1121                  * to minimize link disruptions. Autoneg. involves
1122                  * exchanging base pages plus 3 next pages and
1123                  * normally completes in about 120 msec.
1124                  */
1125                 bp->current_interval = SERDES_AN_TIMEOUT;
1126                 bp->serdes_an_pending = 1;
1127                 mod_timer(&bp->timer, jiffies + bp->current_interval);
1128         }
1129
1130         return 0;
1131 }
1132
/* Advertisement masks used when programming autonegotiation:
 * ethtool ADVERTISED_* capability sets for fibre and copper, and
 * the MII ADVERTISE_*/CTRL1000 register bit groups for 10/100 and
 * 1000 Mbps modes.
 */
1133 #define ETHTOOL_ALL_FIBRE_SPEED                                         \
1134         (ADVERTISED_1000baseT_Full)
1135
1136 #define ETHTOOL_ALL_COPPER_SPEED                                        \
1137         (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |            \
1138         ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |           \
1139         ADVERTISED_1000baseT_Full)
1140
1141 #define PHY_ALL_10_100_SPEED (ADVERTISE_10HALF | ADVERTISE_10FULL | \
1142         ADVERTISE_100HALF | ADVERTISE_100FULL | ADVERTISE_CSMA)
1143
1144 #define PHY_ALL_1000_SPEED (ADVERTISE_1000HALF | ADVERTISE_1000FULL)
1145
/* Configure the copper PHY from bp->autoneg/advertising/req_*:
 * either program the 10/100/1000 advertisement registers and restart
 * autonegotiation, or force speed/duplex through BMCR.  Caller holds
 * bp->phy_lock; it is dropped around the msleep() used to force the
 * link down.  Always returns 0.
 */
1146 static int
1147 bnx2_setup_copper_phy(struct bnx2 *bp)
1148 {
1149         u32 bmcr;
1150         u32 new_bmcr;
1151
1152         bnx2_read_phy(bp, MII_BMCR, &bmcr);
1153
1154         if (bp->autoneg & AUTONEG_SPEED) {
1155                 u32 adv_reg, adv1000_reg;
1156                 u32 new_adv_reg = 0;
1157                 u32 new_adv1000_reg = 0;
1158
                     /* Mask the current advertisement down to the bits we
                      * manage so the comparison below is meaningful.
                      */
1159                 bnx2_read_phy(bp, MII_ADVERTISE, &adv_reg);
1160                 adv_reg &= (PHY_ALL_10_100_SPEED | ADVERTISE_PAUSE_CAP |
1161                         ADVERTISE_PAUSE_ASYM);
1162
1163                 bnx2_read_phy(bp, MII_CTRL1000, &adv1000_reg);
1164                 adv1000_reg &= PHY_ALL_1000_SPEED;
1165
                     /* Build the new advertisement from the ethtool
                      * ADVERTISED_* request bits.
                      */
1166                 if (bp->advertising & ADVERTISED_10baseT_Half)
1167                         new_adv_reg |= ADVERTISE_10HALF;
1168                 if (bp->advertising & ADVERTISED_10baseT_Full)
1169                         new_adv_reg |= ADVERTISE_10FULL;
1170                 if (bp->advertising & ADVERTISED_100baseT_Half)
1171                         new_adv_reg |= ADVERTISE_100HALF;
1172                 if (bp->advertising & ADVERTISED_100baseT_Full)
1173                         new_adv_reg |= ADVERTISE_100FULL;
1174                 if (bp->advertising & ADVERTISED_1000baseT_Full)
1175                         new_adv1000_reg |= ADVERTISE_1000FULL;
1176
1177                 new_adv_reg |= ADVERTISE_CSMA;
1178
1179                 new_adv_reg |= bnx2_phy_get_pause_adv(bp);
1180
                     /* Restart autoneg only when something changed or
                      * autoneg is currently off.
                      */
1181                 if ((adv1000_reg != new_adv1000_reg) ||
1182                         (adv_reg != new_adv_reg) ||
1183                         ((bmcr & BMCR_ANENABLE) == 0)) {
1184
1185                         bnx2_write_phy(bp, MII_ADVERTISE, new_adv_reg);
1186                         bnx2_write_phy(bp, MII_CTRL1000, new_adv1000_reg);
1187                         bnx2_write_phy(bp, MII_BMCR, BMCR_ANRESTART |
1188                                 BMCR_ANENABLE);
1189                 }
1190                 else if (bp->link_up) {
1191                         /* Flow ctrl may have changed from auto to forced */
1192                         /* or vice-versa. */
1193
1194                         bnx2_resolve_flow_ctrl(bp);
1195                         bnx2_set_mac_link(bp);
1196                 }
1197                 return 0;
1198         }
1199
             /* Forced speed/duplex path. */
1200         new_bmcr = 0;
1201         if (bp->req_line_speed == SPEED_100) {
1202                 new_bmcr |= BMCR_SPEED100;
1203         }
1204         if (bp->req_duplex == DUPLEX_FULL) {
1205                 new_bmcr |= BMCR_FULLDPLX;
1206         }
1207         if (new_bmcr != bmcr) {
1208                 u32 bmsr;
1209
                     /* Read twice: BMSR link status is latched. */
1210                 bnx2_read_phy(bp, MII_BMSR, &bmsr);
1211                 bnx2_read_phy(bp, MII_BMSR, &bmsr);
1212
1213                 if (bmsr & BMSR_LSTATUS) {
1214                         /* Force link down */
1215                         bnx2_write_phy(bp, MII_BMCR, BMCR_LOOPBACK);
1216                         spin_unlock_bh(&bp->phy_lock);
1217                         msleep(50);
1218                         spin_lock_bh(&bp->phy_lock);
1219
1220                         bnx2_read_phy(bp, MII_BMSR, &bmsr);
1221                         bnx2_read_phy(bp, MII_BMSR, &bmsr);
1222                 }
1223
1224                 bnx2_write_phy(bp, MII_BMCR, new_bmcr);
1225
1226                 /* Normally, the new speed is setup after the link has
1227                  * gone down and up again. In some cases, link will not go
1228                  * down so we need to set up the new speed here.
1229                  */
1230                 if (bmsr & BMSR_LSTATUS) {
1231                         bp->line_speed = bp->req_line_speed;
1232                         bp->duplex = bp->req_duplex;
1233                         bnx2_resolve_flow_ctrl(bp);
1234                         bnx2_set_mac_link(bp);
1235                 }
1236         }
1237         return 0;
1238 }
1239
/* Dispatch PHY setup to the SerDes or copper routine based on
 * bp->phy_flags.  No-op (returns 0) in MAC loopback mode since the
 * PHY is bypassed there.
 */
1240 static int
1241 bnx2_setup_phy(struct bnx2 *bp)
1242 {
1243         if (bp->loopback == MAC_LOOPBACK)
1244                 return 0;
1245
1246         if (bp->phy_flags & PHY_SERDES_FLAG) {
1247                 return (bnx2_setup_serdes_phy(bp));
1248         }
1249         else {
1250                 return (bnx2_setup_copper_phy(bp));
1251         }
1252 }
1253
/* One-time init of the 5708 SerDes PHY: select IEEE register mapping,
 * enable fiber mode with auto-detect, enable parallel detect, enable
 * 2.5G if capable, apply a TX-amplitude fix for A0/B0/B1 silicon, and
 * program a board-specific TX control value from shared memory on
 * backplane designs.  Always returns 0.
 */
1254 static int
1255 bnx2_init_5708s_phy(struct bnx2 *bp)
1256 {
1257         u32 val;
1258
             /* Registers are banked: select the DIG3 block, set
              * IEEE-compliant register usage, then return to DIG.
              */
1259         bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG3);
1260         bnx2_write_phy(bp, BCM5708S_DIG_3_0, BCM5708S_DIG_3_0_USE_IEEE);
1261         bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);
1262
1263         bnx2_read_phy(bp, BCM5708S_1000X_CTL1, &val);
1264         val |= BCM5708S_1000X_CTL1_FIBER_MODE | BCM5708S_1000X_CTL1_AUTODET_EN;
1265         bnx2_write_phy(bp, BCM5708S_1000X_CTL1, val);
1266
1267         bnx2_read_phy(bp, BCM5708S_1000X_CTL2, &val);
1268         val |= BCM5708S_1000X_CTL2_PLLEL_DET_EN;
1269         bnx2_write_phy(bp, BCM5708S_1000X_CTL2, val);
1270
1271         if (bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) {
1272                 bnx2_read_phy(bp, BCM5708S_UP1, &val);
1273                 val |= BCM5708S_UP1_2G5;
1274                 bnx2_write_phy(bp, BCM5708S_UP1, val);
1275         }
1276
             /* Silicon-revision workaround for early 5708 steppings. */
1277         if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
1278             (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
1279             (CHIP_ID(bp) == CHIP_ID_5708_B1)) {
1280                 /* increase tx signal amplitude */
1281                 bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
1282                                BCM5708S_BLK_ADDR_TX_MISC);
1283                 bnx2_read_phy(bp, BCM5708S_TX_ACTL1, &val);
1284                 val &= ~BCM5708S_TX_ACTL1_DRIVER_VCM;
1285                 bnx2_write_phy(bp, BCM5708S_TX_ACTL1, val);
1286                 bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);
1287         }
1288
             /* Board-specific TX control override supplied by the
              * bootcode via shared memory; applied only on backplane
              * (blade) hardware.
              */
1289         val = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_CONFIG) &
1290               BNX2_PORT_HW_CFG_CFG_TXCTL3_MASK;
1291
1292         if (val) {
1293                 u32 is_backplane;
1294
1295                 is_backplane = REG_RD_IND(bp, bp->shmem_base +
1296                                           BNX2_SHARED_HW_CFG_CONFIG);
1297                 if (is_backplane & BNX2_SHARED_HW_CFG_PHY_BACKPLANE) {
1298                         bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
1299                                        BCM5708S_BLK_ADDR_TX_MISC);
1300                         bnx2_write_phy(bp, BCM5708S_TX_ACTL3, val);
1301                         bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
1302                                        BCM5708S_BLK_ADDR_DIG);
1303                 }
1304         }
1305         return 0;
1306 }
1307
/* One-time init of the 5706 SerDes PHY.  Clears parallel-detect
 * state, writes a GP hardware control value on the 5706, and
 * programs vendor-specific PHY registers (0x18/0x1c) differently
 * depending on whether jumbo frames (mtu > 1500) are in use.
 * The exact register semantics are not documented here; values are
 * taken as-is from the vendor.  Always returns 0.
 */
1308 static int
1309 bnx2_init_5706s_phy(struct bnx2 *bp)
1310 {
1311         bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG;
1312
1313         if (CHIP_NUM(bp) == CHIP_NUM_5706)
1314                 REG_WR(bp, BNX2_MISC_GP_HW_CTL0, 0x300);
1315
1316         if (bp->dev->mtu > 1500) {
1317                 u32 val;
1318
1319                 /* Set extended packet length bit */
1320                 bnx2_write_phy(bp, 0x18, 0x7);
1321                 bnx2_read_phy(bp, 0x18, &val);
1322                 bnx2_write_phy(bp, 0x18, (val & 0xfff8) | 0x4000);
1323
1324                 bnx2_write_phy(bp, 0x1c, 0x6c00);
1325                 bnx2_read_phy(bp, 0x1c, &val);
1326                 bnx2_write_phy(bp, 0x1c, (val & 0x3ff) | 0xec02);
1327         }
1328         else {
1329                 u32 val;
1330
                     /* Standard MTU: clear the extended-length bits. */
1331                 bnx2_write_phy(bp, 0x18, 0x7);
1332                 bnx2_read_phy(bp, 0x18, &val);
1333                 bnx2_write_phy(bp, 0x18, val & ~0x4007);
1334
1335                 bnx2_write_phy(bp, 0x1c, 0x6c00);
1336                 bnx2_read_phy(bp, 0x1c, &val);
1337                 bnx2_write_phy(bp, 0x1c, (val & 0x3fd) | 0xec00);
1338         }
1339
1340         return 0;
1341 }
1342
/* One-time init of the copper PHY: apply a CRC workaround sequence
 * if flagged, disable early DAC if flagged, set or clear the
 * extended packet length bits depending on MTU, and enable
 * ethernet@wirespeed.  The 0x10/0x15/0x17/0x18/0x1c registers are
 * vendor-specific; values are taken as-is.  Always returns 0.
 */
1343 static int
1344 bnx2_init_copper_phy(struct bnx2 *bp)
1345 {
1346         u32 val;
1347
             /* Scripted write sequence for the CRC-fix workaround
              * (indirect register writes via 0x17/0x15).
              */
1348         if (bp->phy_flags & PHY_CRC_FIX_FLAG) {
1349                 bnx2_write_phy(bp, 0x18, 0x0c00);
1350                 bnx2_write_phy(bp, 0x17, 0x000a);
1351                 bnx2_write_phy(bp, 0x15, 0x310b);
1352                 bnx2_write_phy(bp, 0x17, 0x201f);
1353                 bnx2_write_phy(bp, 0x15, 0x9506);
1354                 bnx2_write_phy(bp, 0x17, 0x401f);
1355                 bnx2_write_phy(bp, 0x15, 0x14e2);
1356                 bnx2_write_phy(bp, 0x18, 0x0400);
1357         }
1358
             /* Clear bit 8 in DSP expansion register 8 to disable
              * early DAC on affected parts.
              */
1359         if (bp->phy_flags & PHY_DIS_EARLY_DAC_FLAG) {
1360                 bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS,
1361                                MII_BNX2_DSP_EXPAND_REG | 0x8);
1362                 bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &val);
1363                 val &= ~(1 << 8);
1364                 bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val);
1365         }
1366
1367         if (bp->dev->mtu > 1500) {
1368                 /* Set extended packet length bit */
1369                 bnx2_write_phy(bp, 0x18, 0x7);
1370                 bnx2_read_phy(bp, 0x18, &val);
1371                 bnx2_write_phy(bp, 0x18, val | 0x4000);
1372
1373                 bnx2_read_phy(bp, 0x10, &val);
1374                 bnx2_write_phy(bp, 0x10, val | 0x1);
1375         }
1376         else {
                     /* Standard MTU: clear the extended-length bits. */
1377                 bnx2_write_phy(bp, 0x18, 0x7);
1378                 bnx2_read_phy(bp, 0x18, &val);
1379                 bnx2_write_phy(bp, 0x18, val & ~0x4007);
1380
1381                 bnx2_read_phy(bp, 0x10, &val);
1382                 bnx2_write_phy(bp, 0x10, val & ~0x1);
1383         }
1384
1385         /* ethernet@wirespeed */
1386         bnx2_write_phy(bp, 0x18, 0x7007);
1387         bnx2_read_phy(bp, 0x18, &val);
1388         bnx2_write_phy(bp, 0x18, val | (1 << 15) | (1 << 4));
1389         return 0;
1390 }
1391
1392
/* Full PHY bring-up: select link-ready interrupt mode, enable the
 * EMAC link attention, reset the PHY, read its ID, run the
 * chip-specific init routine (5706/5708 SerDes or copper), then
 * apply the current speed/duplex/autoneg configuration.  Returns
 * the status of the chip-specific init routine.
 */
1393 static int
1394 bnx2_init_phy(struct bnx2 *bp)
1395 {
1396         u32 val;
1397         int rc = 0;
1398
1399         bp->phy_flags &= ~PHY_INT_MODE_MASK_FLAG;
1400         bp->phy_flags |= PHY_INT_MODE_LINK_READY_FLAG;
1401
1402         REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);
1403
1404         bnx2_reset_phy(bp);
1405
             /* Assemble the 32-bit PHY id from the two ID registers. */
1406         bnx2_read_phy(bp, MII_PHYSID1, &val);
1407         bp->phy_id = val << 16;
1408         bnx2_read_phy(bp, MII_PHYSID2, &val);
1409         bp->phy_id |= val & 0xffff;
1410
1411         if (bp->phy_flags & PHY_SERDES_FLAG) {
1412                 if (CHIP_NUM(bp) == CHIP_NUM_5706)
1413                         rc = bnx2_init_5706s_phy(bp);
1414                 else if (CHIP_NUM(bp) == CHIP_NUM_5708)
1415                         rc = bnx2_init_5708s_phy(bp);
1416         }
1417         else {
1418                 rc = bnx2_init_copper_phy(bp);
1419         }
1420
1421         bnx2_setup_phy(bp);
1422
1423         return rc;
1424 }
1425
/* Put the EMAC into internal MAC loopback with forced link and mark
 * the link up.  Used by the self-test paths.  Always returns 0.
 */
1426 static int
1427 bnx2_set_mac_loopback(struct bnx2 *bp)
1428 {
1429         u32 mac_mode;
1430
1431         mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
1432         mac_mode &= ~BNX2_EMAC_MODE_PORT;
1433         mac_mode |= BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK;
1434         REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
1435         bp->link_up = 1;
1436         return 0;
1437 }
1438
1439 static int bnx2_test_link(struct bnx2 *);
1440
/* Put the PHY into loopback at forced 1000/full, wait up to ~1s for
 * the link to settle, and configure the EMAC for GMII with loopback
 * bits cleared.  Returns the status of the BMCR write; 0 on success.
 */
1441 static int
1442 bnx2_set_phy_loopback(struct bnx2 *bp)
1443 {
1444         u32 mac_mode;
1445         int rc, i;
1446
1447         spin_lock_bh(&bp->phy_lock);
1448         rc = bnx2_write_phy(bp, MII_BMCR, BMCR_LOOPBACK | BMCR_FULLDPLX |
1449                             BMCR_SPEED1000);
1450         spin_unlock_bh(&bp->phy_lock);
1451         if (rc)
1452                 return rc;
1453
             /* Poll for link-up; give it up to 10 x 100ms. */
1454         for (i = 0; i < 10; i++) {
1455                 if (bnx2_test_link(bp) == 0)
1456                         break;
1457                 msleep(100);
1458         }
1459
1460         mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
1461         mac_mode &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
1462                       BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
1463                       BNX2_EMAC_MODE_25G_MODE);
1464
1465         mac_mode |= BNX2_EMAC_MODE_PORT_GMII;
1466         REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
1467         bp->link_up = 1;
1468         return 0;
1469 }
1470
/* Synchronous driver->firmware mailbox handshake.  Tags msg_data with
 * an incrementing sequence number, posts it to the DRV mailbox in
 * shared memory, and polls the FW mailbox (10ms steps, up to
 * FW_ACK_TIME_OUT_MS) for an ack carrying the same sequence.
 * Returns 0 on success or for WAIT0-class messages, -EBUSY on ack
 * timeout (after notifying the firmware of the timeout), -EIO if the
 * firmware acks with a non-OK status.  @silent suppresses the timeout
 * message.
 */
1471 static int
1472 bnx2_fw_sync(struct bnx2 *bp, u32 msg_data, int silent)
1473 {
1474         int i;
1475         u32 val;
1476
             /* Tag the message with a fresh sequence number so the ack
              * below can be matched to this request.
              */
1477         bp->fw_wr_seq++;
1478         msg_data |= bp->fw_wr_seq;
1479
1480         REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_MB, msg_data);
1481
1482         /* wait for an acknowledgement. */
1483         for (i = 0; i < (FW_ACK_TIME_OUT_MS / 10); i++) {
1484                 msleep(10);
1485
1486                 val = REG_RD_IND(bp, bp->shmem_base + BNX2_FW_MB);
1487
                     /* Ack field echoes our sequence number when the
                      * firmware has processed the message.
                      */
1488                 if ((val & BNX2_FW_MSG_ACK) == (msg_data & BNX2_DRV_MSG_SEQ))
1489                         break;
1490         }
             /* WAIT0 messages don't require an ack/status check. */
1491         if ((msg_data & BNX2_DRV_MSG_DATA) == BNX2_DRV_MSG_DATA_WAIT0)
1492                 return 0;
1493
1494         /* If we timed out, inform the firmware that this is the case. */
1495         if ((val & BNX2_FW_MSG_ACK) != (msg_data & BNX2_DRV_MSG_SEQ)) {
1496                 if (!silent)
1497                         printk(KERN_ERR PFX "fw sync timeout, reset code = "
1498                                             "%x\n", msg_data);
1499
1500                 msg_data &= ~BNX2_DRV_MSG_CODE;
1501                 msg_data |= BNX2_DRV_MSG_CODE_FW_TIMEOUT;
1502
1503                 REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_MB, msg_data);
1504
1505                 return -EBUSY;
1506         }
1507
1508         if ((val & BNX2_FW_MSG_STATUS_MASK) != BNX2_FW_MSG_STATUS_OK)
1509                 return -EIO;
1510
1511         return 0;
1512 }
1513
/* Program the 5709's host-paged context memory: enable the context
 * block with the host page size, then write each context page's DMA
 * address into the page table, polling (10 x 5us) for each write
 * request to complete.  Returns 0 on success, -EBUSY if a page-table
 * write never completes.
 */
1514 static int
1515 bnx2_init_5709_context(struct bnx2 *bp)
1516 {
1517         int i, ret = 0;
1518         u32 val;
1519
             /* Bits 16+ encode the host page size relative to 256 bytes;
              * bit 12 is set per vendor initialization (purpose not
              * documented here).
              */
1520         val = BNX2_CTX_COMMAND_ENABLED | BNX2_CTX_COMMAND_MEM_INIT | (1 << 12);
1521         val |= (BCM_PAGE_BITS - 8) << 16;
1522         REG_WR(bp, BNX2_CTX_COMMAND, val);
1523         for (i = 0; i < bp->ctx_pages; i++) {
1524                 int j;
1525
                     /* Low/high halves of the page DMA address, then kick
                      * off the page-table write for entry i.
                      */
1526                 REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA0,
1527                        (bp->ctx_blk_mapping[i] & 0xffffffff) |
1528                        BNX2_CTX_HOST_PAGE_TBL_DATA0_VALID);
1529                 REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA1,
1530                        (u64) bp->ctx_blk_mapping[i] >> 32);
1531                 REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL, i |
1532                        BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ);
1533                 for (j = 0; j < 10; j++) {
1534
1535                         val = REG_RD(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL);
1536                         if (!(val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ))
1537                                 break;
1538                         udelay(5);
1539                 }
1540                 if (val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ) {
1541                         ret = -EBUSY;
1542                         break;
1543                 }
1544         }
1545         return ret;
1546 }
1547
/* Zero out the on-chip context memory for all 96 connection IDs
 * (used on chips without host-paged context).  On 5706 A0 silicon,
 * certain vcids are remapped to alternate physical context IDs —
 * presumably a hardware erratum workaround; the mapping is taken
 * as-is from the vendor.
 */
1548 static void
1549 bnx2_init_context(struct bnx2 *bp)
1550 {
1551         u32 vcid;
1552
1553         vcid = 96;
1554         while (vcid) {
1555                 u32 vcid_addr, pcid_addr, offset;
1556
1557                 vcid--;
1558
1559                 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
1560                         u32 new_vcid;
1561
1562                         vcid_addr = GET_PCID_ADDR(vcid);
                             /* Remap vcids with bit 3 set into the 0x60
                              * range (A0-specific).
                              */
1563                         if (vcid & 0x8) {
1564                                 new_vcid = 0x60 + (vcid & 0xf0) + (vcid & 0x7);
1565                         }
1566                         else {
1567                                 new_vcid = vcid;
1568                         }
1569                         pcid_addr = GET_PCID_ADDR(new_vcid);
1570                 }
1571                 else {
1572                         vcid_addr = GET_CID_ADDR(vcid);
1573                         pcid_addr = vcid_addr;
1574                 }
1575
                     /* Map the physical context page at virtual addr 0,
                      * clear it, then restore the vcid mapping.
                      */
1576                 REG_WR(bp, BNX2_CTX_VIRT_ADDR, 0x00);
1577                 REG_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr);
1578
1579                 /* Zero out the context. */
1580                 for (offset = 0; offset < PHY_CTX_SIZE; offset += 4) {
1581                         CTX_WR(bp, 0x00, offset, 0);
1582                 }
1583
1584                 REG_WR(bp, BNX2_CTX_VIRT_ADDR, vcid_addr);
1585                 REG_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr);
1586         }
1587 }
1588
/* Scrub bad blocks out of the chip's RX mbuf pool: drain the free
 * pool by allocating every buffer, remember the good ones (bit 9
 * clear), then free only the good ones back — leaving the bad blocks
 * permanently allocated and thus unusable.  Returns 0 on success,
 * -ENOMEM if the temporary tracking array can't be allocated.
 */
1589 static int
1590 bnx2_alloc_bad_rbuf(struct bnx2 *bp)
1591 {
1592         u16 *good_mbuf;
1593         u32 good_mbuf_cnt;
1594         u32 val;
1595
             /* 512 entries — assumes the free pool never holds more than
              * 512 buffers; TODO confirm against the RBUF sizing.
              */
1596         good_mbuf = kmalloc(512 * sizeof(u16), GFP_KERNEL);
1597         if (good_mbuf == NULL) {
1598                 printk(KERN_ERR PFX "Failed to allocate memory in "
1599                                     "bnx2_alloc_bad_rbuf\n");
1600                 return -ENOMEM;
1601         }
1602
1603         REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
1604                 BNX2_MISC_ENABLE_SET_BITS_RX_MBUF_ENABLE);
1605
1606         good_mbuf_cnt = 0;
1607
1608         /* Allocate a bunch of mbufs and save the good ones in an array. */
1609         val = REG_RD_IND(bp, BNX2_RBUF_STATUS1);
1610         while (val & BNX2_RBUF_STATUS1_FREE_COUNT) {
1611                 REG_WR_IND(bp, BNX2_RBUF_COMMAND, BNX2_RBUF_COMMAND_ALLOC_REQ);
1612
1613                 val = REG_RD_IND(bp, BNX2_RBUF_FW_BUF_ALLOC);
1614
1615                 val &= BNX2_RBUF_FW_BUF_ALLOC_VALUE;
1616
1617                 /* The addresses with Bit 9 set are bad memory blocks. */
1618                 if (!(val & (1 << 9))) {
1619                         good_mbuf[good_mbuf_cnt] = (u16) val;
1620                         good_mbuf_cnt++;
1621                 }
1622
1623                 val = REG_RD_IND(bp, BNX2_RBUF_STATUS1);
1624         }
1625
1626         /* Free the good ones back to the mbuf pool thus discarding
1627          * all the bad ones. */
1628         while (good_mbuf_cnt) {
1629                 good_mbuf_cnt--;
1630
                     /* Pack the buffer id into the free-command format
                      * expected by the RBUF firmware interface.
                      */
1631                 val = good_mbuf[good_mbuf_cnt];
1632                 val = (val << 9) | val | 1;
1633
1634                 REG_WR_IND(bp, BNX2_RBUF_FW_BUF_FREE, val);
1635         }
1636         kfree(good_mbuf);
1637         return 0;
1638 }
1639
/* Program the device's unicast MAC address into the EMAC perfect-
 * match registers: the two high-order bytes go into MATCH0, the
 * remaining four into MATCH1, big-endian within each register.
 */
1640 static void
1641 bnx2_set_mac_addr(struct bnx2 *bp)
1642 {
1643         u32 val;
1644         u8 *mac_addr = bp->dev->dev_addr;
1645
1646         val = (mac_addr[0] << 8) | mac_addr[1];
1647
1648         REG_WR(bp, BNX2_EMAC_MAC_MATCH0, val);
1649
1650         val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
1651                 (mac_addr[4] << 8) | mac_addr[5];
1652
1653         REG_WR(bp, BNX2_EMAC_MAC_MATCH1, val);
1654 }
1655
/* Allocate and DMA-map a fresh receive skb for RX ring slot @index,
 * aligning the data pointer to BNX2_RX_ALIGN, and fill in the
 * corresponding rx_bd with the 64-bit bus address.  Also advances
 * bp->rx_prod_bseq (running producer byte count).  Returns 0 on
 * success, -ENOMEM if the skb allocation fails.
 */
1656 static inline int
1657 bnx2_alloc_rx_skb(struct bnx2 *bp, u16 index)
1658 {
1659         struct sk_buff *skb;
1660         struct sw_bd *rx_buf = &bp->rx_buf_ring[index];
1661         dma_addr_t mapping;
1662         struct rx_bd *rxbd = &bp->rx_desc_ring[RX_RING(index)][RX_IDX(index)];
1663         unsigned long align;
1664
1665         skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
1666         if (skb == NULL) {
1667                 return -ENOMEM;
1668         }
1669
             /* Push skb->data up to the next BNX2_RX_ALIGN boundary. */
1670         if (unlikely((align = (unsigned long) skb->data & (BNX2_RX_ALIGN - 1))))
1671                 skb_reserve(skb, BNX2_RX_ALIGN - align);
1672
1673         mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_use_size,
1674                 PCI_DMA_FROMDEVICE);
1675
             /* Record skb and mapping for later unmap on completion. */
1676         rx_buf->skb = skb;
1677         pci_unmap_addr_set(rx_buf, mapping, mapping);
1678
1679         rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
1680         rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;
1681
1682         bp->rx_prod_bseq += bp->rx_buf_use_size;
1683
1684         return 0;
1685 }
1686
1687 static void
1688 bnx2_phy_int(struct bnx2 *bp)
1689 {
1690         u32 new_link_state, old_link_state;
1691
1692         new_link_state = bp->status_blk->status_attn_bits &
1693                 STATUS_ATTN_BITS_LINK_STATE;
1694         old_link_state = bp->status_blk->status_attn_bits_ack &
1695                 STATUS_ATTN_BITS_LINK_STATE;
1696         if (new_link_state != old_link_state) {
1697                 if (new_link_state) {
1698                         REG_WR(bp, BNX2_PCICFG_STATUS_BIT_SET_CMD,
1699                                 STATUS_ATTN_BITS_LINK_STATE);
1700                 }
1701                 else {
1702                         REG_WR(bp, BNX2_PCICFG_STATUS_BIT_CLEAR_CMD,
1703                                 STATUS_ATTN_BITS_LINK_STATE);
1704                 }
1705                 bnx2_set_link(bp);
1706         }
1707 }
1708
/* Reclaim completed tx descriptors up to the hardware consumer index
 * published in the status block: unmap and free each skb, then wake
 * the tx queue if it was stopped and enough BDs have been freed.
 */
static void
bnx2_tx_int(struct bnx2 *bp)
{
	struct status_block *sblk = bp->status_blk;
	u16 hw_cons, sw_cons, sw_ring_cons;
	int tx_free_bd = 0;	/* BDs reclaimed in this pass */

	hw_cons = bp->hw_tx_cons = sblk->status_tx_quick_consumer_index0;
	/* The entry at MAX_TX_DESC_CNT in each ring page is not a
	 * normal BD (presumably the next-page chain entry); skip it.
	 */
	if ((hw_cons & MAX_TX_DESC_CNT) == MAX_TX_DESC_CNT) {
		hw_cons++;
	}
	sw_cons = bp->tx_cons;

	while (sw_cons != hw_cons) {
		struct sw_bd *tx_buf;
		struct sk_buff *skb;
		int i, last;

		sw_ring_cons = TX_RING_IDX(sw_cons);

		tx_buf = &bp->tx_buf_ring[sw_ring_cons];
		skb = tx_buf->skb;
#ifdef BCM_TSO
		/* partial BD completions possible with TSO packets */
		if (skb_is_gso(skb)) {
			u16 last_idx, last_ring_idx;

			last_idx = sw_cons +
				skb_shinfo(skb)->nr_frags + 1;
			last_ring_idx = sw_ring_cons +
				skb_shinfo(skb)->nr_frags + 1;
			if (unlikely(last_ring_idx >= MAX_TX_DESC_CNT)) {
				last_idx++;
			}
			/* If the hardware has not yet consumed every BD
			 * of this packet, stop and retry on a later pass.
			 */
			if (((s16) ((s16) last_idx - (s16) hw_cons)) > 0) {
				break;
			}
		}
#endif
		pci_unmap_single(bp->pdev, pci_unmap_addr(tx_buf, mapping),
			skb_headlen(skb), PCI_DMA_TODEVICE);

		tx_buf->skb = NULL;
		last = skb_shinfo(skb)->nr_frags;

		/* Unmap each fragment BD that follows the header BD. */
		for (i = 0; i < last; i++) {
			sw_cons = NEXT_TX_BD(sw_cons);

			pci_unmap_page(bp->pdev,
				pci_unmap_addr(
					&bp->tx_buf_ring[TX_RING_IDX(sw_cons)],
					mapping),
				skb_shinfo(skb)->frags[i].size,
				PCI_DMA_TODEVICE);
		}

		sw_cons = NEXT_TX_BD(sw_cons);

		tx_free_bd += last + 1;

		dev_kfree_skb(skb);

		/* Re-read the hardware index; more completions may have
		 * arrived while we were freeing.
		 */
		hw_cons = bp->hw_tx_cons =
			sblk->status_tx_quick_consumer_index0;

		if ((hw_cons & MAX_TX_DESC_CNT) == MAX_TX_DESC_CNT) {
			hw_cons++;
		}
	}

	bp->tx_cons = sw_cons;
	/* Need to make the tx_cons update visible to bnx2_start_xmit()
	 * before checking for netif_queue_stopped().  Without the
	 * memory barrier, there is a small possibility that bnx2_start_xmit()
	 * will miss it and cause the queue to be stopped forever.
	 */
	smp_mb();

	if (unlikely(netif_queue_stopped(bp->dev)) &&
		     (bnx2_tx_avail(bp) > bp->tx_wake_thresh)) {
		netif_tx_lock(bp->dev);
		/* Re-check under the tx lock to avoid racing with
		 * bnx2_start_xmit() stopping the queue.
		 */
		if ((netif_queue_stopped(bp->dev)) &&
		    (bnx2_tx_avail(bp) > bp->tx_wake_thresh))
			netif_wake_queue(bp->dev);
		netif_tx_unlock(bp->dev);
	}
}
1796
/* Recycle an rx skb that will not be passed up the stack: move it from
 * ring slot @cons to producer slot @prod, transferring the DMA mapping
 * and BD address so the hardware can refill the same buffer.
 */
static inline void
bnx2_reuse_rx_skb(struct bnx2 *bp, struct sk_buff *skb,
	u16 cons, u16 prod)
{
	struct sw_bd *cons_rx_buf, *prod_rx_buf;
	struct rx_bd *cons_bd, *prod_bd;

	cons_rx_buf = &bp->rx_buf_ring[cons];
	prod_rx_buf = &bp->rx_buf_ring[prod];

	/* Give the header region (synced for CPU in bnx2_rx_int) back
	 * to the device.
	 */
	pci_dma_sync_single_for_device(bp->pdev,
		pci_unmap_addr(cons_rx_buf, mapping),
		bp->rx_offset + RX_COPY_THRESH, PCI_DMA_FROMDEVICE);

	bp->rx_prod_bseq += bp->rx_buf_use_size;

	prod_rx_buf->skb = skb;

	/* Same slot: the mapping and BD address are already correct. */
	if (cons == prod)
		return;

	/* Copy the DMA mapping and hardware BD address from the
	 * consumer slot to the producer slot.
	 */
	pci_unmap_addr_set(prod_rx_buf, mapping,
			pci_unmap_addr(cons_rx_buf, mapping));

	cons_bd = &bp->rx_desc_ring[RX_RING(cons)][RX_IDX(cons)];
	prod_bd = &bp->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
	prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
	prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;
}
1826
/* NAPI rx handler: process up to @budget received packets.  Bad frames
 * and allocation failures recycle their buffer via bnx2_reuse_rx_skb();
 * small packets (when mtu > 1500) are copied into a fresh skb so the
 * large buffer can be reused in place.  Returns the number of packets
 * delivered to the stack.
 */
static int
bnx2_rx_int(struct bnx2 *bp, int budget)
{
	struct status_block *sblk = bp->status_blk;
	u16 hw_cons, sw_cons, sw_ring_cons, sw_prod, sw_ring_prod;
	struct l2_fhdr *rx_hdr;
	int rx_pkt = 0;

	hw_cons = bp->hw_rx_cons = sblk->status_rx_quick_consumer_index0;
	/* Skip the special entry at the end of each ring page (not a
	 * normal BD).
	 */
	if ((hw_cons & MAX_RX_DESC_CNT) == MAX_RX_DESC_CNT) {
		hw_cons++;
	}
	sw_cons = bp->rx_cons;
	sw_prod = bp->rx_prod;

	/* Memory barrier necessary as speculative reads of the rx
	 * buffer can be ahead of the index in the status block
	 */
	rmb();
	while (sw_cons != hw_cons) {
		unsigned int len;
		u32 status;
		struct sw_bd *rx_buf;
		struct sk_buff *skb;
		dma_addr_t dma_addr;

		sw_ring_cons = RX_RING_IDX(sw_cons);
		sw_ring_prod = RX_RING_IDX(sw_prod);

		rx_buf = &bp->rx_buf_ring[sw_ring_cons];
		skb = rx_buf->skb;

		rx_buf->skb = NULL;

		dma_addr = pci_unmap_addr(rx_buf, mapping);

		/* Sync only the header region for the CPU; the whole
		 * buffer is unmapped later only if the skb is consumed.
		 */
		pci_dma_sync_single_for_cpu(bp->pdev, dma_addr,
			bp->rx_offset + RX_COPY_THRESH, PCI_DMA_FROMDEVICE);

		/* The chip places an l2_fhdr status header at the start
		 * of the buffer, ahead of the frame data.
		 */
		rx_hdr = (struct l2_fhdr *) skb->data;
		len = rx_hdr->l2_fhdr_pkt_len - 4;	/* drop trailing 4 bytes (CRC) */

		if ((status = rx_hdr->l2_fhdr_status) &
			(L2_FHDR_ERRORS_BAD_CRC |
			L2_FHDR_ERRORS_PHY_DECODE |
			L2_FHDR_ERRORS_ALIGNMENT |
			L2_FHDR_ERRORS_TOO_SHORT |
			L2_FHDR_ERRORS_GIANT_FRAME)) {

			goto reuse_rx;
		}

		/* Since we don't have a jumbo ring, copy small packets
		 * if mtu > 1500
		 */
		if ((bp->dev->mtu > 1500) && (len <= RX_COPY_THRESH)) {
			struct sk_buff *new_skb;

			new_skb = netdev_alloc_skb(bp->dev, len + 2);
			if (new_skb == NULL)
				goto reuse_rx;

			/* aligned copy */
			memcpy(new_skb->data,
				skb->data + bp->rx_offset - 2,
				len + 2);

			skb_reserve(new_skb, 2);
			skb_put(new_skb, len);

			/* Recycle the original large buffer. */
			bnx2_reuse_rx_skb(bp, skb,
				sw_ring_cons, sw_ring_prod);

			skb = new_skb;
		}
		else if (bnx2_alloc_rx_skb(bp, sw_ring_prod) == 0) {
			/* A replacement buffer is on the ring; this skb
			 * can be handed up the stack.
			 */
			pci_unmap_single(bp->pdev, dma_addr,
				bp->rx_buf_use_size, PCI_DMA_FROMDEVICE);

			skb_reserve(skb, bp->rx_offset);
			skb_put(skb, len);
		}
		else {
reuse_rx:
			/* No replacement buffer: drop the frame and
			 * recycle its buffer.
			 */
			bnx2_reuse_rx_skb(bp, skb,
				sw_ring_cons, sw_ring_prod);
			goto next_rx;
		}

		skb->protocol = eth_type_trans(skb, bp->dev);

		/* Drop oversize frames unless they carry a VLAN
		 * ethertype (0x8100).
		 */
		if ((len > (bp->dev->mtu + ETH_HLEN)) &&
			(ntohs(skb->protocol) != 0x8100)) {

			dev_kfree_skb(skb);
			goto next_rx;

		}

		skb->ip_summed = CHECKSUM_NONE;
		if (bp->rx_csum &&
			(status & (L2_FHDR_STATUS_TCP_SEGMENT |
			L2_FHDR_STATUS_UDP_DATAGRAM))) {

			/* Trust the hardware checksum only when no
			 * checksum error bits are set.
			 */
			if (likely((status & (L2_FHDR_ERRORS_TCP_XSUM |
					      L2_FHDR_ERRORS_UDP_XSUM)) == 0))
				skb->ip_summed = CHECKSUM_UNNECESSARY;
		}

#ifdef BCM_VLAN
		if ((status & L2_FHDR_STATUS_L2_VLAN_TAG) && (bp->vlgrp != 0)) {
			vlan_hwaccel_receive_skb(skb, bp->vlgrp,
				rx_hdr->l2_fhdr_vlan_tag);
		}
		else
#endif
			netif_receive_skb(skb);

		bp->dev->last_rx = jiffies;
		rx_pkt++;

next_rx:
		sw_cons = NEXT_RX_BD(sw_cons);
		sw_prod = NEXT_RX_BD(sw_prod);

		if ((rx_pkt == budget))
			break;

		/* Refresh hw_cons to see if there is new work */
		if (sw_cons == hw_cons) {
			hw_cons = bp->hw_rx_cons =
				sblk->status_rx_quick_consumer_index0;
			if ((hw_cons & MAX_RX_DESC_CNT) == MAX_RX_DESC_CNT)
				hw_cons++;
			rmb();
		}
	}
	bp->rx_cons = sw_cons;
	bp->rx_prod = sw_prod;

	/* Tell the chip the new producer index and byte sequence. */
	REG_WR16(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BDIDX, sw_prod);

	REG_WR(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BSEQ, bp->rx_prod_bseq);

	mmiowb();

	return rx_pkt;

}
1976
1977 /* MSI ISR - The only difference between this and the INTx ISR
1978  * is that the MSI interrupt is always serviced.
1979  */
1980 static irqreturn_t
1981 bnx2_msi(int irq, void *dev_instance)
1982 {
1983         struct net_device *dev = dev_instance;
1984         struct bnx2 *bp = netdev_priv(dev);
1985
1986         prefetch(bp->status_blk);
1987         REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
1988                 BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
1989                 BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
1990
1991         /* Return here if interrupt is disabled. */
1992         if (unlikely(atomic_read(&bp->intr_sem) != 0))
1993                 return IRQ_HANDLED;
1994
1995         netif_rx_schedule(dev);
1996
1997         return IRQ_HANDLED;
1998 }
1999
2000 static irqreturn_t
2001 bnx2_interrupt(int irq, void *dev_instance)
2002 {
2003         struct net_device *dev = dev_instance;
2004         struct bnx2 *bp = netdev_priv(dev);
2005
2006         /* When using INTx, it is possible for the interrupt to arrive
2007          * at the CPU before the status block posted prior to the
2008          * interrupt. Reading a register will flush the status block.
2009          * When using MSI, the MSI message will always complete after
2010          * the status block write.
2011          */
2012         if ((bp->status_blk->status_idx == bp->last_status_idx) &&
2013             (REG_RD(bp, BNX2_PCICFG_MISC_STATUS) &
2014              BNX2_PCICFG_MISC_STATUS_INTA_VALUE))
2015                 return IRQ_NONE;
2016
2017         REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
2018                 BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
2019                 BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
2020
2021         /* Return here if interrupt is shared and is disabled. */
2022         if (unlikely(atomic_read(&bp->intr_sem) != 0))
2023                 return IRQ_HANDLED;
2024
2025         netif_rx_schedule(dev);
2026
2027         return IRQ_HANDLED;
2028 }
2029
2030 static inline int
2031 bnx2_has_work(struct bnx2 *bp)
2032 {
2033         struct status_block *sblk = bp->status_blk;
2034
2035         if ((sblk->status_rx_quick_consumer_index0 != bp->hw_rx_cons) ||
2036             (sblk->status_tx_quick_consumer_index0 != bp->hw_tx_cons))
2037                 return 1;
2038
2039         if (((sblk->status_attn_bits & STATUS_ATTN_BITS_LINK_STATE) != 0) !=
2040             bp->link_up)
2041                 return 1;
2042
2043         return 0;
2044 }
2045
/* NAPI poll handler: service link attention, tx completions, and up to
 * the allowed quota of rx packets.  Returns 0 (after completing NAPI
 * and re-enabling interrupts) when all work is done, or 1 to remain on
 * the poll list.
 */
static int
bnx2_poll(struct net_device *dev, int *budget)
{
	struct bnx2 *bp = netdev_priv(dev);

	/* A mismatch between the attention bits and their acknowledged
	 * copy indicates an unserviced link state change.
	 */
	if ((bp->status_blk->status_attn_bits &
		STATUS_ATTN_BITS_LINK_STATE) !=
		(bp->status_blk->status_attn_bits_ack &
		STATUS_ATTN_BITS_LINK_STATE)) {

		spin_lock(&bp->phy_lock);
		bnx2_phy_int(bp);
		spin_unlock(&bp->phy_lock);

		/* This is needed to take care of transient status
		 * during link changes.
		 */
		REG_WR(bp, BNX2_HC_COMMAND,
		       bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
		/* Read back — presumably flushes the posted write. */
		REG_RD(bp, BNX2_HC_COMMAND);
	}

	if (bp->status_blk->status_tx_quick_consumer_index0 != bp->hw_tx_cons)
		bnx2_tx_int(bp);

	if (bp->status_blk->status_rx_quick_consumer_index0 != bp->hw_rx_cons) {
		/* Cap the rx pass at the smaller of *budget and the
		 * device quota, then charge the work to both.
		 */
		int orig_budget = *budget;
		int work_done;

		if (orig_budget > dev->quota)
			orig_budget = dev->quota;

		work_done = bnx2_rx_int(bp, orig_budget);
		*budget -= work_done;
		dev->quota -= work_done;
	}

	/* Record the status index before the final work check so that a
	 * new event arriving after the check raises a new interrupt.
	 */
	bp->last_status_idx = bp->status_blk->status_idx;
	rmb();

	if (!bnx2_has_work(bp)) {
		netif_rx_complete(dev);
		if (likely(bp->flags & USING_MSI_FLAG)) {
			REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
			       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
			       bp->last_status_idx);
			return 0;
		}
		/* INTx: first write updates the index with interrupts
		 * still masked, second write clears the mask bit to
		 * re-enable them.
		 */
		REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
		       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
		       BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
		       bp->last_status_idx);

		REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
		       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
		       bp->last_status_idx);
		return 0;
	}

	return 1;
}
2107
/* Called with rtnl_lock from vlan functions and also netif_tx_lock
 * from set_multicast.
 */
/* Program the EMAC rx mode (promiscuous / multicast / VLAN-tag keep)
 * and the multicast hash filter from dev->flags and dev->mc_list.
 */
static void
bnx2_set_rx_mode(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	u32 rx_mode, sort_mode;
	int i;

	spin_lock_bh(&bp->phy_lock);

	/* Start from the current mode with the bits we manage cleared. */
	rx_mode = bp->rx_mode & ~(BNX2_EMAC_RX_MODE_PROMISCUOUS |
				  BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG);
	sort_mode = 1 | BNX2_RPM_SORT_USER0_BC_EN;
#ifdef BCM_VLAN
	/* Keep VLAN tags only when no vlan group is registered and ASF
	 * is not enabled.
	 */
	if (!bp->vlgrp && !(bp->flags & ASF_ENABLE_FLAG))
		rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
#else
	if (!(bp->flags & ASF_ENABLE_FLAG))
		rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
#endif
	if (dev->flags & IFF_PROMISC) {
		/* Promiscuous mode. */
		rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
		sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN |
			     BNX2_RPM_SORT_USER0_PROM_VLAN;
	}
	else if (dev->flags & IFF_ALLMULTI) {
		/* Accept all multicast: set every hash filter bit. */
		for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
			REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
			       0xffffffff);
		}
		sort_mode |= BNX2_RPM_SORT_USER0_MC_EN;
	}
	else {
		/* Accept one or more multicast(s). */
		struct dev_mc_list *mclist;
		u32 mc_filter[NUM_MC_HASH_REGISTERS];
		u32 regidx;
		u32 bit;
		u32 crc;

		memset(mc_filter, 0, 4 * NUM_MC_HASH_REGISTERS);

		for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
		     i++, mclist = mclist->next) {

			/* Hash each address into one of 256 filter bits
			 * using the low byte of the little-endian CRC:
			 * high 3 bits select the register, low 5 bits
			 * select the bit.
			 */
			crc = ether_crc_le(ETH_ALEN, mclist->dmi_addr);
			bit = crc & 0xff;
			regidx = (bit & 0xe0) >> 5;
			bit &= 0x1f;
			mc_filter[regidx] |= (1 << bit);
		}

		for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
			REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
			       mc_filter[i]);
		}

		sort_mode |= BNX2_RPM_SORT_USER0_MC_HSH_EN;
	}

	/* Only touch the rx mode register when it actually changes. */
	if (rx_mode != bp->rx_mode) {
		bp->rx_mode = rx_mode;
		REG_WR(bp, BNX2_EMAC_RX_MODE, rx_mode);
	}

	/* Disable, program, then re-enable the sort user0 filter. */
	REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
	REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode);
	REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode | BNX2_RPM_SORT_USER0_ENA);

	spin_unlock_bh(&bp->phy_lock);
}
2182
2183 #define FW_BUF_SIZE     0x8000
2184
2185 static int
2186 bnx2_gunzip_init(struct bnx2 *bp)
2187 {
2188         if ((bp->gunzip_buf = vmalloc(FW_BUF_SIZE)) == NULL)
2189                 goto gunzip_nomem1;
2190
2191         if ((bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL)) == NULL)
2192                 goto gunzip_nomem2;
2193
2194         bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(), GFP_KERNEL);
2195         if (bp->strm->workspace == NULL)
2196                 goto gunzip_nomem3;
2197
2198         return 0;
2199
2200 gunzip_nomem3:
2201         kfree(bp->strm);
2202         bp->strm = NULL;
2203
2204 gunzip_nomem2:
2205         vfree(bp->gunzip_buf);
2206         bp->gunzip_buf = NULL;
2207
2208 gunzip_nomem1:
2209         printk(KERN_ERR PFX "%s: Cannot allocate firmware buffer for "
2210                             "uncompression.\n", bp->dev->name);
2211         return -ENOMEM;
2212 }
2213
2214 static void
2215 bnx2_gunzip_end(struct bnx2 *bp)
2216 {
2217         kfree(bp->strm->workspace);
2218
2219         kfree(bp->strm);
2220         bp->strm = NULL;
2221
2222         if (bp->gunzip_buf) {
2223                 vfree(bp->gunzip_buf);
2224                 bp->gunzip_buf = NULL;
2225         }
2226 }
2227
/* Decompress the gzip-wrapped firmware image @zbuf (@len bytes) into
 * bp->gunzip_buf using a raw-deflate zlib stream.  On return, *outbuf
 * and *outlen describe whatever was decompressed.  Returns 0 when the
 * full stream was inflated, -EINVAL on a bad gzip header, or a zlib
 * error code otherwise.
 */
static int
bnx2_gunzip(struct bnx2 *bp, u8 *zbuf, int len, void **outbuf, int *outlen)
{
	int n, rc;

	/* check gzip header */
	if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED))
		return -EINVAL;

	/* The fixed gzip header is 10 bytes. */
	n = 10;

#define FNAME	0x8
	/* If the FNAME flag is set, skip the NUL-terminated original
	 * file name that follows the fixed header.
	 */
	if (zbuf[3] & FNAME)
		while ((zbuf[n++] != 0) && (n < len));

	bp->strm->next_in = zbuf + n;
	bp->strm->avail_in = len - n;
	bp->strm->next_out = bp->gunzip_buf;
	bp->strm->avail_out = FW_BUF_SIZE;

	/* Negative window bits: raw deflate data, no zlib header or
	 * trailer (the gzip wrapper was skipped above).
	 */
	rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
	if (rc != Z_OK)
		return rc;

	rc = zlib_inflate(bp->strm, Z_FINISH);

	*outlen = FW_BUF_SIZE - bp->strm->avail_out;
	*outbuf = bp->gunzip_buf;

	if ((rc != Z_OK) && (rc != Z_STREAM_END))
		printk(KERN_ERR PFX "%s: Firmware decompression error: %s\n",
		       bp->dev->name, bp->strm->msg);

	zlib_inflateEnd(bp->strm);

	if (rc == Z_STREAM_END)
		return 0;

	return rc;
}
2268
/* Download an RV2P firmware image.  Each 8-byte instruction is written
 * as two 32-bit words through INSTR_HIGH/INSTR_LOW and committed with
 * an ADDR_CMD write for the selected processor (RV2P_PROC1 or
 * RV2P_PROC2).  The processor is left in reset; un-stall is done later.
 */
static void
load_rv2p_fw(struct bnx2 *bp, u32 *rv2p_code, u32 rv2p_code_len,
	u32 rv2p_proc)
{
	int i;
	u32 val;


	for (i = 0; i < rv2p_code_len; i += 8) {
		REG_WR(bp, BNX2_RV2P_INSTR_HIGH, cpu_to_le32(*rv2p_code));
		rv2p_code++;
		REG_WR(bp, BNX2_RV2P_INSTR_LOW, cpu_to_le32(*rv2p_code));
		rv2p_code++;

		/* Commit the instruction pair at index i/8. */
		if (rv2p_proc == RV2P_PROC1) {
			val = (i / 8) | BNX2_RV2P_PROC1_ADDR_CMD_RDWR;
			REG_WR(bp, BNX2_RV2P_PROC1_ADDR_CMD, val);
		}
		else {
			val = (i / 8) | BNX2_RV2P_PROC2_ADDR_CMD_RDWR;
			REG_WR(bp, BNX2_RV2P_PROC2_ADDR_CMD, val);
		}
	}

	/* Reset the processor, un-stall is done later. */
	if (rv2p_proc == RV2P_PROC1) {
		REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC1_RESET);
	}
	else {
		REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC2_RESET);
	}
}
2301
2302 static int
2303 load_cpu_fw(struct bnx2 *bp, struct cpu_reg *cpu_reg, struct fw_info *fw)
2304 {
2305         u32 offset;
2306         u32 val;
2307         int rc;
2308
2309         /* Halt the CPU. */
2310         val = REG_RD_IND(bp, cpu_reg->mode);
2311         val |= cpu_reg->mode_value_halt;
2312         REG_WR_IND(bp, cpu_reg->mode, val);
2313         REG_WR_IND(bp, cpu_reg->state, cpu_reg->state_value_clear);
2314
2315         /* Load the Text area. */
2316         offset = cpu_reg->spad_base + (fw->text_addr - cpu_reg->mips_view_base);
2317         if (fw->gz_text) {
2318                 u32 text_len;
2319                 void *text;
2320
2321                 rc = bnx2_gunzip(bp, fw->gz_text, fw->gz_text_len, &text,
2322                                  &text_len);
2323                 if (rc)
2324                         return rc;
2325
2326                 fw->text = text;
2327         }
2328         if (fw->gz_text) {
2329                 int j;
2330
2331                 for (j = 0; j < (fw->text_len / 4); j++, offset += 4) {
2332                         REG_WR_IND(bp, offset, cpu_to_le32(fw->text[j]));
2333                 }
2334         }
2335
2336         /* Load the Data area. */
2337         offset = cpu_reg->spad_base + (fw->data_addr - cpu_reg->mips_view_base);
2338         if (fw->data) {
2339                 int j;
2340
2341                 for (j = 0; j < (fw->data_len / 4); j++, offset += 4) {
2342                         REG_WR_IND(bp, offset, fw->data[j]);
2343                 }
2344         }
2345
2346         /* Load the SBSS area. */
2347         offset = cpu_reg->spad_base + (fw->sbss_addr - cpu_reg->mips_view_base);
2348         if (fw->sbss) {
2349                 int j;
2350
2351                 for (j = 0; j < (fw->sbss_len / 4); j++, offset += 4) {
2352                         REG_WR_IND(bp, offset, fw->sbss[j]);
2353                 }
2354         }
2355
2356         /* Load the BSS area. */
2357         offset = cpu_reg->spad_base + (fw->bss_addr - cpu_reg->mips_view_base);
2358         if (fw->bss) {
2359                 int j;
2360
2361                 for (j = 0; j < (fw->bss_len/4); j++, offset += 4) {
2362                         REG_WR_IND(bp, offset, fw->bss[j]);
2363                 }
2364         }
2365
2366         /* Load the Read-Only area. */
2367         offset = cpu_reg->spad_base +
2368                 (fw->rodata_addr - cpu_reg->mips_view_base);
2369         if (fw->rodata) {
2370                 int j;
2371
2372                 for (j = 0; j < (fw->rodata_len / 4); j++, offset += 4) {
2373                         REG_WR_IND(bp, offset, fw->rodata[j]);
2374                 }
2375         }
2376
2377         /* Clear the pre-fetch instruction. */
2378         REG_WR_IND(bp, cpu_reg->inst, 0);
2379         REG_WR_IND(bp, cpu_reg->pc, fw->start_addr);
2380
2381         /* Start the CPU. */
2382         val = REG_RD_IND(bp, cpu_reg->mode);
2383         val &= ~cpu_reg->mode_value_halt;
2384         REG_WR_IND(bp, cpu_reg->state, cpu_reg->state_value_clear);
2385         REG_WR_IND(bp, cpu_reg->mode, val);
2386
2387         return 0;
2388 }
2389
/* Load firmware into all of the chip's internal processors: both RV2P
 * engines plus the RX, TX, TX patch-up, completion, and (5709 only)
 * command CPUs.  Uses the shared gunzip buffer for decompression; the
 * 5709 uses the _09 firmware images, earlier chips the _06 images.
 * Returns 0 on success or the first error encountered.
 */
static int
bnx2_init_cpus(struct bnx2 *bp)
{
	struct cpu_reg cpu_reg;
	struct fw_info *fw;
	int rc = 0;
	void *text;
	u32 text_len;

	if ((rc = bnx2_gunzip_init(bp)) != 0)
		return rc;

	/* Initialize the RV2P processor. */
	rc = bnx2_gunzip(bp, bnx2_rv2p_proc1, sizeof(bnx2_rv2p_proc1), &text,
			 &text_len);
	if (rc)
		goto init_cpu_err;

	load_rv2p_fw(bp, text, text_len, RV2P_PROC1);

	rc = bnx2_gunzip(bp, bnx2_rv2p_proc2, sizeof(bnx2_rv2p_proc2), &text,
			 &text_len);
	if (rc)
		goto init_cpu_err;

	load_rv2p_fw(bp, text, text_len, RV2P_PROC2);

	/* Initialize the RX Processor. */
	cpu_reg.mode = BNX2_RXP_CPU_MODE;
	cpu_reg.mode_value_halt = BNX2_RXP_CPU_MODE_SOFT_HALT;
	cpu_reg.mode_value_sstep = BNX2_RXP_CPU_MODE_STEP_ENA;
	cpu_reg.state = BNX2_RXP_CPU_STATE;
	cpu_reg.state_value_clear = 0xffffff;
	cpu_reg.gpr0 = BNX2_RXP_CPU_REG_FILE;
	cpu_reg.evmask = BNX2_RXP_CPU_EVENT_MASK;
	cpu_reg.pc = BNX2_RXP_CPU_PROGRAM_COUNTER;
	cpu_reg.inst = BNX2_RXP_CPU_INSTRUCTION;
	cpu_reg.bp = BNX2_RXP_CPU_HW_BREAKPOINT;
	cpu_reg.spad_base = BNX2_RXP_SCRATCH;
	cpu_reg.mips_view_base = 0x8000000;

	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		fw = &bnx2_rxp_fw_09;
	else
		fw = &bnx2_rxp_fw_06;

	rc = load_cpu_fw(bp, &cpu_reg, fw);
	if (rc)
		goto init_cpu_err;

	/* Initialize the TX Processor. */
	cpu_reg.mode = BNX2_TXP_CPU_MODE;
	cpu_reg.mode_value_halt = BNX2_TXP_CPU_MODE_SOFT_HALT;
	cpu_reg.mode_value_sstep = BNX2_TXP_CPU_MODE_STEP_ENA;
	cpu_reg.state = BNX2_TXP_CPU_STATE;
	cpu_reg.state_value_clear = 0xffffff;
	cpu_reg.gpr0 = BNX2_TXP_CPU_REG_FILE;
	cpu_reg.evmask = BNX2_TXP_CPU_EVENT_MASK;
	cpu_reg.pc = BNX2_TXP_CPU_PROGRAM_COUNTER;
	cpu_reg.inst = BNX2_TXP_CPU_INSTRUCTION;
	cpu_reg.bp = BNX2_TXP_CPU_HW_BREAKPOINT;
	cpu_reg.spad_base = BNX2_TXP_SCRATCH;
	cpu_reg.mips_view_base = 0x8000000;

	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		fw = &bnx2_txp_fw_09;
	else
		fw = &bnx2_txp_fw_06;

	rc = load_cpu_fw(bp, &cpu_reg, fw);
	if (rc)
		goto init_cpu_err;

	/* Initialize the TX Patch-up Processor. */
	cpu_reg.mode = BNX2_TPAT_CPU_MODE;
	cpu_reg.mode_value_halt = BNX2_TPAT_CPU_MODE_SOFT_HALT;
	cpu_reg.mode_value_sstep = BNX2_TPAT_CPU_MODE_STEP_ENA;
	cpu_reg.state = BNX2_TPAT_CPU_STATE;
	cpu_reg.state_value_clear = 0xffffff;
	cpu_reg.gpr0 = BNX2_TPAT_CPU_REG_FILE;
	cpu_reg.evmask = BNX2_TPAT_CPU_EVENT_MASK;
	cpu_reg.pc = BNX2_TPAT_CPU_PROGRAM_COUNTER;
	cpu_reg.inst = BNX2_TPAT_CPU_INSTRUCTION;
	cpu_reg.bp = BNX2_TPAT_CPU_HW_BREAKPOINT;
	cpu_reg.spad_base = BNX2_TPAT_SCRATCH;
	cpu_reg.mips_view_base = 0x8000000;

	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		fw = &bnx2_tpat_fw_09;
	else
		fw = &bnx2_tpat_fw_06;

	rc = load_cpu_fw(bp, &cpu_reg, fw);
	if (rc)
		goto init_cpu_err;

	/* Initialize the Completion Processor. */
	cpu_reg.mode = BNX2_COM_CPU_MODE;
	cpu_reg.mode_value_halt = BNX2_COM_CPU_MODE_SOFT_HALT;
	cpu_reg.mode_value_sstep = BNX2_COM_CPU_MODE_STEP_ENA;
	cpu_reg.state = BNX2_COM_CPU_STATE;
	cpu_reg.state_value_clear = 0xffffff;
	cpu_reg.gpr0 = BNX2_COM_CPU_REG_FILE;
	cpu_reg.evmask = BNX2_COM_CPU_EVENT_MASK;
	cpu_reg.pc = BNX2_COM_CPU_PROGRAM_COUNTER;
	cpu_reg.inst = BNX2_COM_CPU_INSTRUCTION;
	cpu_reg.bp = BNX2_COM_CPU_HW_BREAKPOINT;
	cpu_reg.spad_base = BNX2_COM_SCRATCH;
	cpu_reg.mips_view_base = 0x8000000;

	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		fw = &bnx2_com_fw_09;
	else
		fw = &bnx2_com_fw_06;

	rc = load_cpu_fw(bp, &cpu_reg, fw);
	if (rc)
		goto init_cpu_err;

	/* Initialize the Command Processor (5709 only). */
	cpu_reg.mode = BNX2_CP_CPU_MODE;
	cpu_reg.mode_value_halt = BNX2_CP_CPU_MODE_SOFT_HALT;
	cpu_reg.mode_value_sstep = BNX2_CP_CPU_MODE_STEP_ENA;
	cpu_reg.state = BNX2_CP_CPU_STATE;
	cpu_reg.state_value_clear = 0xffffff;
	cpu_reg.gpr0 = BNX2_CP_CPU_REG_FILE;
	cpu_reg.evmask = BNX2_CP_CPU_EVENT_MASK;
	cpu_reg.pc = BNX2_CP_CPU_PROGRAM_COUNTER;
	cpu_reg.inst = BNX2_CP_CPU_INSTRUCTION;
	cpu_reg.bp = BNX2_CP_CPU_HW_BREAKPOINT;
	cpu_reg.spad_base = BNX2_CP_SCRATCH;
	cpu_reg.mips_view_base = 0x8000000;

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		fw = &bnx2_cp_fw_09;

		rc = load_cpu_fw(bp, &cpu_reg, fw);
		if (rc)
			goto init_cpu_err;
	}
init_cpu_err:
	bnx2_gunzip_end(bp);
	return rc;
}
2534
/* Move the chip between PCI power states.
 *
 * @bp:    driver context
 * @state: PCI_D0 to bring the device fully up, PCI_D3hot to suspend it
 *         (optionally armed for Wake-on-LAN when bp->wol is set).
 *
 * Returns 0 on success, -EINVAL for any other target state.
 *
 * NOTE(review): the D3hot path reprograms the PHY/MAC for low-speed
 * magic-packet reception before cutting power; statement order appears
 * deliberate — do not reorder the register writes.
 */
static int
bnx2_set_power_state(struct bnx2 *bp, pci_power_t state)
{
	u16 pmcsr;

	/* Current power-management control/status from PCI config space. */
	pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);

	switch (state) {
	case PCI_D0: {
		u32 val;

		/* Clear the power-state field (enter D0) and write 1 to
		 * PME_STATUS to clear any pending wake event.
		 */
		pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
			(pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
			PCI_PM_CTRL_PME_STATUS);

		if (pmcsr & PCI_PM_CTRL_STATE_MASK)
			/* delay required during transition out of D3hot */
			msleep(20);

		/* Ack any magic/ACPI packet events and drop out of
		 * magic-packet (WOL) receive mode.
		 */
		val = REG_RD(bp, BNX2_EMAC_MODE);
		val |= BNX2_EMAC_MODE_MPKT_RCVD | BNX2_EMAC_MODE_ACPI_RCVD;
		val &= ~BNX2_EMAC_MODE_MPKT;
		REG_WR(bp, BNX2_EMAC_MODE, val);

		/* Disable ACPI pattern matching in the receive path. */
		val = REG_RD(bp, BNX2_RPM_CONFIG);
		val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
		REG_WR(bp, BNX2_RPM_CONFIG, val);
		break;
	}
	case PCI_D3hot: {
		int i;
		u32 val, wol_msg;

		if (bp->wol) {
			u32 advertising;
			u8 autoneg;

			/* Temporarily force 10/100 autoneg so the link can
			 * be maintained at low power, then restore the
			 * user's settings.
			 */
			autoneg = bp->autoneg;
			advertising = bp->advertising;

			bp->autoneg = AUTONEG_SPEED;
			bp->advertising = ADVERTISED_10baseT_Half |
				ADVERTISED_10baseT_Full |
				ADVERTISED_100baseT_Half |
				ADVERTISED_100baseT_Full |
				ADVERTISED_Autoneg;

			bnx2_setup_copper_phy(bp);

			bp->autoneg = autoneg;
			bp->advertising = advertising;

			/* Reprogram the MAC address so magic packets match. */
			bnx2_set_mac_addr(bp);

			val = REG_RD(bp, BNX2_EMAC_MODE);

			/* Enable port mode. */
			val &= ~BNX2_EMAC_MODE_PORT;
			val |= BNX2_EMAC_MODE_PORT_MII |
			       BNX2_EMAC_MODE_MPKT_RCVD |
			       BNX2_EMAC_MODE_ACPI_RCVD |
			       BNX2_EMAC_MODE_MPKT;

			REG_WR(bp, BNX2_EMAC_MODE, val);

			/* receive all multicast */
			for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
				REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
				       0xffffffff);
			}
			REG_WR(bp, BNX2_EMAC_RX_MODE,
			       BNX2_EMAC_RX_MODE_SORT_MODE);

			/* Sort rule 0: accept broadcast + multicast.  Write
			 * 0 first, then the rule, then the rule with ENA —
			 * presumably required by the sort engine; confirm
			 * against hardware docs before changing.
			 */
			val = 1 | BNX2_RPM_SORT_USER0_BC_EN |
			      BNX2_RPM_SORT_USER0_MC_EN;
			REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
			REG_WR(bp, BNX2_RPM_SORT_USER0, val);
			REG_WR(bp, BNX2_RPM_SORT_USER0, val |
			       BNX2_RPM_SORT_USER0_ENA);

			/* Need to enable EMAC and RPM for WOL. */
			REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
			       BNX2_MISC_ENABLE_SET_BITS_RX_PARSER_MAC_ENABLE |
			       BNX2_MISC_ENABLE_SET_BITS_TX_HEADER_Q_ENABLE |
			       BNX2_MISC_ENABLE_SET_BITS_EMAC_ENABLE);

			val = REG_RD(bp, BNX2_RPM_CONFIG);
			val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
			REG_WR(bp, BNX2_RPM_CONFIG, val);

			wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
		}
		else {
			wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
		}

		/* Tell the bootcode firmware we are suspending (skipped
		 * when the device cannot do WOL at all).
		 */
		if (!(bp->flags & NO_WOL_FLAG))
			bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT3 | wol_msg, 0);

		pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
		/* On 5706 A0/A1 only enter D3hot (state value 3) when WOL is
		 * armed; other revisions always enter D3hot.  Presumably an
		 * early-silicon erratum — confirm before simplifying.
		 */
		if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
		    (CHIP_ID(bp) == CHIP_ID_5706_A1)) {

			if (bp->wol)
				pmcsr |= 3;
		}
		else {
			pmcsr |= 3;
		}
		if (bp->wol) {
			pmcsr |= PCI_PM_CTRL_PME_ENABLE;
		}
		pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
				      pmcsr);

		/* No more memory access after this point until
		 * device is brought back to D0.
		 */
		udelay(50);
		break;
	}
	default:
		return -EINVAL;
	}
	return 0;
}
2661
2662 static int
2663 bnx2_acquire_nvram_lock(struct bnx2 *bp)
2664 {
2665         u32 val;
2666         int j;
2667
2668         /* Request access to the flash interface. */
2669         REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_SET2);
2670         for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
2671                 val = REG_RD(bp, BNX2_NVM_SW_ARB);
2672                 if (val & BNX2_NVM_SW_ARB_ARB_ARB2)
2673                         break;
2674
2675                 udelay(5);
2676         }
2677
2678         if (j >= NVRAM_TIMEOUT_COUNT)
2679                 return -EBUSY;
2680
2681         return 0;
2682 }
2683
2684 static int
2685 bnx2_release_nvram_lock(struct bnx2 *bp)
2686 {
2687         int j;
2688         u32 val;
2689
2690         /* Relinquish nvram interface. */
2691         REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_CLR2);
2692
2693         for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
2694                 val = REG_RD(bp, BNX2_NVM_SW_ARB);
2695                 if (!(val & BNX2_NVM_SW_ARB_ARB_ARB2))
2696                         break;
2697
2698                 udelay(5);
2699         }
2700
2701         if (j >= NVRAM_TIMEOUT_COUNT)
2702                 return -EBUSY;
2703
2704         return 0;
2705 }
2706
2707
2708 static int
2709 bnx2_enable_nvram_write(struct bnx2 *bp)
2710 {
2711         u32 val;
2712
2713         val = REG_RD(bp, BNX2_MISC_CFG);
2714         REG_WR(bp, BNX2_MISC_CFG, val | BNX2_MISC_CFG_NVM_WR_EN_PCI);
2715
2716         if (!bp->flash_info->buffered) {
2717                 int j;
2718
2719                 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
2720                 REG_WR(bp, BNX2_NVM_COMMAND,
2721                        BNX2_NVM_COMMAND_WREN | BNX2_NVM_COMMAND_DOIT);
2722
2723                 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
2724                         udelay(5);
2725
2726                         val = REG_RD(bp, BNX2_NVM_COMMAND);
2727                         if (val & BNX2_NVM_COMMAND_DONE)
2728                                 break;
2729                 }
2730
2731                 if (j >= NVRAM_TIMEOUT_COUNT)
2732                         return -EBUSY;
2733         }
2734         return 0;
2735 }
2736
2737 static void
2738 bnx2_disable_nvram_write(struct bnx2 *bp)
2739 {
2740         u32 val;
2741
2742         val = REG_RD(bp, BNX2_MISC_CFG);
2743         REG_WR(bp, BNX2_MISC_CFG, val & ~BNX2_MISC_CFG_NVM_WR_EN);
2744 }
2745
2746
2747 static void
2748 bnx2_enable_nvram_access(struct bnx2 *bp)
2749 {
2750         u32 val;
2751
2752         val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
2753         /* Enable both bits, even on read. */
2754         REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
2755                val | BNX2_NVM_ACCESS_ENABLE_EN | BNX2_NVM_ACCESS_ENABLE_WR_EN);
2756 }
2757
2758 static void
2759 bnx2_disable_nvram_access(struct bnx2 *bp)
2760 {
2761         u32 val;
2762
2763         val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
2764         /* Disable both bits, even after read. */
2765         REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
2766                 val & ~(BNX2_NVM_ACCESS_ENABLE_EN |
2767                         BNX2_NVM_ACCESS_ENABLE_WR_EN));
2768 }
2769
2770 static int
2771 bnx2_nvram_erase_page(struct bnx2 *bp, u32 offset)
2772 {
2773         u32 cmd;
2774         int j;
2775
2776         if (bp->flash_info->buffered)
2777                 /* Buffered flash, no erase needed */
2778                 return 0;
2779
2780         /* Build an erase command */
2781         cmd = BNX2_NVM_COMMAND_ERASE | BNX2_NVM_COMMAND_WR |
2782               BNX2_NVM_COMMAND_DOIT;
2783
2784         /* Need to clear DONE bit separately. */
2785         REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
2786
2787         /* Address of the NVRAM to read from. */
2788         REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
2789
2790         /* Issue an erase command. */
2791         REG_WR(bp, BNX2_NVM_COMMAND, cmd);
2792
2793         /* Wait for completion. */
2794         for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
2795                 u32 val;
2796
2797                 udelay(5);
2798
2799                 val = REG_RD(bp, BNX2_NVM_COMMAND);
2800                 if (val & BNX2_NVM_COMMAND_DONE)
2801                         break;
2802         }
2803
2804         if (j >= NVRAM_TIMEOUT_COUNT)
2805                 return -EBUSY;
2806
2807         return 0;
2808 }
2809
/* Read one 32-bit word from NVRAM.
 *
 * @bp:        driver context
 * @offset:    byte offset into the flash (linear; translated for
 *             buffered parts below)
 * @ret_val:   destination buffer, at least 4 bytes
 * @cmd_flags: BNX2_NVM_COMMAND_FIRST/LAST framing bits from the caller
 *
 * Caller must already hold the NVRAM lock and have access enabled.
 * Returns 0 on success, -EBUSY if the command never signals DONE.
 */
static int
bnx2_nvram_read_dword(struct bnx2 *bp, u32 offset, u8 *ret_val, u32 cmd_flags)
{
	u32 cmd;
	int j;

	/* Build the command word. */
	cmd = BNX2_NVM_COMMAND_DOIT | cmd_flags;

	/* Calculate an offset of a buffered flash. */
	/* Buffered parts address as (page << page_bits) + in-page offset. */
	if (bp->flash_info->buffered) {
		offset = ((offset / bp->flash_info->page_size) <<
			   bp->flash_info->page_bits) +
			  (offset % bp->flash_info->page_size);
	}

	/* Need to clear DONE bit separately. */
	REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);

	/* Address of the NVRAM to read from. */
	REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);

	/* Issue a read command. */
	REG_WR(bp, BNX2_NVM_COMMAND, cmd);

	/* Wait for completion. */
	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
		u32 val;

		udelay(5);

		val = REG_RD(bp, BNX2_NVM_COMMAND);
		if (val & BNX2_NVM_COMMAND_DONE) {
			val = REG_RD(bp, BNX2_NVM_READ);

			/* NVRAM data is big-endian; convert to CPU order
			 * and copy out byte-wise (ret_val may be unaligned).
			 */
			val = be32_to_cpu(val);
			memcpy(ret_val, &val, 4);
			break;
		}
	}
	if (j >= NVRAM_TIMEOUT_COUNT)
		return -EBUSY;

	return 0;
}
2855
2856
2857 static int
2858 bnx2_nvram_write_dword(struct bnx2 *bp, u32 offset, u8 *val, u32 cmd_flags)
2859 {
2860         u32 cmd, val32;
2861         int j;
2862
2863         /* Build the command word. */
2864         cmd = BNX2_NVM_COMMAND_DOIT | BNX2_NVM_COMMAND_WR | cmd_flags;
2865
2866         /* Calculate an offset of a buffered flash. */
2867         if (bp->flash_info->buffered) {
2868                 offset = ((offset / bp->flash_info->page_size) <<
2869                           bp->flash_info->page_bits) +
2870                          (offset % bp->flash_info->page_size);
2871         }
2872
2873         /* Need to clear DONE bit separately. */
2874         REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
2875
2876         memcpy(&val32, val, 4);
2877         val32 = cpu_to_be32(val32);
2878
2879         /* Write the data. */
2880         REG_WR(bp, BNX2_NVM_WRITE, val32);
2881
2882         /* Address of the NVRAM to write to. */
2883         REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
2884
2885         /* Issue the write command. */
2886         REG_WR(bp, BNX2_NVM_COMMAND, cmd);
2887
2888         /* Wait for completion. */
2889         for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
2890                 udelay(5);
2891
2892                 if (REG_RD(bp, BNX2_NVM_COMMAND) & BNX2_NVM_COMMAND_DONE)
2893                         break;
2894         }
2895         if (j >= NVRAM_TIMEOUT_COUNT)
2896                 return -EBUSY;
2897
2898         return 0;
2899 }
2900
/* Identify the attached flash/EEPROM part and record it in bp->flash_info,
 * reconfiguring the NVRAM interface for that part when the bootcode has
 * not already done so.  Also determines bp->flash_size.
 *
 * Returns 0 on success, -ENODEV if no flash_table entry matches, or a
 * negative error from bnx2_acquire_nvram_lock().
 */
static int
bnx2_init_nvram(struct bnx2 *bp)
{
	u32 val;
	int j, entry_count, rc;
	struct flash_spec *flash;

	/* Determine the selected interface. */
	val = REG_RD(bp, BNX2_NVM_CFG1);

	entry_count = sizeof(flash_table) / sizeof(struct flash_spec);

	rc = 0;
	/* Bit 30 set => the interface was already reconfigured (presumably
	 * by the bootcode) — match on config1 instead of the strap pins.
	 */
	if (val & 0x40000000) {

		/* Flash interface has been reconfigured */
		for (j = 0, flash = &flash_table[0]; j < entry_count;
		     j++, flash++) {
			if ((val & FLASH_BACKUP_STRAP_MASK) ==
			    (flash->config1 & FLASH_BACKUP_STRAP_MASK)) {
				bp->flash_info = flash;
				break;
			}
		}
	}
	else {
		u32 mask;
		/* Not yet been reconfigured */

		/* Bit 23 selects the backup strap set — TODO confirm the
		 * exact meaning against the chip documentation.
		 */
		if (val & (1 << 23))
			mask = FLASH_BACKUP_STRAP_MASK;
		else
			mask = FLASH_STRAP_MASK;

		for (j = 0, flash = &flash_table[0]; j < entry_count;
			j++, flash++) {

			if ((val & mask) == (flash->strapping & mask)) {
				bp->flash_info = flash;

				/* Request access to the flash interface. */
				if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
					return rc;

				/* Enable access to flash interface */
				bnx2_enable_nvram_access(bp);

				/* Reconfigure the flash interface */
				REG_WR(bp, BNX2_NVM_CFG1, flash->config1);
				REG_WR(bp, BNX2_NVM_CFG2, flash->config2);
				REG_WR(bp, BNX2_NVM_CFG3, flash->config3);
				REG_WR(bp, BNX2_NVM_WRITE1, flash->write1);

				/* Disable access to flash interface */
				bnx2_disable_nvram_access(bp);
				bnx2_release_nvram_lock(bp);

				break;
			}
		}
	} /* if (val & 0x40000000) */

	/* Either loop falling through means no table entry matched. */
	if (j == entry_count) {
		bp->flash_info = NULL;
		printk(KERN_ALERT PFX "Unknown flash/EEPROM type.\n");
		return -ENODEV;
	}

	/* Prefer the NVRAM size advertised in shared firmware config;
	 * fall back to the table's total_size when it is zero.
	 */
	val = REG_RD_IND(bp, bp->shmem_base + BNX2_SHARED_HW_CFG_CONFIG2);
	val &= BNX2_SHARED_HW_CFG2_NVM_SIZE_MASK;
	if (val)
		bp->flash_size = val;
	else
		bp->flash_size = bp->flash_info->total_size;

	return rc;
}
2978
2979 static int
2980 bnx2_nvram_read(struct bnx2 *bp, u32 offset, u8 *ret_buf,
2981                 int buf_size)
2982 {
2983         int rc = 0;
2984         u32 cmd_flags, offset32, len32, extra;
2985
2986         if (buf_size == 0)
2987                 return 0;
2988
2989         /* Request access to the flash interface. */
2990         if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
2991                 return rc;
2992
2993         /* Enable access to flash interface */
2994         bnx2_enable_nvram_access(bp);
2995
2996         len32 = buf_size;
2997         offset32 = offset;
2998         extra = 0;
2999
3000         cmd_flags = 0;
3001
3002         if (offset32 & 3) {
3003                 u8 buf[4];
3004                 u32 pre_len;
3005
3006                 offset32 &= ~3;
3007                 pre_len = 4 - (offset & 3);
3008
3009                 if (pre_len >= len32) {
3010                         pre_len = len32;
3011                         cmd_flags = BNX2_NVM_COMMAND_FIRST |
3012                                     BNX2_NVM_COMMAND_LAST;
3013                 }
3014                 else {
3015                         cmd_flags = BNX2_NVM_COMMAND_FIRST;
3016                 }
3017
3018                 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
3019
3020                 if (rc)
3021                         return rc;
3022
3023                 memcpy(ret_buf, buf + (offset & 3), pre_len);
3024
3025                 offset32 += 4;
3026                 ret_buf += pre_len;
3027                 len32 -= pre_len;
3028         }
3029         if (len32 & 3) {
3030                 extra = 4 - (len32 & 3);
3031                 len32 = (len32 + 4) & ~3;
3032         }
3033
3034         if (len32 == 4) {
3035                 u8 buf[4];
3036
3037                 if (cmd_flags)
3038                         cmd_flags = BNX2_NVM_COMMAND_LAST;
3039                 else
3040                         cmd_flags = BNX2_NVM_COMMAND_FIRST |
3041                                     BNX2_NVM_COMMAND_LAST;
3042
3043                 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
3044
3045                 memcpy(ret_buf, buf, 4 - extra);
3046         }
3047         else if (len32 > 0) {
3048                 u8 buf[4];
3049
3050                 /* Read the first word. */
3051                 if (cmd_flags)
3052                         cmd_flags = 0;
3053                 else
3054                         cmd_flags = BNX2_NVM_COMMAND_FIRST;
3055
3056                 rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, cmd_flags);
3057
3058                 /* Advance to the next dword. */
3059                 offset32 += 4;
3060                 ret_buf += 4;
3061                 len32 -= 4;
3062
3063                 while (len32 > 4 && rc == 0) {
3064                         rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, 0);
3065
3066                         /* Advance to the next dword. */
3067                         offset32 += 4;
3068                         ret_buf += 4;
3069                         len32 -= 4;
3070                 }
3071
3072                 if (rc)
3073                         return rc;
3074
3075                 cmd_flags = BNX2_NVM_COMMAND_LAST;
3076                 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
3077
3078                 memcpy(ret_buf, buf, 4 - extra);
3079         }
3080
3081         /* Disable access to flash interface */
3082         bnx2_disable_nvram_access(bp);
3083
3084         bnx2_release_nvram_lock(bp);
3085
3086         return rc;
3087 }
3088
3089 static int
3090 bnx2_nvram_write(struct bnx2 *bp, u32 offset, u8 *data_buf,
3091                 int buf_size)
3092 {
3093         u32 written, offset32, len32;
3094         u8 *buf, start[4], end[4], *align_buf = NULL, *flash_buffer = NULL;
3095         int rc = 0;
3096         int align_start, align_end;
3097
3098         buf = data_buf;
3099         offset32 = offset;
3100         len32 = buf_size;
3101         align_start = align_end = 0;
3102
3103         if ((align_start = (offset32 & 3))) {
3104                 offset32 &= ~3;
3105                 len32 += (4 - align_start);
3106                 if ((rc = bnx2_nvram_read(bp, offset32, start, 4)))
3107                         return rc;
3108         }
3109
3110         if (len32 & 3) {
3111                 if ((len32 > 4) || !align_start) {
3112                         align_end = 4 - (len32 & 3);
3113                         len32 += align_end;
3114                         if ((rc = bnx2_nvram_read(bp, offset32 + len32 - 4,
3115                                 end, 4))) {
3116                                 return rc;
3117                         }
3118                 }
3119         }
3120
3121         if (align_start || align_end) {
3122                 align_buf = kmalloc(len32, GFP_KERNEL);
3123                 if (align_buf == NULL)
3124                         return -ENOMEM;
3125                 if (align_start) {
3126                         memcpy(align_buf, start, 4);
3127                 }
3128                 if (align_end) {
3129                         memcpy(align_buf + len32 - 4, end, 4);
3130                 }
3131                 memcpy(align_buf + align_start, data_buf, buf_size);
3132                 buf = align_buf;
3133         }
3134
3135         if (bp->flash_info->buffered == 0) {
3136                 flash_buffer = kmalloc(264, GFP_KERNEL);
3137                 if (flash_buffer == NULL) {
3138                         rc = -ENOMEM;
3139                         goto nvram_write_end;
3140                 }
3141         }
3142
3143         written = 0;
3144         while ((written < len32) && (rc == 0)) {
3145                 u32 page_start, page_end, data_start, data_end;
3146                 u32 addr, cmd_flags;
3147                 int i;
3148
3149                 /* Find the page_start addr */
3150                 page_start = offset32 + written;
3151                 page_start -= (page_start % bp->flash_info->page_size);
3152                 /* Find the page_end addr */
3153                 page_end = page_start + bp->flash_info->page_size;
3154                 /* Find the data_start addr */
3155                 data_start = (written == 0) ? offset32 : page_start;
3156                 /* Find the data_end addr */
3157                 data_end = (page_end > offset32 + len32) ?
3158                         (offset32 + len32) : page_end;
3159
3160                 /* Request access to the flash interface. */
3161                 if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
3162                         goto nvram_write_end;
3163
3164                 /* Enable access to flash interface */
3165                 bnx2_enable_nvram_access(bp);
3166
3167                 cmd_flags = BNX2_NVM_COMMAND_FIRST;
3168                 if (bp->flash_info->buffered == 0) {
3169                         int j;
3170
3171                         /* Read the whole page into the buffer
3172                          * (non-buffer flash only) */
3173                         for (j = 0; j < bp->flash_info->page_size; j += 4) {
3174                                 if (j == (bp->flash_info->page_size - 4)) {
3175                                         cmd_flags |= BNX2_NVM_COMMAND_LAST;
3176                                 }
3177                                 rc = bnx2_nvram_read_dword(bp,
3178                                         page_start + j,
3179                                         &flash_buffer[j],
3180                                         cmd_flags);
3181
3182                                 if (rc)
3183                                         goto nvram_write_end;
3184
3185                                 cmd_flags = 0;
3186                         }
3187                 }
3188
3189                 /* Enable writes to flash interface (unlock write-protect) */
3190                 if ((rc = bnx2_enable_nvram_write(bp)) != 0)
3191                         goto nvram_write_end;
3192
3193                 /* Erase the page */
3194                 if ((rc = bnx2_nvram_erase_page(bp, page_start)) != 0)
3195                         goto nvram_write_end;
3196
3197                 /* Re-enable the write again for the actual write */
3198                 bnx2_enable_nvram_write(bp);
3199
3200                 /* Loop to write back the buffer data from page_start to
3201                  * data_start */
3202                 i = 0;
3203                 if (bp->flash_info->buffered == 0) {
3204                         for (addr = page_start; addr < data_start;
3205                                 addr += 4, i += 4) {
3206
3207                                 rc = bnx2_nvram_write_dword(bp, addr,
3208                                         &flash_buffer[i], cmd_flags);
3209
3210                                 if (rc != 0)
3211                                         goto nvram_write_end;
3212
3213                                 cmd_flags = 0;
3214                         }
3215                 }
3216
3217                 /* Loop to write the new data from data_start to data_end */
3218                 for (addr = data_start; addr < data_end; addr += 4, i += 4) {
3219                         if ((addr == page_end - 4) ||
3220                                 ((bp->flash_info->buffered) &&
3221                                  (addr == data_end - 4))) {
3222
3223                                 cmd_flags |= BNX2_NVM_COMMAND_LAST;
3224                         }
3225                         rc = bnx2_nvram_write_dword(bp, addr, buf,
3226                                 cmd_flags);
3227
3228                         if (rc != 0)
3229                                 goto nvram_write_end;
3230
3231                         cmd_flags = 0;
3232                         buf += 4;
3233                 }
3234
3235                 /* Loop to write back the buffer data from data_end
3236                  * to page_end */
3237                 if (bp->flash_info->buffered == 0) {
3238                         for (addr = data_end; addr < page_end;
3239                                 addr += 4, i += 4) {
3240
3241                                 if (addr == page_end-4) {
3242                                         cmd_flags = BNX2_NVM_COMMAND_LAST;
3243                                 }
3244                                 rc = bnx2_nvram_write_dword(bp, addr,
3245                                         &flash_buffer[i], cmd_flags);