Merge branch 'upstream-linus' of master.kernel.org:/pub/scm/linux/kernel/git/jgarzik...
[sfrench/cifs-2.6.git] / drivers / net / bnx2.c
1 /* bnx2.c: Broadcom NX2 network driver.
2  *
3  * Copyright (c) 2004, 2005, 2006 Broadcom Corporation
4  *
5  * This program is free software; you can redistribute it and/or modify
6  * it under the terms of the GNU General Public License as published by
7  * the Free Software Foundation.
8  *
9  * Written by: Michael Chan  (mchan@broadcom.com)
10  */
11
12
13 #include <linux/module.h>
14 #include <linux/moduleparam.h>
15
16 #include <linux/kernel.h>
17 #include <linux/timer.h>
18 #include <linux/errno.h>
19 #include <linux/ioport.h>
20 #include <linux/slab.h>
21 #include <linux/vmalloc.h>
22 #include <linux/interrupt.h>
23 #include <linux/pci.h>
24 #include <linux/init.h>
25 #include <linux/netdevice.h>
26 #include <linux/etherdevice.h>
27 #include <linux/skbuff.h>
28 #include <linux/dma-mapping.h>
29 #include <asm/bitops.h>
30 #include <asm/io.h>
31 #include <asm/irq.h>
32 #include <linux/delay.h>
33 #include <asm/byteorder.h>
34 #include <asm/page.h>
35 #include <linux/time.h>
36 #include <linux/ethtool.h>
37 #include <linux/mii.h>
38 #ifdef NETIF_F_HW_VLAN_TX
39 #include <linux/if_vlan.h>
40 #define BCM_VLAN 1
41 #endif
42 #ifdef NETIF_F_TSO
43 #include <net/ip.h>
44 #include <net/tcp.h>
45 #include <net/checksum.h>
46 #define BCM_TSO 1
47 #endif
48 #include <linux/workqueue.h>
49 #include <linux/crc32.h>
50 #include <linux/prefetch.h>
51 #include <linux/cache.h>
52 #include <linux/zlib.h>
53
54 #include "bnx2.h"
55 #include "bnx2_fw.h"
56 #include "bnx2_fw2.h"
57
#define DRV_MODULE_NAME         "bnx2"
#define PFX DRV_MODULE_NAME     ": "
#define DRV_MODULE_VERSION      "1.5.3"
#define DRV_MODULE_RELDATE      "January 8, 2007"

/* Convenience wrapper: an absolute jiffies deadline x ticks from now. */
#define RUN_AT(x) (jiffies + (x))

/* Time in jiffies before concluding the transmitter is hung. */
#define TX_TIMEOUT  (5*HZ)

/* One-line banner printed at driver load. */
static const char version[] __devinitdata =
        "Broadcom NetXtreme II Gigabit Ethernet Driver " DRV_MODULE_NAME " v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Michael Chan <mchan@broadcom.com>");
MODULE_DESCRIPTION("Broadcom NetXtreme II BCM5706/5708 Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);

/* Module parameter: set non-zero to force legacy INTx instead of MSI. */
static int disable_msi = 0;

module_param(disable_msi, int, 0);
MODULE_PARM_DESC(disable_msi, "Disable Message Signaled Interrupt (MSI)");
80
/* Board identifiers; used as the index into board_info[] below and
 * carried in the driver_data field of bnx2_pci_tbl[] entries.
 */
typedef enum {
        BCM5706 = 0,
        NC370T,
        NC370I,
        BCM5706S,
        NC370F,
        BCM5708,
        BCM5708S,
        BCM5709,
} board_t;
91
/* indexed by board_t, above */
static const struct {
        char *name;     /* human-readable adapter name for probe messages */
} board_info[] __devinitdata = {
        { "Broadcom NetXtreme II BCM5706 1000Base-T" },
        { "HP NC370T Multifunction Gigabit Server Adapter" },
        { "HP NC370i Multifunction Gigabit Server Adapter" },
        { "Broadcom NetXtreme II BCM5706 1000Base-SX" },
        { "HP NC370F Multifunction Gigabit Server Adapter" },
        { "Broadcom NetXtreme II BCM5708 1000Base-T" },
        { "Broadcom NetXtreme II BCM5708 1000Base-SX" },
        { "Broadcom NetXtreme II BCM5709 1000Base-T" },
        };
105
/* PCI ID table.  The HP OEM entries (matched by subsystem vendor/ID)
 * must precede the PCI_ANY_ID wildcard rows for the same device so
 * that PCI matching picks the more specific board type first.
 */
static struct pci_device_id bnx2_pci_tbl[] = {
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
          PCI_VENDOR_ID_HP, 0x3101, 0, 0, NC370T },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
          PCI_VENDOR_ID_HP, 0x3106, 0, 0, NC370I },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706 },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708 },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
          PCI_VENDOR_ID_HP, 0x3102, 0, 0, NC370F },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706S },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708S,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708S },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709 },
        { 0, }
};
125
/* NVRAM device descriptor table.  Each flash_spec entry carries the
 * raw command/config register encodings for one supported part,
 * followed by a buffered-flash flag, page geometry (bits/size), byte
 * address mask, total size, and a display name.  NOTE(review): the
 * entry is presumably selected at init time by matching the chip's
 * flash strapping against the first field — confirm against the
 * NVRAM init code elsewhere in this file.
 */
static struct flash_spec flash_table[] =
{
        /* Slow EEPROM */
        {0x00000000, 0x40830380, 0x009f0081, 0xa184a053, 0xaf000400,
         1, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
         SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
         "EEPROM - slow"},
        /* Expansion entry 0001 */
        {0x08000002, 0x4b808201, 0x00050081, 0x03840253, 0xaf020406,
         0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
         SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
         "Entry 0001"},
        /* Saifun SA25F010 (non-buffered flash) */
        /* strap, cfg1, & write1 need updates */
        {0x04000001, 0x47808201, 0x00050081, 0x03840253, 0xaf020406,
         0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
         SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*2,
         "Non-buffered flash (128kB)"},
        /* Saifun SA25F020 (non-buffered flash) */
        /* strap, cfg1, & write1 need updates */
        {0x0c000003, 0x4f808201, 0x00050081, 0x03840253, 0xaf020406,
         0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
         SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*4,
         "Non-buffered flash (256kB)"},
        /* Expansion entry 0100 */
        {0x11000000, 0x53808201, 0x00050081, 0x03840253, 0xaf020406,
         0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
         SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
         "Entry 0100"},
        /* Entry 0101: ST M45PE10 (non-buffered flash, TetonII B0) */
        {0x19000002, 0x5b808201, 0x000500db, 0x03840253, 0xaf020406,
         0, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
         ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*2,
         "Entry 0101: ST M45PE10 (128kB non-bufferred)"},
        /* Entry 0110: ST M45PE20 (non-buffered flash)*/
        {0x15000001, 0x57808201, 0x000500db, 0x03840253, 0xaf020406,
         0, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
         ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*4,
         "Entry 0110: ST M45PE20 (256kB non-bufferred)"},
        /* Saifun SA25F005 (non-buffered flash) */
        /* strap, cfg1, & write1 need updates */
        {0x1d000003, 0x5f808201, 0x00050081, 0x03840253, 0xaf020406,
         0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
         SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE,
         "Non-buffered flash (64kB)"},
        /* Fast EEPROM */
        {0x22000000, 0x62808380, 0x009f0081, 0xa184a053, 0xaf000400,
         1, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
         SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
         "EEPROM - fast"},
        /* Expansion entry 1001 */
        {0x2a000002, 0x6b808201, 0x00050081, 0x03840253, 0xaf020406,
         0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
         SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
         "Entry 1001"},
        /* Expansion entry 1010 */
        {0x26000001, 0x67808201, 0x00050081, 0x03840253, 0xaf020406,
         0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
         SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
         "Entry 1010"},
        /* ATMEL AT45DB011B (buffered flash) */
        {0x2e000003, 0x6e808273, 0x00570081, 0x68848353, 0xaf000400,
         1, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
         BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE,
         "Buffered flash (128kB)"},
        /* Expansion entry 1100 */
        {0x33000000, 0x73808201, 0x00050081, 0x03840253, 0xaf020406,
         0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
         SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
         "Entry 1100"},
        /* Expansion entry 1101 */
        {0x3b000002, 0x7b808201, 0x00050081, 0x03840253, 0xaf020406,
         0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
         SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
         "Entry 1101"},
        /* Ateml Expansion entry 1110 */
        {0x37000001, 0x76808273, 0x00570081, 0x68848353, 0xaf000400,
         1, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
         BUFFERED_FLASH_BYTE_ADDR_MASK, 0,
         "Entry 1110 (Atmel)"},
        /* ATMEL AT45DB021B (buffered flash) */
        {0x3f000003, 0x7e808273, 0x00570081, 0x68848353, 0xaf000400,
         1, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
         BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE*2,
         "Buffered flash (256kB)"},
};
212
213 MODULE_DEVICE_TABLE(pci, bnx2_pci_tbl);
214
/* Return the number of free TX descriptors.
 *
 * tx_prod and tx_cons are free-running indices updated by the transmit
 * and completion paths; the smp_mb() presumably pairs with barriers in
 * those paths so both values are seen consistently here — confirm
 * against the xmit/tx-int code.
 */
static inline u32 bnx2_tx_avail(struct bnx2 *bp)
{
        u32 diff;

        smp_mb();

        /* The ring uses 256 indices for 255 entries, one of them
         * needs to be skipped.
         */
        diff = bp->tx_prod - bp->tx_cons;
        if (unlikely(diff >= TX_DESC_CNT)) {
                /* Indices have wrapped: reduce modulo 2^16, then account
                 * for the one skipped index when exactly a ring apart.
                 */
                diff &= 0xffff;
                if (diff == TX_DESC_CNT)
                        diff = MAX_TX_DESC_CNT;
        }
        return (bp->tx_ring_size - diff);
}
232
233 static u32
234 bnx2_reg_rd_ind(struct bnx2 *bp, u32 offset)
235 {
236         REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
237         return (REG_RD(bp, BNX2_PCICFG_REG_WINDOW));
238 }
239
/* Indirectly write a chip register through the PCICFG register window.
 * The address write must precede the data write.
 */
static void
bnx2_reg_wr_ind(struct bnx2 *bp, u32 offset, u32 val)
{
        REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
        REG_WR(bp, BNX2_PCICFG_REG_WINDOW, val);
}
246
247 static void
248 bnx2_ctx_wr(struct bnx2 *bp, u32 cid_addr, u32 offset, u32 val)
249 {
250         offset += cid_addr;
251         if (CHIP_NUM(bp) == CHIP_NUM_5709) {
252                 int i;
253
254                 REG_WR(bp, BNX2_CTX_CTX_DATA, val);
255                 REG_WR(bp, BNX2_CTX_CTX_CTRL,
256                        offset | BNX2_CTX_CTX_CTRL_WRITE_REQ);
257                 for (i = 0; i < 5; i++) {
258                         u32 val;
259                         val = REG_RD(bp, BNX2_CTX_CTX_CTRL);
260                         if ((val & BNX2_CTX_CTX_CTRL_WRITE_REQ) == 0)
261                                 break;
262                         udelay(5);
263                 }
264         } else {
265                 REG_WR(bp, BNX2_CTX_DATA_ADR, offset);
266                 REG_WR(bp, BNX2_CTX_DATA, val);
267         }
268 }
269
/* Read a PHY register over the EMAC MDIO interface.
 *
 * @reg: PHY register number
 * @val: out parameter; receives the 16-bit value, or 0 on failure
 *
 * Returns 0 on success, -EBUSY if the MDIO transaction never completes.
 * If hardware auto-polling of the PHY is enabled, it is disabled around
 * the manual access and restored afterwards.
 */
static int
bnx2_read_phy(struct bnx2 *bp, u32 reg, u32 *val)
{
        u32 val1;
        int i, ret;

        if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
                /* Turn off auto-polling so it cannot collide with the
                 * manual MDIO transaction below.
                 */
                val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
                val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;

                REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
                REG_RD(bp, BNX2_EMAC_MDIO_MODE);        /* flush the write */

                udelay(40);
        }

        /* Compose the read command: PHY address, register, read opcode. */
        val1 = (bp->phy_addr << 21) | (reg << 16) |
                BNX2_EMAC_MDIO_COMM_COMMAND_READ | BNX2_EMAC_MDIO_COMM_DISEXT |
                BNX2_EMAC_MDIO_COMM_START_BUSY;
        REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);

        /* Poll (up to 50 x 10us) for the BUSY bit to clear. */
        for (i = 0; i < 50; i++) {
                udelay(10);

                val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
                if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
                        udelay(5);

                        /* Re-read and keep only the data bits. */
                        val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
                        val1 &= BNX2_EMAC_MDIO_COMM_DATA;

                        break;
                }
        }

        if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY) {
                /* Timed out with BUSY still set. */
                *val = 0x0;
                ret = -EBUSY;
        }
        else {
                *val = val1;
                ret = 0;
        }

        if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
                /* Restore hardware auto-polling. */
                val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
                val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;

                REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
                REG_RD(bp, BNX2_EMAC_MDIO_MODE);

                udelay(40);
        }

        return ret;
}
326
/* Write a PHY register over the EMAC MDIO interface.
 *
 * @reg: PHY register number
 * @val: 16-bit value to write
 *
 * Returns 0 on success, -EBUSY if the MDIO transaction never completes.
 * As in bnx2_read_phy(), hardware auto-polling is suspended around the
 * manual access and restored afterwards.
 */
static int
bnx2_write_phy(struct bnx2 *bp, u32 reg, u32 val)
{
        u32 val1;
        int i, ret;

        if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
                /* Suspend auto-polling for the duration of the access. */
                val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
                val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;

                REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
                REG_RD(bp, BNX2_EMAC_MDIO_MODE);        /* flush the write */

                udelay(40);
        }

        /* Compose the write command: address, register, data, write op. */
        val1 = (bp->phy_addr << 21) | (reg << 16) | val |
                BNX2_EMAC_MDIO_COMM_COMMAND_WRITE |
                BNX2_EMAC_MDIO_COMM_START_BUSY | BNX2_EMAC_MDIO_COMM_DISEXT;
        REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);

        /* Poll (up to 50 x 10us) for the BUSY bit to clear. */
        for (i = 0; i < 50; i++) {
                udelay(10);

                val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
                if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
                        udelay(5);
                        break;
                }
        }

        if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)
                ret = -EBUSY;
        else
                ret = 0;

        if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
                /* Restore hardware auto-polling. */
                val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
                val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;

                REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
                REG_RD(bp, BNX2_EMAC_MDIO_MODE);

                udelay(40);
        }

        return ret;
}
375
/* Mask device interrupts.  The trailing read flushes the posted write
 * so the mask takes effect before we return.
 */
static void
bnx2_disable_int(struct bnx2 *bp)
{
        REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
               BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
        REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD);
}
383
/* Unmask device interrupts.  The first write acknowledges up to
 * last_status_idx while keeping interrupts masked; the second unmasks.
 * The final COAL_NOW command presumably forces an immediate coalescing
 * pass so any already-pending events generate an interrupt — confirm
 * against the host-coalescing section of the chip documentation.
 */
static void
bnx2_enable_int(struct bnx2 *bp)
{
        REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
               BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
               BNX2_PCICFG_INT_ACK_CMD_MASK_INT | bp->last_status_idx);

        REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
               BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID | bp->last_status_idx);

        REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
}
396
/* Mask interrupts and wait for any in-flight handler to finish.
 * intr_sem is raised first so the ISR can see the disable in progress;
 * it is dropped again by bnx2_netif_start().
 */
static void
bnx2_disable_int_sync(struct bnx2 *bp)
{
        atomic_inc(&bp->intr_sem);
        bnx2_disable_int(bp);
        synchronize_irq(bp->pdev->irq);
}
404
405 static void
406 bnx2_netif_stop(struct bnx2 *bp)
407 {
408         bnx2_disable_int_sync(bp);
409         if (netif_running(bp->dev)) {
410                 netif_poll_disable(bp->dev);
411                 netif_tx_disable(bp->dev);
412                 bp->dev->trans_start = jiffies; /* prevent tx timeout */
413         }
414 }
415
416 static void
417 bnx2_netif_start(struct bnx2 *bp)
418 {
419         if (atomic_dec_and_test(&bp->intr_sem)) {
420                 if (netif_running(bp->dev)) {
421                         netif_wake_queue(bp->dev);
422                         netif_poll_enable(bp->dev);
423                         bnx2_enable_int(bp);
424                 }
425         }
426 }
427
/* Release every DMA and host allocation made by bnx2_alloc_mem().
 * Safe to call on a partially initialized bp: each pointer is checked
 * (or is NULL-safe, as with kfree/vfree) and cleared after freeing.
 */
static void
bnx2_free_mem(struct bnx2 *bp)
{
        int i;

        /* 5709-only context memory pages. */
        for (i = 0; i < bp->ctx_pages; i++) {
                if (bp->ctx_blk[i]) {
                        pci_free_consistent(bp->pdev, BCM_PAGE_SIZE,
                                            bp->ctx_blk[i],
                                            bp->ctx_blk_mapping[i]);
                        bp->ctx_blk[i] = NULL;
                }
        }
        /* Status and statistics share one allocation; freeing the
         * status block releases both.
         */
        if (bp->status_blk) {
                pci_free_consistent(bp->pdev, bp->status_stats_size,
                                    bp->status_blk, bp->status_blk_mapping);
                bp->status_blk = NULL;
                bp->stats_blk = NULL;
        }
        if (bp->tx_desc_ring) {
                pci_free_consistent(bp->pdev,
                                    sizeof(struct tx_bd) * TX_DESC_CNT,
                                    bp->tx_desc_ring, bp->tx_desc_mapping);
                bp->tx_desc_ring = NULL;
        }
        kfree(bp->tx_buf_ring);
        bp->tx_buf_ring = NULL;
        for (i = 0; i < bp->rx_max_ring; i++) {
                if (bp->rx_desc_ring[i])
                        pci_free_consistent(bp->pdev,
                                            sizeof(struct rx_bd) * RX_DESC_CNT,
                                            bp->rx_desc_ring[i],
                                            bp->rx_desc_mapping[i]);
                bp->rx_desc_ring[i] = NULL;
        }
        vfree(bp->rx_buf_ring);
        bp->rx_buf_ring = NULL;
}
466
/* Allocate all device memory: the TX ring and its host shadow array,
 * the RX ring pages and shadow array, the combined status/statistics
 * block, and (5709 only) context memory pages.  On any failure the
 * partial allocations are released via bnx2_free_mem() and -ENOMEM is
 * returned.
 */
static int
bnx2_alloc_mem(struct bnx2 *bp)
{
        int i, status_blk_size;

        /* Host-side shadow of the TX ring. */
        bp->tx_buf_ring = kzalloc(sizeof(struct sw_bd) * TX_DESC_CNT,
                                  GFP_KERNEL);
        if (bp->tx_buf_ring == NULL)
                return -ENOMEM;

        bp->tx_desc_ring = pci_alloc_consistent(bp->pdev,
                                                sizeof(struct tx_bd) *
                                                TX_DESC_CNT,
                                                &bp->tx_desc_mapping);
        if (bp->tx_desc_ring == NULL)
                goto alloc_mem_err;

        /* RX shadow array may be large, so use vmalloc + memset. */
        bp->rx_buf_ring = vmalloc(sizeof(struct sw_bd) * RX_DESC_CNT *
                                  bp->rx_max_ring);
        if (bp->rx_buf_ring == NULL)
                goto alloc_mem_err;

        memset(bp->rx_buf_ring, 0, sizeof(struct sw_bd) * RX_DESC_CNT *
                                   bp->rx_max_ring);

        for (i = 0; i < bp->rx_max_ring; i++) {
                bp->rx_desc_ring[i] =
                        pci_alloc_consistent(bp->pdev,
                                             sizeof(struct rx_bd) * RX_DESC_CNT,
                                             &bp->rx_desc_mapping[i]);
                if (bp->rx_desc_ring[i] == NULL)
                        goto alloc_mem_err;

        }

        /* Combine status and statistics blocks into one allocation. */
        status_blk_size = L1_CACHE_ALIGN(sizeof(struct status_block));
        bp->status_stats_size = status_blk_size +
                                sizeof(struct statistics_block);

        bp->status_blk = pci_alloc_consistent(bp->pdev, bp->status_stats_size,
                                              &bp->status_blk_mapping);
        if (bp->status_blk == NULL)
                goto alloc_mem_err;

        memset(bp->status_blk, 0, bp->status_stats_size);

        /* Statistics live just past the cache-aligned status block. */
        bp->stats_blk = (void *) ((unsigned long) bp->status_blk +
                                  status_blk_size);

        bp->stats_blk_mapping = bp->status_blk_mapping + status_blk_size;

        if (CHIP_NUM(bp) == CHIP_NUM_5709) {
                /* 0x2000 bytes of context memory in page-sized chunks. */
                bp->ctx_pages = 0x2000 / BCM_PAGE_SIZE;
                if (bp->ctx_pages == 0)
                        bp->ctx_pages = 1;
                for (i = 0; i < bp->ctx_pages; i++) {
                        bp->ctx_blk[i] = pci_alloc_consistent(bp->pdev,
                                                BCM_PAGE_SIZE,
                                                &bp->ctx_blk_mapping[i]);
                        if (bp->ctx_blk[i] == NULL)
                                goto alloc_mem_err;
                }
        }
        return 0;

alloc_mem_err:
        bnx2_free_mem(bp);
        return -ENOMEM;
}
537
/* Encode the current link state (speed, duplex, autoneg status) into
 * a BNX2_LINK_STATUS_* bitmask and post it to the firmware through
 * shared memory.
 */
static void
bnx2_report_fw_link(struct bnx2 *bp)
{
        u32 fw_link_status = 0;

        if (bp->link_up) {
                u32 bmsr;

                switch (bp->line_speed) {
                case SPEED_10:
                        if (bp->duplex == DUPLEX_HALF)
                                fw_link_status = BNX2_LINK_STATUS_10HALF;
                        else
                                fw_link_status = BNX2_LINK_STATUS_10FULL;
                        break;
                case SPEED_100:
                        if (bp->duplex == DUPLEX_HALF)
                                fw_link_status = BNX2_LINK_STATUS_100HALF;
                        else
                                fw_link_status = BNX2_LINK_STATUS_100FULL;
                        break;
                case SPEED_1000:
                        if (bp->duplex == DUPLEX_HALF)
                                fw_link_status = BNX2_LINK_STATUS_1000HALF;
                        else
                                fw_link_status = BNX2_LINK_STATUS_1000FULL;
                        break;
                case SPEED_2500:
                        if (bp->duplex == DUPLEX_HALF)
                                fw_link_status = BNX2_LINK_STATUS_2500HALF;
                        else
                                fw_link_status = BNX2_LINK_STATUS_2500FULL;
                        break;
                }

                fw_link_status |= BNX2_LINK_STATUS_LINK_UP;

                if (bp->autoneg) {
                        fw_link_status |= BNX2_LINK_STATUS_AN_ENABLED;

                        /* BMSR status bits are latched; read twice so the
                         * second read reflects the current state.
                         */
                        bnx2_read_phy(bp, MII_BMSR, &bmsr);
                        bnx2_read_phy(bp, MII_BMSR, &bmsr);

                        if (!(bmsr & BMSR_ANEGCOMPLETE) ||
                            bp->phy_flags & PHY_PARALLEL_DETECT_FLAG)
                                fw_link_status |= BNX2_LINK_STATUS_PARALLEL_DET;
                        else
                                fw_link_status |= BNX2_LINK_STATUS_AN_COMPLETE;
                }
        }
        else
                fw_link_status = BNX2_LINK_STATUS_LINK_DOWN;

        REG_WR_IND(bp, bp->shmem_base + BNX2_LINK_STATUS, fw_link_status);
}
593
594 static void
595 bnx2_report_link(struct bnx2 *bp)
596 {
597         if (bp->link_up) {
598                 netif_carrier_on(bp->dev);
599                 printk(KERN_INFO PFX "%s NIC Link is Up, ", bp->dev->name);
600
601                 printk("%d Mbps ", bp->line_speed);
602
603                 if (bp->duplex == DUPLEX_FULL)
604                         printk("full duplex");
605                 else
606                         printk("half duplex");
607
608                 if (bp->flow_ctrl) {
609                         if (bp->flow_ctrl & FLOW_CTRL_RX) {
610                                 printk(", receive ");
611                                 if (bp->flow_ctrl & FLOW_CTRL_TX)
612                                         printk("& transmit ");
613                         }
614                         else {
615                                 printk(", transmit ");
616                         }
617                         printk("flow control ON");
618                 }
619                 printk("\n");
620         }
621         else {
622                 netif_carrier_off(bp->dev);
623                 printk(KERN_ERR PFX "%s NIC Link is Down\n", bp->dev->name);
624         }
625
626         bnx2_report_fw_link(bp);
627 }
628
/* Determine the effective pause (flow control) configuration and store
 * it in bp->flow_ctrl as FLOW_CTRL_TX/FLOW_CTRL_RX bits.
 *
 * If flow control is not being autonegotiated, the requested setting is
 * applied directly (full duplex only).  Otherwise the local and link
 * partner advertisements are combined per the pause resolution table
 * (Table 28B-3 of IEEE 802.3ab-1999).
 */
static void
bnx2_resolve_flow_ctrl(struct bnx2 *bp)
{
        u32 local_adv, remote_adv;

        bp->flow_ctrl = 0;
        if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
                (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {

                /* Not autonegotiating pause: use the requested setting. */
                if (bp->duplex == DUPLEX_FULL) {
                        bp->flow_ctrl = bp->req_flow_ctrl;
                }
                return;
        }

        /* Pause is only defined for full duplex links. */
        if (bp->duplex != DUPLEX_FULL) {
                return;
        }

        /* The 5708 SerDes reports the resolved pause state directly. */
        if ((bp->phy_flags & PHY_SERDES_FLAG) &&
            (CHIP_NUM(bp) == CHIP_NUM_5708)) {
                u32 val;

                bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
                if (val & BCM5708S_1000X_STAT1_TX_PAUSE)
                        bp->flow_ctrl |= FLOW_CTRL_TX;
                if (val & BCM5708S_1000X_STAT1_RX_PAUSE)
                        bp->flow_ctrl |= FLOW_CTRL_RX;
                return;
        }

        bnx2_read_phy(bp, MII_ADVERTISE, &local_adv);
        bnx2_read_phy(bp, MII_LPA, &remote_adv);

        if (bp->phy_flags & PHY_SERDES_FLAG) {
                /* Translate 1000BASE-X pause bits into the copper
                 * ADVERTISE_PAUSE_* encoding so one resolution path
                 * below handles both media types.
                 */
                u32 new_local_adv = 0;
                u32 new_remote_adv = 0;

                if (local_adv & ADVERTISE_1000XPAUSE)
                        new_local_adv |= ADVERTISE_PAUSE_CAP;
                if (local_adv & ADVERTISE_1000XPSE_ASYM)
                        new_local_adv |= ADVERTISE_PAUSE_ASYM;
                if (remote_adv & ADVERTISE_1000XPAUSE)
                        new_remote_adv |= ADVERTISE_PAUSE_CAP;
                if (remote_adv & ADVERTISE_1000XPSE_ASYM)
                        new_remote_adv |= ADVERTISE_PAUSE_ASYM;

                local_adv = new_local_adv;
                remote_adv = new_remote_adv;
        }

        /* See Table 28B-3 of 802.3ab-1999 spec. */
        if (local_adv & ADVERTISE_PAUSE_CAP) {
                if(local_adv & ADVERTISE_PAUSE_ASYM) {
                        if (remote_adv & ADVERTISE_PAUSE_CAP) {
                                bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
                        }
                        else if (remote_adv & ADVERTISE_PAUSE_ASYM) {
                                bp->flow_ctrl = FLOW_CTRL_RX;
                        }
                }
                else {
                        if (remote_adv & ADVERTISE_PAUSE_CAP) {
                                bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
                        }
                }
        }
        else if (local_adv & ADVERTISE_PAUSE_ASYM) {
                if ((remote_adv & ADVERTISE_PAUSE_CAP) &&
                        (remote_adv & ADVERTISE_PAUSE_ASYM)) {

                        bp->flow_ctrl = FLOW_CTRL_TX;
                }
        }
}
704
705 static int
706 bnx2_5708s_linkup(struct bnx2 *bp)
707 {
708         u32 val;
709
710         bp->link_up = 1;
711         bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
712         switch (val & BCM5708S_1000X_STAT1_SPEED_MASK) {
713                 case BCM5708S_1000X_STAT1_SPEED_10:
714                         bp->line_speed = SPEED_10;
715                         break;
716                 case BCM5708S_1000X_STAT1_SPEED_100:
717                         bp->line_speed = SPEED_100;
718                         break;
719                 case BCM5708S_1000X_STAT1_SPEED_1G:
720                         bp->line_speed = SPEED_1000;
721                         break;
722                 case BCM5708S_1000X_STAT1_SPEED_2G5:
723                         bp->line_speed = SPEED_2500;
724                         break;
725         }
726         if (val & BCM5708S_1000X_STAT1_FD)
727                 bp->duplex = DUPLEX_FULL;
728         else
729                 bp->duplex = DUPLEX_HALF;
730
731         return 0;
732 }
733
734 static int
735 bnx2_5706s_linkup(struct bnx2 *bp)
736 {
737         u32 bmcr, local_adv, remote_adv, common;
738
739         bp->link_up = 1;
740         bp->line_speed = SPEED_1000;
741
742         bnx2_read_phy(bp, MII_BMCR, &bmcr);
743         if (bmcr & BMCR_FULLDPLX) {
744                 bp->duplex = DUPLEX_FULL;
745         }
746         else {
747                 bp->duplex = DUPLEX_HALF;
748         }
749
750         if (!(bmcr & BMCR_ANENABLE)) {
751                 return 0;
752         }
753
754         bnx2_read_phy(bp, MII_ADVERTISE, &local_adv);
755         bnx2_read_phy(bp, MII_LPA, &remote_adv);
756
757         common = local_adv & remote_adv;
758         if (common & (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL)) {
759
760                 if (common & ADVERTISE_1000XFULL) {
761                         bp->duplex = DUPLEX_FULL;
762                 }
763                 else {
764                         bp->duplex = DUPLEX_HALF;
765                 }
766         }
767
768         return 0;
769 }
770
/* Record link-up speed and duplex for a copper PHY.
 *
 * With autoneg enabled, the highest common capability wins, checked
 * from 1000 Mbps down to 10 Mbps; if nothing matches, the link is
 * marked down.  With autoneg disabled, speed and duplex are taken from
 * the forced BMCR bits.  Always returns 0.
 */
static int
bnx2_copper_linkup(struct bnx2 *bp)
{
        u32 bmcr;

        bnx2_read_phy(bp, MII_BMCR, &bmcr);
        if (bmcr & BMCR_ANENABLE) {
                u32 local_adv, remote_adv, common;

                /* 1000BASE-T: our abilities are in MII_CTRL1000, the
                 * partner's in MII_STAT1000.
                 */
                bnx2_read_phy(bp, MII_CTRL1000, &local_adv);
                bnx2_read_phy(bp, MII_STAT1000, &remote_adv);

                /* The partner's 1000BASE-T bits sit two bit positions
                 * above our advertisement bits; shift right to align
                 * them before taking the intersection.
                 */
                common = local_adv & (remote_adv >> 2);
                if (common & ADVERTISE_1000FULL) {
                        bp->line_speed = SPEED_1000;
                        bp->duplex = DUPLEX_FULL;
                }
                else if (common & ADVERTISE_1000HALF) {
                        bp->line_speed = SPEED_1000;
                        bp->duplex = DUPLEX_HALF;
                }
                else {
                        /* No gigabit match; fall back to 10/100 from
                         * the base-page advertisement registers.
                         */
                        bnx2_read_phy(bp, MII_ADVERTISE, &local_adv);
                        bnx2_read_phy(bp, MII_LPA, &remote_adv);

                        common = local_adv & remote_adv;
                        if (common & ADVERTISE_100FULL) {
                                bp->line_speed = SPEED_100;
                                bp->duplex = DUPLEX_FULL;
                        }
                        else if (common & ADVERTISE_100HALF) {
                                bp->line_speed = SPEED_100;
                                bp->duplex = DUPLEX_HALF;
                        }
                        else if (common & ADVERTISE_10FULL) {
                                bp->line_speed = SPEED_10;
                                bp->duplex = DUPLEX_FULL;
                        }
                        else if (common & ADVERTISE_10HALF) {
                                bp->line_speed = SPEED_10;
                                bp->duplex = DUPLEX_HALF;
                        }
                        else {
                                /* No common ability at all. */
                                bp->line_speed = 0;
                                bp->link_up = 0;
                        }
                }
        }
        else {
                /* Autoneg off: use the forced BMCR settings. */
                if (bmcr & BMCR_SPEED100) {
                        bp->line_speed = SPEED_100;
                }
                else {
                        bp->line_speed = SPEED_10;
                }
                if (bmcr & BMCR_FULLDPLX) {
                        bp->duplex = DUPLEX_FULL;
                }
                else {
                        bp->duplex = DUPLEX_HALF;
                }
        }

        return 0;
}
836
/* Program the EMAC to match the link state previously resolved into
 * bp->link_up / bp->line_speed / bp->duplex / bp->flow_ctrl, then
 * acknowledge the EMAC link-change attention.  Always returns 0.
 */
static int
bnx2_set_mac_link(struct bnx2 *bp)
{
	u32 val;

	/* NOTE(review): 0x2620 / 0x26ff are TX_LENGTHS (IPG/slot time)
	 * magic values; the larger one is used only for 1000 Mbps half
	 * duplex.  Exact field meanings not visible here — confirm
	 * against the chip manual.
	 */
	REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x2620);
	if (bp->link_up && (bp->line_speed == SPEED_1000) &&
		(bp->duplex == DUPLEX_HALF)) {
		REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x26ff);
	}

	/* Configure the EMAC mode register. */
	val = REG_RD(bp, BNX2_EMAC_MODE);

	val &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
		BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
		BNX2_EMAC_MODE_25G_MODE);

	if (bp->link_up) {
		switch (bp->line_speed) {
			case SPEED_10:
				/* Only non-5706 chips have a dedicated
				 * 10M MII port mode; the 5706 falls
				 * through to plain MII.
				 */
				if (CHIP_NUM(bp) != CHIP_NUM_5706) {
					val |= BNX2_EMAC_MODE_PORT_MII_10M;
					break;
				}
				/* fall through */
			case SPEED_100:
				val |= BNX2_EMAC_MODE_PORT_MII;
				break;
			case SPEED_2500:
				val |= BNX2_EMAC_MODE_25G_MODE;
				/* fall through */
			case SPEED_1000:
				val |= BNX2_EMAC_MODE_PORT_GMII;
				break;
		}
	}
	else {
		/* Link down: park the port in GMII mode. */
		val |= BNX2_EMAC_MODE_PORT_GMII;
	}

	/* Set the MAC to operate in the appropriate duplex mode. */
	if (bp->duplex == DUPLEX_HALF)
		val |= BNX2_EMAC_MODE_HALF_DUPLEX;
	REG_WR(bp, BNX2_EMAC_MODE, val);

	/* Enable/disable rx PAUSE. */
	bp->rx_mode &= ~BNX2_EMAC_RX_MODE_FLOW_EN;

	if (bp->flow_ctrl & FLOW_CTRL_RX)
		bp->rx_mode |= BNX2_EMAC_RX_MODE_FLOW_EN;
	REG_WR(bp, BNX2_EMAC_RX_MODE, bp->rx_mode);

	/* Enable/disable tx PAUSE. */
	val = REG_RD(bp, BNX2_EMAC_TX_MODE);
	val &= ~BNX2_EMAC_TX_MODE_FLOW_EN;

	if (bp->flow_ctrl & FLOW_CTRL_TX)
		val |= BNX2_EMAC_TX_MODE_FLOW_EN;
	REG_WR(bp, BNX2_EMAC_TX_MODE, val);

	/* Acknowledge the interrupt. */
	REG_WR(bp, BNX2_EMAC_STATUS, BNX2_EMAC_STATUS_LINK_CHANGE);

	return 0;
}
903
/* Re-evaluate link state from the PHY, update bp->link_up and
 * speed/duplex/flow-control via the chip-specific linkup helpers,
 * report transitions, and reprogram the MAC.  Always returns 0.
 * NOTE(review): appears to run under bp->phy_lock (the setup paths
 * drop/retake that lock) — confirm at the call sites.
 */
static int
bnx2_set_link(struct bnx2 *bp)
{
	u32 bmsr;
	u8 link_up;

	/* In loopback modes the link is simply forced up. */
	if (bp->loopback == MAC_LOOPBACK || bp->loopback == PHY_LOOPBACK) {
		bp->link_up = 1;
		return 0;
	}

	link_up = bp->link_up;

	/* BMSR latches link-down events per the MII spec; read it
	 * twice to obtain the current link status.
	 */
	bnx2_read_phy(bp, MII_BMSR, &bmsr);
	bnx2_read_phy(bp, MII_BMSR, &bmsr);

	/* 5706 SerDes: override the PHY's link bit with the EMAC's
	 * view of the link.
	 */
	if ((bp->phy_flags & PHY_SERDES_FLAG) &&
	    (CHIP_NUM(bp) == CHIP_NUM_5706)) {
		u32 val;

		val = REG_RD(bp, BNX2_EMAC_STATUS);
		if (val & BNX2_EMAC_STATUS_LINK)
			bmsr |= BMSR_LSTATUS;
		else
			bmsr &= ~BMSR_LSTATUS;
	}

	if (bmsr & BMSR_LSTATUS) {
		bp->link_up = 1;

		/* Resolve speed/duplex with the chip-specific helper. */
		if (bp->phy_flags & PHY_SERDES_FLAG) {
			if (CHIP_NUM(bp) == CHIP_NUM_5706)
				bnx2_5706s_linkup(bp);
			else if (CHIP_NUM(bp) == CHIP_NUM_5708)
				bnx2_5708s_linkup(bp);
		}
		else {
			bnx2_copper_linkup(bp);
		}
		bnx2_resolve_flow_ctrl(bp);
	}
	else {
		/* Link down.  For autoneg SerDes, clear any forced-2.5G
		 * state and make sure autoneg is re-enabled so the link
		 * can come back at any speed.
		 */
		if ((bp->phy_flags & PHY_SERDES_FLAG) &&
			(bp->autoneg & AUTONEG_SPEED)) {

			u32 bmcr;

			bnx2_read_phy(bp, MII_BMCR, &bmcr);
			bmcr &= ~BCM5708S_BMCR_FORCE_2500;
			if (!(bmcr & BMCR_ANENABLE)) {
				bnx2_write_phy(bp, MII_BMCR, bmcr |
					BMCR_ANENABLE);
			}
		}
		bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG;
		bp->link_up = 0;
	}

	/* Log only actual up/down transitions. */
	if (bp->link_up != link_up) {
		bnx2_report_link(bp);
	}

	bnx2_set_mac_link(bp);

	return 0;
}
970
971 static int
972 bnx2_reset_phy(struct bnx2 *bp)
973 {
974         int i;
975         u32 reg;
976
977         bnx2_write_phy(bp, MII_BMCR, BMCR_RESET);
978
979 #define PHY_RESET_MAX_WAIT 100
980         for (i = 0; i < PHY_RESET_MAX_WAIT; i++) {
981                 udelay(10);
982
983                 bnx2_read_phy(bp, MII_BMCR, &reg);
984                 if (!(reg & BMCR_RESET)) {
985                         udelay(20);
986                         break;
987                 }
988         }
989         if (i == PHY_RESET_MAX_WAIT) {
990                 return -EBUSY;
991         }
992         return 0;
993 }
994
995 static u32
996 bnx2_phy_get_pause_adv(struct bnx2 *bp)
997 {
998         u32 adv = 0;
999
1000         if ((bp->req_flow_ctrl & (FLOW_CTRL_RX | FLOW_CTRL_TX)) ==
1001                 (FLOW_CTRL_RX | FLOW_CTRL_TX)) {
1002
1003                 if (bp->phy_flags & PHY_SERDES_FLAG) {
1004                         adv = ADVERTISE_1000XPAUSE;
1005                 }
1006                 else {
1007                         adv = ADVERTISE_PAUSE_CAP;
1008                 }
1009         }
1010         else if (bp->req_flow_ctrl & FLOW_CTRL_TX) {
1011                 if (bp->phy_flags & PHY_SERDES_FLAG) {
1012                         adv = ADVERTISE_1000XPSE_ASYM;
1013                 }
1014                 else {
1015                         adv = ADVERTISE_PAUSE_ASYM;
1016                 }
1017         }
1018         else if (bp->req_flow_ctrl & FLOW_CTRL_RX) {
1019                 if (bp->phy_flags & PHY_SERDES_FLAG) {
1020                         adv = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
1021                 }
1022                 else {
1023                         adv = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
1024                 }
1025         }
1026         return adv;
1027 }
1028
/* Configure the SerDes PHY according to the requested link settings.
 * Forced-speed mode programs BMCR (and the 5708's 2.5G UP1 bit)
 * directly; autoneg mode rewrites the advertisement and restarts
 * autonegotiation.  Runs under bp->phy_lock, which is dropped and
 * retaken around the msleep().  Always returns 0.
 */
static int
bnx2_setup_serdes_phy(struct bnx2 *bp)
{
	u32 adv, bmcr, up1;
	u32 new_adv = 0;

	if (!(bp->autoneg & AUTONEG_SPEED)) {
		/* Forced speed/duplex path. */
		u32 new_bmcr;
		int force_link_down = 0;

		bnx2_read_phy(bp, MII_ADVERTISE, &adv);
		adv &= ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF);

		bnx2_read_phy(bp, MII_BMCR, &bmcr);
		new_bmcr = bmcr & ~(BMCR_ANENABLE | BCM5708S_BMCR_FORCE_2500);
		new_bmcr |= BMCR_SPEED1000;
		if (bp->req_line_speed == SPEED_2500) {
			/* Enable the 2.5G capability bit; toggling it
			 * requires forcing the link down.
			 */
			new_bmcr |= BCM5708S_BMCR_FORCE_2500;
			bnx2_read_phy(bp, BCM5708S_UP1, &up1);
			if (!(up1 & BCM5708S_UP1_2G5)) {
				up1 |= BCM5708S_UP1_2G5;
				bnx2_write_phy(bp, BCM5708S_UP1, up1);
				force_link_down = 1;
			}
		} else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
			/* Not forcing 2.5G: make sure the bit is off. */
			bnx2_read_phy(bp, BCM5708S_UP1, &up1);
			if (up1 & BCM5708S_UP1_2G5) {
				up1 &= ~BCM5708S_UP1_2G5;
				bnx2_write_phy(bp, BCM5708S_UP1, up1);
				force_link_down = 1;
			}
		}

		if (bp->req_duplex == DUPLEX_FULL) {
			adv |= ADVERTISE_1000XFULL;
			new_bmcr |= BMCR_FULLDPLX;
		}
		else {
			adv |= ADVERTISE_1000XHALF;
			new_bmcr &= ~BMCR_FULLDPLX;
		}
		if ((new_bmcr != bmcr) || (force_link_down)) {
			/* Force a link down visible on the other side */
			if (bp->link_up) {
				bnx2_write_phy(bp, MII_ADVERTISE, adv &
					       ~(ADVERTISE_1000XFULL |
						 ADVERTISE_1000XHALF));
				bnx2_write_phy(bp, MII_BMCR, bmcr |
					BMCR_ANRESTART | BMCR_ANENABLE);

				bp->link_up = 0;
				netif_carrier_off(bp->dev);
				bnx2_write_phy(bp, MII_BMCR, new_bmcr);
				bnx2_report_link(bp);
			}
			bnx2_write_phy(bp, MII_ADVERTISE, adv);
			bnx2_write_phy(bp, MII_BMCR, new_bmcr);
		}
		return 0;
	}

	/* Autoneg path. */
	if (bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) {
		bnx2_read_phy(bp, BCM5708S_UP1, &up1);
		up1 |= BCM5708S_UP1_2G5;
		bnx2_write_phy(bp, BCM5708S_UP1, up1);
	}

	if (bp->advertising & ADVERTISED_1000baseT_Full)
		new_adv |= ADVERTISE_1000XFULL;

	new_adv |= bnx2_phy_get_pause_adv(bp);

	bnx2_read_phy(bp, MII_ADVERTISE, &adv);
	bnx2_read_phy(bp, MII_BMCR, &bmcr);

	bp->serdes_an_pending = 0;
	if ((adv != new_adv) || ((bmcr & BMCR_ANENABLE) == 0)) {
		/* Force a link down visible on the other side */
		if (bp->link_up) {
			bnx2_write_phy(bp, MII_BMCR, BMCR_LOOPBACK);
			spin_unlock_bh(&bp->phy_lock);
			msleep(20);
			spin_lock_bh(&bp->phy_lock);
		}

		bnx2_write_phy(bp, MII_ADVERTISE, new_adv);
		bnx2_write_phy(bp, MII_BMCR, bmcr | BMCR_ANRESTART |
			BMCR_ANENABLE);
		/* Speed up link-up time when the link partner
		 * does not autonegotiate which is very common
		 * in blade servers. Some blade servers use
		 * IPMI for keyboard input and it's important
		 * to minimize link disruptions. Autoneg. involves
		 * exchanging base pages plus 3 next pages and
		 * normally completes in about 120 msec.
		 */
		bp->current_interval = SERDES_AN_TIMEOUT;
		bp->serdes_an_pending = 1;
		mod_timer(&bp->timer, jiffies + bp->current_interval);
	}

	return 0;
}
1132
/* ethtool "advertised" masks: all link modes supportable on fibre
 * (SerDes) versus copper ports.
 */
#define ETHTOOL_ALL_FIBRE_SPEED						\
	(ADVERTISED_1000baseT_Full)

#define ETHTOOL_ALL_COPPER_SPEED					\
	(ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |		\
	ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |		\
	ADVERTISED_1000baseT_Full)

/* MII advertisement-register masks covering every 10/100 bit (plus
 * the mandatory CSMA selector) and every 1000 bit.
 */
#define PHY_ALL_10_100_SPEED (ADVERTISE_10HALF | ADVERTISE_10FULL | \
	ADVERTISE_100HALF | ADVERTISE_100FULL | ADVERTISE_CSMA)

#define PHY_ALL_1000_SPEED (ADVERTISE_1000HALF | ADVERTISE_1000FULL)
1145
/* Configure the copper PHY.  In autoneg mode, rebuild the 10/100 and
 * 1000 advertisement registers from bp->advertising and restart
 * autoneg only when something changed.  In forced mode, program BMCR
 * and, if needed, bounce the link so the partner sees the change.
 * Runs under bp->phy_lock (dropped/retaken around msleep()).
 * Always returns 0.
 */
static int
bnx2_setup_copper_phy(struct bnx2 *bp)
{
	u32 bmcr;
	u32 new_bmcr;

	bnx2_read_phy(bp, MII_BMCR, &bmcr);

	if (bp->autoneg & AUTONEG_SPEED) {
		u32 adv_reg, adv1000_reg;
		u32 new_adv_reg = 0;
		u32 new_adv1000_reg = 0;

		/* Current advertisement, masked to the bits we manage. */
		bnx2_read_phy(bp, MII_ADVERTISE, &adv_reg);
		adv_reg &= (PHY_ALL_10_100_SPEED | ADVERTISE_PAUSE_CAP |
			ADVERTISE_PAUSE_ASYM);

		bnx2_read_phy(bp, MII_CTRL1000, &adv1000_reg);
		adv1000_reg &= PHY_ALL_1000_SPEED;

		/* Rebuild the desired advertisement from bp->advertising. */
		if (bp->advertising & ADVERTISED_10baseT_Half)
			new_adv_reg |= ADVERTISE_10HALF;
		if (bp->advertising & ADVERTISED_10baseT_Full)
			new_adv_reg |= ADVERTISE_10FULL;
		if (bp->advertising & ADVERTISED_100baseT_Half)
			new_adv_reg |= ADVERTISE_100HALF;
		if (bp->advertising & ADVERTISED_100baseT_Full)
			new_adv_reg |= ADVERTISE_100FULL;
		if (bp->advertising & ADVERTISED_1000baseT_Full)
			new_adv1000_reg |= ADVERTISE_1000FULL;

		new_adv_reg |= ADVERTISE_CSMA;

		new_adv_reg |= bnx2_phy_get_pause_adv(bp);

		/* Restart autoneg only if the advertisement changed or
		 * autoneg is currently disabled.
		 */
		if ((adv1000_reg != new_adv1000_reg) ||
			(adv_reg != new_adv_reg) ||
			((bmcr & BMCR_ANENABLE) == 0)) {

			bnx2_write_phy(bp, MII_ADVERTISE, new_adv_reg);
			bnx2_write_phy(bp, MII_CTRL1000, new_adv1000_reg);
			bnx2_write_phy(bp, MII_BMCR, BMCR_ANRESTART |
				BMCR_ANENABLE);
		}
		else if (bp->link_up) {
			/* Flow ctrl may have changed from auto to forced */
			/* or vice-versa. */

			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);
		}
		return 0;
	}

	/* Forced speed/duplex path. */
	new_bmcr = 0;
	if (bp->req_line_speed == SPEED_100) {
		new_bmcr |= BMCR_SPEED100;
	}
	if (bp->req_duplex == DUPLEX_FULL) {
		new_bmcr |= BMCR_FULLDPLX;
	}
	if (new_bmcr != bmcr) {
		u32 bmsr;

		/* Double read: BMSR latches link-down events. */
		bnx2_read_phy(bp, MII_BMSR, &bmsr);
		bnx2_read_phy(bp, MII_BMSR, &bmsr);

		if (bmsr & BMSR_LSTATUS) {
			/* Force link down */
			bnx2_write_phy(bp, MII_BMCR, BMCR_LOOPBACK);
			spin_unlock_bh(&bp->phy_lock);
			msleep(50);
			spin_lock_bh(&bp->phy_lock);

			bnx2_read_phy(bp, MII_BMSR, &bmsr);
			bnx2_read_phy(bp, MII_BMSR, &bmsr);
		}

		bnx2_write_phy(bp, MII_BMCR, new_bmcr);

		/* Normally, the new speed is setup after the link has
		 * gone down and up again. In some cases, link will not go
		 * down so we need to set up the new speed here.
		 */
		if (bmsr & BMSR_LSTATUS) {
			bp->line_speed = bp->req_line_speed;
			bp->duplex = bp->req_duplex;
			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);
		}
	}
	return 0;
}
1239
1240 static int
1241 bnx2_setup_phy(struct bnx2 *bp)
1242 {
1243         if (bp->loopback == MAC_LOOPBACK)
1244                 return 0;
1245
1246         if (bp->phy_flags & PHY_SERDES_FLAG) {
1247                 return (bnx2_setup_serdes_phy(bp));
1248         }
1249         else {
1250                 return (bnx2_setup_copper_phy(bp));
1251         }
1252 }
1253
/* One-time initialization of the 5708 SerDes PHY: IEEE register
 * mode, fiber autodetect, parallel detect, optional 2.5G capability,
 * plus board/revision-specific TX amplitude tweaks.  Returns 0.
 */
static int
bnx2_init_5708s_phy(struct bnx2 *bp)
{
	u32 val;

	/* Use the IEEE-style register set (DIG3 block). */
	bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG3);
	bnx2_write_phy(bp, BCM5708S_DIG_3_0, BCM5708S_DIG_3_0_USE_IEEE);
	bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);

	/* Fiber mode with autodetect. */
	bnx2_read_phy(bp, BCM5708S_1000X_CTL1, &val);
	val |= BCM5708S_1000X_CTL1_FIBER_MODE | BCM5708S_1000X_CTL1_AUTODET_EN;
	bnx2_write_phy(bp, BCM5708S_1000X_CTL1, val);

	/* Enable parallel detection. */
	bnx2_read_phy(bp, BCM5708S_1000X_CTL2, &val);
	val |= BCM5708S_1000X_CTL2_PLLEL_DET_EN;
	bnx2_write_phy(bp, BCM5708S_1000X_CTL2, val);

	if (bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) {
		bnx2_read_phy(bp, BCM5708S_UP1, &val);
		val |= BCM5708S_UP1_2G5;
		bnx2_write_phy(bp, BCM5708S_UP1, val);
	}

	if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_B1)) {
		/* increase tx signal amplitude */
		bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
			       BCM5708S_BLK_ADDR_TX_MISC);
		bnx2_read_phy(bp, BCM5708S_TX_ACTL1, &val);
		val &= ~BCM5708S_TX_ACTL1_DRIVER_VCM;
		bnx2_write_phy(bp, BCM5708S_TX_ACTL1, val);
		bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);
	}

	/* Apply a board-specific TX control value from shared memory,
	 * but only on backplane designs.
	 */
	val = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_CONFIG) &
	      BNX2_PORT_HW_CFG_CFG_TXCTL3_MASK;

	if (val) {
		u32 is_backplane;

		is_backplane = REG_RD_IND(bp, bp->shmem_base +
					  BNX2_SHARED_HW_CFG_CONFIG);
		if (is_backplane & BNX2_SHARED_HW_CFG_PHY_BACKPLANE) {
			bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
				       BCM5708S_BLK_ADDR_TX_MISC);
			bnx2_write_phy(bp, BCM5708S_TX_ACTL3, val);
			bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
				       BCM5708S_BLK_ADDR_DIG);
		}
	}
	return 0;
}
1307
/* One-time initialization of the 5706 SerDes PHY, including
 * MTU-dependent (jumbo frame) packet-length configuration.
 * Returns 0.
 */
static int
bnx2_init_5706s_phy(struct bnx2 *bp)
{
	bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG;

	/* NOTE(review): 0x300 written to MISC_GP_HW_CTL0 is an
	 * undocumented magic value — confirm against the chip manual.
	 */
	if (CHIP_NUM(bp) == CHIP_NUM_5706)
		REG_WR(bp, BNX2_MISC_GP_HW_CTL0, 0x300);

	/* NOTE(review): registers 0x18 and 0x1c below are Broadcom
	 * shadow/expansion registers accessed with vendor-provided
	 * magic sequences; field meanings not visible here.
	 */
	if (bp->dev->mtu > 1500) {
		u32 val;

		/* Set extended packet length bit */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, (val & 0xfff8) | 0x4000);

		bnx2_write_phy(bp, 0x1c, 0x6c00);
		bnx2_read_phy(bp, 0x1c, &val);
		bnx2_write_phy(bp, 0x1c, (val & 0x3ff) | 0xec02);
	}
	else {
		u32 val;

		/* Clear extended packet length bit for standard MTU. */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, val & ~0x4007);

		bnx2_write_phy(bp, 0x1c, 0x6c00);
		bnx2_read_phy(bp, 0x1c, &val);
		bnx2_write_phy(bp, 0x1c, (val & 0x3fd) | 0xec00);
	}

	return 0;
}
1342
/* One-time initialization of the copper PHY: optional CRC-fix
 * workaround, MTU-dependent extended packet length setup, and
 * ethernet@wirespeed enable.  Returns 0.
 *
 * NOTE(review): registers 0x10/0x15/0x17/0x18 are Broadcom shadow/
 * expansion registers programmed with vendor-provided magic
 * sequences; field meanings are not visible here.
 */
static int
bnx2_init_copper_phy(struct bnx2 *bp)
{
	u32 val;

	/* Vendor workaround sequence for a CRC erratum. */
	if (bp->phy_flags & PHY_CRC_FIX_FLAG) {
		bnx2_write_phy(bp, 0x18, 0x0c00);
		bnx2_write_phy(bp, 0x17, 0x000a);
		bnx2_write_phy(bp, 0x15, 0x310b);
		bnx2_write_phy(bp, 0x17, 0x201f);
		bnx2_write_phy(bp, 0x15, 0x9506);
		bnx2_write_phy(bp, 0x17, 0x401f);
		bnx2_write_phy(bp, 0x15, 0x14e2);
		bnx2_write_phy(bp, 0x18, 0x0400);
	}

	if (bp->dev->mtu > 1500) {
		/* Set extended packet length bit */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, val | 0x4000);

		bnx2_read_phy(bp, 0x10, &val);
		bnx2_write_phy(bp, 0x10, val | 0x1);
	}
	else {
		/* Clear extended packet length for standard MTU. */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, val & ~0x4007);

		bnx2_read_phy(bp, 0x10, &val);
		bnx2_write_phy(bp, 0x10, val & ~0x1);
	}

	/* ethernet@wirespeed */
	bnx2_write_phy(bp, 0x18, 0x7007);
	bnx2_read_phy(bp, 0x18, &val);
	bnx2_write_phy(bp, 0x18, val | (1 << 15) | (1 << 4));
	return 0;
}
1383
1384
1385 static int
1386 bnx2_init_phy(struct bnx2 *bp)
1387 {
1388         u32 val;
1389         int rc = 0;
1390
1391         bp->phy_flags &= ~PHY_INT_MODE_MASK_FLAG;
1392         bp->phy_flags |= PHY_INT_MODE_LINK_READY_FLAG;
1393
1394         REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);
1395
1396         bnx2_reset_phy(bp);
1397
1398         bnx2_read_phy(bp, MII_PHYSID1, &val);
1399         bp->phy_id = val << 16;
1400         bnx2_read_phy(bp, MII_PHYSID2, &val);
1401         bp->phy_id |= val & 0xffff;
1402
1403         if (bp->phy_flags & PHY_SERDES_FLAG) {
1404                 if (CHIP_NUM(bp) == CHIP_NUM_5706)
1405                         rc = bnx2_init_5706s_phy(bp);
1406                 else if (CHIP_NUM(bp) == CHIP_NUM_5708)
1407                         rc = bnx2_init_5708s_phy(bp);
1408         }
1409         else {
1410                 rc = bnx2_init_copper_phy(bp);
1411         }
1412
1413         bnx2_setup_phy(bp);
1414
1415         return rc;
1416 }
1417
1418 static int
1419 bnx2_set_mac_loopback(struct bnx2 *bp)
1420 {
1421         u32 mac_mode;
1422
1423         mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
1424         mac_mode &= ~BNX2_EMAC_MODE_PORT;
1425         mac_mode |= BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK;
1426         REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
1427         bp->link_up = 1;
1428         return 0;
1429 }
1430
1431 static int bnx2_test_link(struct bnx2 *);
1432
1433 static int
1434 bnx2_set_phy_loopback(struct bnx2 *bp)
1435 {
1436         u32 mac_mode;
1437         int rc, i;
1438
1439         spin_lock_bh(&bp->phy_lock);
1440         rc = bnx2_write_phy(bp, MII_BMCR, BMCR_LOOPBACK | BMCR_FULLDPLX |
1441                             BMCR_SPEED1000);
1442         spin_unlock_bh(&bp->phy_lock);
1443         if (rc)
1444                 return rc;
1445
1446         for (i = 0; i < 10; i++) {
1447                 if (bnx2_test_link(bp) == 0)
1448                         break;
1449                 msleep(100);
1450         }
1451
1452         mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
1453         mac_mode &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
1454                       BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
1455                       BNX2_EMAC_MODE_25G_MODE);
1456
1457         mac_mode |= BNX2_EMAC_MODE_PORT_GMII;
1458         REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
1459         bp->link_up = 1;
1460         return 0;
1461 }
1462
/* Post a message to the bootcode through the shared-memory driver
 * mailbox and poll (up to FW_ACK_TIME_OUT_MS) for the firmware to
 * echo the sequence number back in the firmware mailbox.
 *
 * Returns 0 on success and for WAIT0-type messages (which do not
 * require a completed ack here); -EBUSY on ack timeout, in which
 * case a FW_TIMEOUT code is also posted back to the firmware
 * (logged unless @silent); -EIO if the firmware acked with a
 * non-OK status.
 */
static int
bnx2_fw_sync(struct bnx2 *bp, u32 msg_data, int silent)
{
	int i;
	u32 val;

	/* Tag the message with the next driver sequence number. */
	bp->fw_wr_seq++;
	msg_data |= bp->fw_wr_seq;

	REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_MB, msg_data);

	/* wait for an acknowledgement. */
	for (i = 0; i < (FW_ACK_TIME_OUT_MS / 10); i++) {
		msleep(10);

		val = REG_RD_IND(bp, bp->shmem_base + BNX2_FW_MB);

		/* Firmware acks by echoing our sequence number. */
		if ((val & BNX2_FW_MSG_ACK) == (msg_data & BNX2_DRV_MSG_SEQ))
			break;
	}
	if ((msg_data & BNX2_DRV_MSG_DATA) == BNX2_DRV_MSG_DATA_WAIT0)
		return 0;

	/* If we timed out, inform the firmware that this is the case. */
	if ((val & BNX2_FW_MSG_ACK) != (msg_data & BNX2_DRV_MSG_SEQ)) {
		if (!silent)
			printk(KERN_ERR PFX "fw sync timeout, reset code = "
					    "%x\n", msg_data);

		msg_data &= ~BNX2_DRV_MSG_CODE;
		msg_data |= BNX2_DRV_MSG_CODE_FW_TIMEOUT;

		REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_MB, msg_data);

		return -EBUSY;
	}

	if ((val & BNX2_FW_MSG_STATUS_MASK) != BNX2_FW_MSG_STATUS_OK)
		return -EIO;

	return 0;
}
1505
/* Program the 5709 context engine's host page table with the DMA
 * addresses of the pre-allocated context pages, polling each write
 * request until the hardware consumes it.  Returns 0, or -EBUSY if
 * a write request fails to clear.
 */
static int
bnx2_init_5709_context(struct bnx2 *bp)
{
	int i, ret = 0;
	u32 val;

	/* NOTE(review): bit 12 of CTX_COMMAND is an undocumented magic
	 * bit here; confirm against the chip manual.  Bits 16+ encode
	 * the page size relative to 256 bytes.
	 */
	val = BNX2_CTX_COMMAND_ENABLED | BNX2_CTX_COMMAND_MEM_INIT | (1 << 12);
	val |= (BCM_PAGE_BITS - 8) << 16;
	REG_WR(bp, BNX2_CTX_COMMAND, val);
	for (i = 0; i < bp->ctx_pages; i++) {
		int j;

		/* Low then high 32 bits of the page's DMA address. */
		REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA0,
		       (bp->ctx_blk_mapping[i] & 0xffffffff) |
		       BNX2_CTX_HOST_PAGE_TBL_DATA0_VALID);
		REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA1,
		       (u64) bp->ctx_blk_mapping[i] >> 32);
		REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL, i |
		       BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ);
		/* Poll until the hardware clears the write request. */
		for (j = 0; j < 10; j++) {

			val = REG_RD(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL);
			if (!(val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ))
				break;
			udelay(5);
		}
		if (val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ) {
			ret = -EBUSY;
			break;
		}
	}
	return ret;
}
1539
1540 static void
1541 bnx2_init_context(struct bnx2 *bp)
1542 {
1543         u32 vcid;
1544
1545         vcid = 96;
1546         while (vcid) {
1547                 u32 vcid_addr, pcid_addr, offset;
1548
1549                 vcid--;
1550
1551                 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
1552                         u32 new_vcid;
1553
1554                         vcid_addr = GET_PCID_ADDR(vcid);
1555                         if (vcid & 0x8) {
1556                                 new_vcid = 0x60 + (vcid & 0xf0) + (vcid & 0x7);
1557                         }
1558                         else {
1559                                 new_vcid = vcid;
1560                         }
1561                         pcid_addr = GET_PCID_ADDR(new_vcid);
1562                 }
1563                 else {
1564                         vcid_addr = GET_CID_ADDR(vcid);
1565                         pcid_addr = vcid_addr;
1566                 }
1567
1568                 REG_WR(bp, BNX2_CTX_VIRT_ADDR, 0x00);
1569                 REG_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr);
1570
1571                 /* Zero out the context. */
1572                 for (offset = 0; offset < PHY_CTX_SIZE; offset += 4) {
1573                         CTX_WR(bp, 0x00, offset, 0);
1574                 }
1575
1576                 REG_WR(bp, BNX2_CTX_VIRT_ADDR, vcid_addr);
1577                 REG_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr);
1578         }
1579 }
1580
/* Workaround for bad on-chip RX mbuf memory: allocate every free
 * mbuf from the firmware pool, remember the good ones (bit 9 of the
 * allocated value clear), then free only the good ones back.  The
 * bad blocks stay permanently allocated and are never handed out
 * again.  Returns 0, or -ENOMEM if the scratch array allocation
 * fails.
 */
static int
bnx2_alloc_bad_rbuf(struct bnx2 *bp)
{
	u16 *good_mbuf;
	u32 good_mbuf_cnt;
	u32 val;

	good_mbuf = kmalloc(512 * sizeof(u16), GFP_KERNEL);
	if (good_mbuf == NULL) {
		printk(KERN_ERR PFX "Failed to allocate memory in "
				    "bnx2_alloc_bad_rbuf\n");
		return -ENOMEM;
	}

	REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
		BNX2_MISC_ENABLE_SET_BITS_RX_MBUF_ENABLE);

	good_mbuf_cnt = 0;

	/* Allocate a bunch of mbufs and save the good ones in an array. */
	val = REG_RD_IND(bp, BNX2_RBUF_STATUS1);
	while (val & BNX2_RBUF_STATUS1_FREE_COUNT) {
		REG_WR_IND(bp, BNX2_RBUF_COMMAND, BNX2_RBUF_COMMAND_ALLOC_REQ);

		val = REG_RD_IND(bp, BNX2_RBUF_FW_BUF_ALLOC);

		val &= BNX2_RBUF_FW_BUF_ALLOC_VALUE;

		/* The addresses with Bit 9 set are bad memory blocks. */
		if (!(val & (1 << 9))) {
			good_mbuf[good_mbuf_cnt] = (u16) val;
			good_mbuf_cnt++;
		}

		val = REG_RD_IND(bp, BNX2_RBUF_STATUS1);
	}

	/* Free the good ones back to the mbuf pool thus discarding
	 * all the bad ones. */
	while (good_mbuf_cnt) {
		good_mbuf_cnt--;

		val = good_mbuf[good_mbuf_cnt];
		/* NOTE(review): encoding expected by RBUF_FW_BUF_FREE;
		 * exact field layout is not visible here.
		 */
		val = (val << 9) | val | 1;

		REG_WR_IND(bp, BNX2_RBUF_FW_BUF_FREE, val);
	}
	kfree(good_mbuf);
	return 0;
}
1631
1632 static void
1633 bnx2_set_mac_addr(struct bnx2 *bp)
1634 {
1635         u32 val;
1636         u8 *mac_addr = bp->dev->dev_addr;
1637
1638         val = (mac_addr[0] << 8) | mac_addr[1];
1639
1640         REG_WR(bp, BNX2_EMAC_MAC_MATCH0, val);
1641
1642         val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
1643                 (mac_addr[4] << 8) | mac_addr[5];
1644
1645         REG_WR(bp, BNX2_EMAC_MAC_MATCH1, val);
1646 }
1647
/* Allocate and DMA-map a receive skb for RX ring slot @index,
 * publish its address in the matching rx_bd, and advance the RX
 * producer byte sequence.  Returns 0, or -ENOMEM if the skb
 * allocation fails.
 */
static inline int
bnx2_alloc_rx_skb(struct bnx2 *bp, u16 index)
{
	struct sk_buff *skb;
	struct sw_bd *rx_buf = &bp->rx_buf_ring[index];
	dma_addr_t mapping;
	struct rx_bd *rxbd = &bp->rx_desc_ring[RX_RING(index)][RX_IDX(index)];
	unsigned long align;

	skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
	if (skb == NULL) {
		return -ENOMEM;
	}

	/* Align the data pointer to a BNX2_RX_ALIGN boundary. */
	if (unlikely((align = (unsigned long) skb->data & (BNX2_RX_ALIGN - 1))))
		skb_reserve(skb, BNX2_RX_ALIGN - align);

	mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_use_size,
		PCI_DMA_FROMDEVICE);

	rx_buf->skb = skb;
	pci_unmap_addr_set(rx_buf, mapping, mapping);

	/* Split the 64-bit DMA address across the descriptor words. */
	rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
	rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;

	/* Running byte count of buffers posted to the ring. */
	bp->rx_prod_bseq += bp->rx_buf_use_size;

	return 0;
}
1678
1679 static void
1680 bnx2_phy_int(struct bnx2 *bp)
1681 {
1682         u32 new_link_state, old_link_state;
1683
1684         new_link_state = bp->status_blk->status_attn_bits &
1685                 STATUS_ATTN_BITS_LINK_STATE;
1686         old_link_state = bp->status_blk->status_attn_bits_ack &
1687                 STATUS_ATTN_BITS_LINK_STATE;
1688         if (new_link_state != old_link_state) {
1689                 if (new_link_state) {
1690                         REG_WR(bp, BNX2_PCICFG_STATUS_BIT_SET_CMD,
1691                                 STATUS_ATTN_BITS_LINK_STATE);
1692                 }
1693                 else {
1694                         REG_WR(bp, BNX2_PCICFG_STATUS_BIT_CLEAR_CMD,
1695                                 STATUS_ATTN_BITS_LINK_STATE);
1696                 }
1697                 bnx2_set_link(bp);
1698         }
1699 }
1700
/* Reclaim completed tx descriptors up to the hardware consumer index
 * published in the status block: unmap each packet's head and fragment
 * DMA buffers, free the skbs, and wake the tx queue if it was stopped
 * and enough BDs have become free.
 */
static void
bnx2_tx_int(struct bnx2 *bp)
{
	struct status_block *sblk = bp->status_blk;
	u16 hw_cons, sw_cons, sw_ring_cons;
	int tx_free_bd = 0;

	hw_cons = bp->hw_tx_cons = sblk->status_tx_quick_consumer_index0;
	/* The index never stops on the last BD of a page; skip over it. */
	if ((hw_cons & MAX_TX_DESC_CNT) == MAX_TX_DESC_CNT) {
		hw_cons++;
	}
	sw_cons = bp->tx_cons;

	while (sw_cons != hw_cons) {
		struct sw_bd *tx_buf;
		struct sk_buff *skb;
		int i, last;

		sw_ring_cons = TX_RING_IDX(sw_cons);

		tx_buf = &bp->tx_buf_ring[sw_ring_cons];
		skb = tx_buf->skb;
#ifdef BCM_TSO
		/* partial BD completions possible with TSO packets */
		if (skb_is_gso(skb)) {
			u16 last_idx, last_ring_idx;

			last_idx = sw_cons +
				skb_shinfo(skb)->nr_frags + 1;
			last_ring_idx = sw_ring_cons +
				skb_shinfo(skb)->nr_frags + 1;
			if (unlikely(last_ring_idx >= MAX_TX_DESC_CNT)) {
				last_idx++;
			}
			/* Signed comparison handles index wrap-around; stop
			 * if the packet's final BD has not completed yet.
			 */
			if (((s16) ((s16) last_idx - (s16) hw_cons)) > 0) {
				break;
			}
		}
#endif
		pci_unmap_single(bp->pdev, pci_unmap_addr(tx_buf, mapping),
			skb_headlen(skb), PCI_DMA_TODEVICE);

		tx_buf->skb = NULL;
		last = skb_shinfo(skb)->nr_frags;

		/* One BD per page fragment follows the head BD. */
		for (i = 0; i < last; i++) {
			sw_cons = NEXT_TX_BD(sw_cons);

			pci_unmap_page(bp->pdev,
				pci_unmap_addr(
					&bp->tx_buf_ring[TX_RING_IDX(sw_cons)],
					mapping),
				skb_shinfo(skb)->frags[i].size,
				PCI_DMA_TODEVICE);
		}

		sw_cons = NEXT_TX_BD(sw_cons);

		tx_free_bd += last + 1;

		dev_kfree_skb(skb);

		/* Re-read the hardware index; more packets may have
		 * completed while this one was being freed.
		 */
		hw_cons = bp->hw_tx_cons =
			sblk->status_tx_quick_consumer_index0;

		if ((hw_cons & MAX_TX_DESC_CNT) == MAX_TX_DESC_CNT) {
			hw_cons++;
		}
	}

	bp->tx_cons = sw_cons;
	/* Need to make the tx_cons update visible to bnx2_start_xmit()
	 * before checking for netif_queue_stopped().  Without the
	 * memory barrier, there is a small possibility that bnx2_start_xmit()
	 * will miss it and cause the queue to be stopped forever.
	 */
	smp_mb();

	if (unlikely(netif_queue_stopped(bp->dev)) &&
		     (bnx2_tx_avail(bp) > bp->tx_wake_thresh)) {
		netif_tx_lock(bp->dev);
		/* Re-check under the tx lock to close the race with a
		 * concurrent bnx2_start_xmit() stopping the queue.
		 */
		if ((netif_queue_stopped(bp->dev)) &&
		    (bnx2_tx_avail(bp) > bp->tx_wake_thresh))
			netif_wake_queue(bp->dev);
		netif_tx_unlock(bp->dev);
	}
}
1788
1789 static inline void
1790 bnx2_reuse_rx_skb(struct bnx2 *bp, struct sk_buff *skb,
1791         u16 cons, u16 prod)
1792 {
1793         struct sw_bd *cons_rx_buf, *prod_rx_buf;
1794         struct rx_bd *cons_bd, *prod_bd;
1795
1796         cons_rx_buf = &bp->rx_buf_ring[cons];
1797         prod_rx_buf = &bp->rx_buf_ring[prod];
1798
1799         pci_dma_sync_single_for_device(bp->pdev,
1800                 pci_unmap_addr(cons_rx_buf, mapping),
1801                 bp->rx_offset + RX_COPY_THRESH, PCI_DMA_FROMDEVICE);
1802
1803         bp->rx_prod_bseq += bp->rx_buf_use_size;
1804
1805         prod_rx_buf->skb = skb;
1806
1807         if (cons == prod)
1808                 return;
1809
1810         pci_unmap_addr_set(prod_rx_buf, mapping,
1811                         pci_unmap_addr(cons_rx_buf, mapping));
1812
1813         cons_bd = &bp->rx_desc_ring[RX_RING(cons)][RX_IDX(cons)];
1814         prod_bd = &bp->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
1815         prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
1816         prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;
1817 }
1818
/* NAPI rx handler: process up to @budget received packets between the
 * software consumer index and the hardware index from the status block.
 * Small packets (when mtu > 1500) are copied into a fresh skb and the
 * original buffer recycled; otherwise a replacement buffer is allocated
 * and the filled skb passed up the stack.  Returns the number of
 * packets processed.
 */
static int
bnx2_rx_int(struct bnx2 *bp, int budget)
{
	struct status_block *sblk = bp->status_blk;
	u16 hw_cons, sw_cons, sw_ring_cons, sw_prod, sw_ring_prod;
	struct l2_fhdr *rx_hdr;
	int rx_pkt = 0;

	hw_cons = bp->hw_rx_cons = sblk->status_rx_quick_consumer_index0;
	/* The index never stops on the last BD of a page; skip over it. */
	if ((hw_cons & MAX_RX_DESC_CNT) == MAX_RX_DESC_CNT) {
		hw_cons++;
	}
	sw_cons = bp->rx_cons;
	sw_prod = bp->rx_prod;

	/* Memory barrier necessary as speculative reads of the rx
	 * buffer can be ahead of the index in the status block
	 */
	rmb();
	while (sw_cons != hw_cons) {
		unsigned int len;
		u32 status;
		struct sw_bd *rx_buf;
		struct sk_buff *skb;
		dma_addr_t dma_addr;

		sw_ring_cons = RX_RING_IDX(sw_cons);
		sw_ring_prod = RX_RING_IDX(sw_prod);

		rx_buf = &bp->rx_buf_ring[sw_ring_cons];
		skb = rx_buf->skb;

		rx_buf->skb = NULL;

		dma_addr = pci_unmap_addr(rx_buf, mapping);

		/* Sync only the frame header area for the CPU; the rest is
		 * either copied after a full unmap or recycled untouched.
		 */
		pci_dma_sync_single_for_cpu(bp->pdev, dma_addr,
			bp->rx_offset + RX_COPY_THRESH, PCI_DMA_FROMDEVICE);

		/* The chip prepends an l2_fhdr with status and length;
		 * the length includes the 4-byte FCS, which we drop.
		 */
		rx_hdr = (struct l2_fhdr *) skb->data;
		len = rx_hdr->l2_fhdr_pkt_len - 4;

		if ((status = rx_hdr->l2_fhdr_status) &
			(L2_FHDR_ERRORS_BAD_CRC |
			L2_FHDR_ERRORS_PHY_DECODE |
			L2_FHDR_ERRORS_ALIGNMENT |
			L2_FHDR_ERRORS_TOO_SHORT |
			L2_FHDR_ERRORS_GIANT_FRAME)) {

			goto reuse_rx;
		}

		/* Since we don't have a jumbo ring, copy small packets
		 * if mtu > 1500
		 */
		if ((bp->dev->mtu > 1500) && (len <= RX_COPY_THRESH)) {
			struct sk_buff *new_skb;

			new_skb = netdev_alloc_skb(bp->dev, len + 2);
			if (new_skb == NULL)
				goto reuse_rx;

			/* aligned copy */
			memcpy(new_skb->data,
				skb->data + bp->rx_offset - 2,
				len + 2);

			skb_reserve(new_skb, 2);
			skb_put(new_skb, len);

			/* Original buffer goes back into the ring unchanged. */
			bnx2_reuse_rx_skb(bp, skb,
				sw_ring_cons, sw_ring_prod);

			skb = new_skb;
		}
		else if (bnx2_alloc_rx_skb(bp, sw_ring_prod) == 0) {
			/* A replacement buffer now fills the producer slot,
			 * so this skb can be unmapped and handed upstream.
			 */
			pci_unmap_single(bp->pdev, dma_addr,
				bp->rx_buf_use_size, PCI_DMA_FROMDEVICE);

			skb_reserve(skb, bp->rx_offset);
			skb_put(skb, len);
		}
		else {
reuse_rx:
			/* Error frame or allocation failure: recycle the
			 * buffer and drop the packet.
			 */
			bnx2_reuse_rx_skb(bp, skb,
				sw_ring_cons, sw_ring_prod);
			goto next_rx;
		}

		skb->protocol = eth_type_trans(skb, bp->dev);

		/* Drop oversized frames unless VLAN tagged (0x8100). */
		if ((len > (bp->dev->mtu + ETH_HLEN)) &&
			(ntohs(skb->protocol) != 0x8100)) {

			dev_kfree_skb(skb);
			goto next_rx;

		}

		/* Trust the hardware checksum only when rx_csum is enabled,
		 * the frame was recognized as TCP/UDP, and no checksum
		 * error bits are set.
		 */
		skb->ip_summed = CHECKSUM_NONE;
		if (bp->rx_csum &&
			(status & (L2_FHDR_STATUS_TCP_SEGMENT |
			L2_FHDR_STATUS_UDP_DATAGRAM))) {

			if (likely((status & (L2_FHDR_ERRORS_TCP_XSUM |
					      L2_FHDR_ERRORS_UDP_XSUM)) == 0))
				skb->ip_summed = CHECKSUM_UNNECESSARY;
		}

#ifdef BCM_VLAN
		if ((status & L2_FHDR_STATUS_L2_VLAN_TAG) && (bp->vlgrp != 0)) {
			vlan_hwaccel_receive_skb(skb, bp->vlgrp,
				rx_hdr->l2_fhdr_vlan_tag);
		}
		else
#endif
			netif_receive_skb(skb);

		bp->dev->last_rx = jiffies;
		rx_pkt++;

next_rx:
		sw_cons = NEXT_RX_BD(sw_cons);
		sw_prod = NEXT_RX_BD(sw_prod);

		if ((rx_pkt == budget))
			break;

		/* Refresh hw_cons to see if there is new work */
		if (sw_cons == hw_cons) {
			hw_cons = bp->hw_rx_cons =
				sblk->status_rx_quick_consumer_index0;
			if ((hw_cons & MAX_RX_DESC_CNT) == MAX_RX_DESC_CNT)
				hw_cons++;
			rmb();
		}
	}
	bp->rx_cons = sw_cons;
	bp->rx_prod = sw_prod;

	/* Tell the chip about the new producer index and byte sequence. */
	REG_WR16(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BDIDX, sw_prod);

	REG_WR(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BSEQ, bp->rx_prod_bseq);

	mmiowb();

	return rx_pkt;

}
1968
1969 /* MSI ISR - The only difference between this and the INTx ISR
1970  * is that the MSI interrupt is always serviced.
1971  */
1972 static irqreturn_t
1973 bnx2_msi(int irq, void *dev_instance)
1974 {
1975         struct net_device *dev = dev_instance;
1976         struct bnx2 *bp = netdev_priv(dev);
1977
1978         prefetch(bp->status_blk);
1979         REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
1980                 BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
1981                 BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
1982
1983         /* Return here if interrupt is disabled. */
1984         if (unlikely(atomic_read(&bp->intr_sem) != 0))
1985                 return IRQ_HANDLED;
1986
1987         netif_rx_schedule(dev);
1988
1989         return IRQ_HANDLED;
1990 }
1991
/* INTx ISR: decide whether this (possibly shared) interrupt is ours,
 * mask further chip interrupts, and schedule NAPI polling.
 */
static irqreturn_t
bnx2_interrupt(int irq, void *dev_instance)
{
	struct net_device *dev = dev_instance;
	struct bnx2 *bp = netdev_priv(dev);

	/* When using INTx, it is possible for the interrupt to arrive
	 * at the CPU before the status block posted prior to the
	 * interrupt. Reading a register will flush the status block.
	 * When using MSI, the MSI message will always complete after
	 * the status block write.
	 */
	if ((bp->status_blk->status_idx == bp->last_status_idx) &&
	    (REG_RD(bp, BNX2_PCICFG_MISC_STATUS) &
	     BNX2_PCICFG_MISC_STATUS_INTA_VALUE))
		return IRQ_NONE;

	/* Ack and mask chip interrupts until NAPI polling completes. */
	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
		BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
		BNX2_PCICFG_INT_ACK_CMD_MASK_INT);

	/* Return here if interrupt is shared and is disabled. */
	if (unlikely(atomic_read(&bp->intr_sem) != 0))
		return IRQ_HANDLED;

	netif_rx_schedule(dev);

	return IRQ_HANDLED;
}
2021
2022 static inline int
2023 bnx2_has_work(struct bnx2 *bp)
2024 {
2025         struct status_block *sblk = bp->status_blk;
2026
2027         if ((sblk->status_rx_quick_consumer_index0 != bp->hw_rx_cons) ||
2028             (sblk->status_tx_quick_consumer_index0 != bp->hw_tx_cons))
2029                 return 1;
2030
2031         if (((sblk->status_attn_bits & STATUS_ATTN_BITS_LINK_STATE) != 0) !=
2032             bp->link_up)
2033                 return 1;
2034
2035         return 0;
2036 }
2037
/* NAPI poll handler: service link attentions, reclaim tx completions,
 * process rx packets within quota, and when no work remains, complete
 * the poll and re-enable chip interrupts.  Returns 1 to stay on the
 * poll list, 0 when done.
 */
static int
bnx2_poll(struct net_device *dev, int *budget)
{
	struct bnx2 *bp = netdev_priv(dev);

	/* Attention bit differing from its ack bit means a pending
	 * link-state change.
	 */
	if ((bp->status_blk->status_attn_bits &
		STATUS_ATTN_BITS_LINK_STATE) !=
		(bp->status_blk->status_attn_bits_ack &
		STATUS_ATTN_BITS_LINK_STATE)) {

		spin_lock(&bp->phy_lock);
		bnx2_phy_int(bp);
		spin_unlock(&bp->phy_lock);

		/* This is needed to take care of transient status
		 * during link changes.
		 */
		REG_WR(bp, BNX2_HC_COMMAND,
		       bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
		REG_RD(bp, BNX2_HC_COMMAND);
	}

	if (bp->status_blk->status_tx_quick_consumer_index0 != bp->hw_tx_cons)
		bnx2_tx_int(bp);

	if (bp->status_blk->status_rx_quick_consumer_index0 != bp->hw_rx_cons) {
		int orig_budget = *budget;
		int work_done;

		/* Never exceed the per-device quota. */
		if (orig_budget > dev->quota)
			orig_budget = dev->quota;

		work_done = bnx2_rx_int(bp, orig_budget);
		*budget -= work_done;
		dev->quota -= work_done;
	}

	bp->last_status_idx = bp->status_blk->status_idx;
	rmb();

	if (!bnx2_has_work(bp)) {
		netif_rx_complete(dev);
		/* MSI: a single ack with the latest index re-enables
		 * interrupts.
		 */
		if (likely(bp->flags & USING_MSI_FLAG)) {
			REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
			       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
			       bp->last_status_idx);
			return 0;
		}
		/* INTx: first update the index with interrupts still
		 * masked, then write again without the mask bit to
		 * re-enable them.
		 */
		REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
		       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
		       BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
		       bp->last_status_idx);

		REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
		       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
		       bp->last_status_idx);
		return 0;
	}

	return 1;
}
2099
/* Called with rtnl_lock from vlan functions and also netif_tx_lock
 * from set_multicast.
 */
/* Program the EMAC receive mode (promiscuous / all-multi / multicast
 * hash filtering) and the RPM sort-user registers from dev->flags and
 * the device's multicast list.
 */
static void
bnx2_set_rx_mode(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	u32 rx_mode, sort_mode;
	int i;

	spin_lock_bh(&bp->phy_lock);

	/* Start from the cached mode with the bits we manage cleared. */
	rx_mode = bp->rx_mode & ~(BNX2_EMAC_RX_MODE_PROMISCUOUS |
				  BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG);
	sort_mode = 1 | BNX2_RPM_SORT_USER0_BC_EN;
#ifdef BCM_VLAN
	/* Strip VLAN tags in hardware only when a vlan group is
	 * registered and ASF firmware is not active.
	 */
	if (!bp->vlgrp && !(bp->flags & ASF_ENABLE_FLAG))
		rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
#else
	if (!(bp->flags & ASF_ENABLE_FLAG))
		rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
#endif
	if (dev->flags & IFF_PROMISC) {
		/* Promiscuous mode. */
		rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
		sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN |
			     BNX2_RPM_SORT_USER0_PROM_VLAN;
	}
	else if (dev->flags & IFF_ALLMULTI) {
		/* Accept all multicast: set every hash filter bit. */
		for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
			REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
			       0xffffffff);
		}
		sort_mode |= BNX2_RPM_SORT_USER0_MC_EN;
	}
	else {
		/* Accept one or more multicast(s). */
		struct dev_mc_list *mclist;
		u32 mc_filter[NUM_MC_HASH_REGISTERS];
		u32 regidx;
		u32 bit;
		u32 crc;

		memset(mc_filter, 0, 4 * NUM_MC_HASH_REGISTERS);

		for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
		     i++, mclist = mclist->next) {

			/* Hash on the low CRC byte: bits 5-7 select the
			 * register, bits 0-4 the bit within it.
			 */
			crc = ether_crc_le(ETH_ALEN, mclist->dmi_addr);
			bit = crc & 0xff;
			regidx = (bit & 0xe0) >> 5;
			bit &= 0x1f;
			mc_filter[regidx] |= (1 << bit);
		}

		for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
			REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
			       mc_filter[i]);
		}

		sort_mode |= BNX2_RPM_SORT_USER0_MC_HSH_EN;
	}

	if (rx_mode != bp->rx_mode) {
		bp->rx_mode = rx_mode;
		REG_WR(bp, BNX2_EMAC_RX_MODE, rx_mode);
	}

	/* Disable, reprogram, then re-enable the sort-user rules. */
	REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
	REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode);
	REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode | BNX2_RPM_SORT_USER0_ENA);

	spin_unlock_bh(&bp->phy_lock);
}
2174
2175 #define FW_BUF_SIZE     0x8000
2176
2177 static int
2178 bnx2_gunzip_init(struct bnx2 *bp)
2179 {
2180         if ((bp->gunzip_buf = vmalloc(FW_BUF_SIZE)) == NULL)
2181                 goto gunzip_nomem1;
2182
2183         if ((bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL)) == NULL)
2184                 goto gunzip_nomem2;
2185
2186         bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(), GFP_KERNEL);
2187         if (bp->strm->workspace == NULL)
2188                 goto gunzip_nomem3;
2189
2190         return 0;
2191
2192 gunzip_nomem3:
2193         kfree(bp->strm);
2194         bp->strm = NULL;
2195
2196 gunzip_nomem2:
2197         vfree(bp->gunzip_buf);
2198         bp->gunzip_buf = NULL;
2199
2200 gunzip_nomem1:
2201         printk(KERN_ERR PFX "%s: Cannot allocate firmware buffer for "
2202                             "uncompression.\n", bp->dev->name);
2203         return -ENOMEM;
2204 }
2205
2206 static void
2207 bnx2_gunzip_end(struct bnx2 *bp)
2208 {
2209         kfree(bp->strm->workspace);
2210
2211         kfree(bp->strm);
2212         bp->strm = NULL;
2213
2214         if (bp->gunzip_buf) {
2215                 vfree(bp->gunzip_buf);
2216                 bp->gunzip_buf = NULL;
2217         }
2218 }
2219
2220 static int
2221 bnx2_gunzip(struct bnx2 *bp, u8 *zbuf, int len, void **outbuf, int *outlen)
2222 {
2223         int n, rc;
2224
2225         /* check gzip header */
2226         if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED))
2227                 return -EINVAL;
2228
2229         n = 10;
2230
2231 #define FNAME   0x8
2232         if (zbuf[3] & FNAME)
2233                 while ((zbuf[n++] != 0) && (n < len));
2234
2235         bp->strm->next_in = zbuf + n;
2236         bp->strm->avail_in = len - n;
2237         bp->strm->next_out = bp->gunzip_buf;
2238         bp->strm->avail_out = FW_BUF_SIZE;
2239
2240         rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
2241         if (rc != Z_OK)
2242                 return rc;
2243
2244         rc = zlib_inflate(bp->strm, Z_FINISH);
2245
2246         *outlen = FW_BUF_SIZE - bp->strm->avail_out;
2247         *outbuf = bp->gunzip_buf;
2248
2249         if ((rc != Z_OK) && (rc != Z_STREAM_END))
2250                 printk(KERN_ERR PFX "%s: Firmware decompression error: %s\n",
2251                        bp->dev->name, bp->strm->msg);
2252
2253         zlib_inflateEnd(bp->strm);
2254
2255         if (rc == Z_STREAM_END)
2256                 return 0;
2257
2258         return rc;
2259 }
2260
2261 static void
2262 load_rv2p_fw(struct bnx2 *bp, u32 *rv2p_code, u32 rv2p_code_len,
2263         u32 rv2p_proc)
2264 {
2265         int i;
2266         u32 val;
2267
2268
2269         for (i = 0; i < rv2p_code_len; i += 8) {
2270                 REG_WR(bp, BNX2_RV2P_INSTR_HIGH, cpu_to_le32(*rv2p_code));
2271                 rv2p_code++;
2272                 REG_WR(bp, BNX2_RV2P_INSTR_LOW, cpu_to_le32(*rv2p_code));
2273                 rv2p_code++;
2274
2275                 if (rv2p_proc == RV2P_PROC1) {
2276                         val = (i / 8) | BNX2_RV2P_PROC1_ADDR_CMD_RDWR;
2277                         REG_WR(bp, BNX2_RV2P_PROC1_ADDR_CMD, val);
2278                 }
2279                 else {
2280                         val = (i / 8) | BNX2_RV2P_PROC2_ADDR_CMD_RDWR;
2281                         REG_WR(bp, BNX2_RV2P_PROC2_ADDR_CMD, val);
2282                 }
2283         }
2284
2285         /* Reset the processor, un-stall is done later. */
2286         if (rv2p_proc == RV2P_PROC1) {
2287                 REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC1_RESET);
2288         }
2289         else {
2290                 REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC2_RESET);
2291         }
2292 }
2293
/* Download one on-chip CPU's firmware image: halt the CPU, write the
 * text (decompressing it first if compressed), data, sbss, bss, and
 * rodata sections into its scratchpad via indirect register writes,
 * set the program counter, and restart the CPU.  Returns 0 on success
 * or the error from decompression.
 */
static int
load_cpu_fw(struct bnx2 *bp, struct cpu_reg *cpu_reg, struct fw_info *fw)
{
	u32 offset;
	u32 val;
	int rc;

	/* Halt the CPU. */
	val = REG_RD_IND(bp, cpu_reg->mode);
	val |= cpu_reg->mode_value_halt;
	REG_WR_IND(bp, cpu_reg->mode, val);
	REG_WR_IND(bp, cpu_reg->state, cpu_reg->state_value_clear);

	/* Load the Text area. */
	/* Section addresses are MIPS virtual addresses; translate to
	 * scratchpad offsets via mips_view_base.
	 */
	offset = cpu_reg->spad_base + (fw->text_addr - cpu_reg->mips_view_base);
	if (fw->gz_text) {
		u32 text_len;
		void *text;

		/* Text ships gzip-compressed; inflate into the shared
		 * gunzip buffer, which fw->text then points at.
		 */
		rc = bnx2_gunzip(bp, fw->gz_text, fw->gz_text_len, &text,
				 &text_len);
		if (rc)
			return rc;

		fw->text = text;
	}
	if (fw->gz_text) {
		int j;

		for (j = 0; j < (fw->text_len / 4); j++, offset += 4) {
			REG_WR_IND(bp, offset, cpu_to_le32(fw->text[j]));
		}
	}

	/* Load the Data area. */
	offset = cpu_reg->spad_base + (fw->data_addr - cpu_reg->mips_view_base);
	if (fw->data) {
		int j;

		for (j = 0; j < (fw->data_len / 4); j++, offset += 4) {
			REG_WR_IND(bp, offset, fw->data[j]);
		}
	}

	/* Load the SBSS area. */
	offset = cpu_reg->spad_base + (fw->sbss_addr - cpu_reg->mips_view_base);
	if (fw->sbss) {
		int j;

		for (j = 0; j < (fw->sbss_len / 4); j++, offset += 4) {
			REG_WR_IND(bp, offset, fw->sbss[j]);
		}
	}

	/* Load the BSS area. */
	offset = cpu_reg->spad_base + (fw->bss_addr - cpu_reg->mips_view_base);
	if (fw->bss) {
		int j;

		for (j = 0; j < (fw->bss_len/4); j++, offset += 4) {
			REG_WR_IND(bp, offset, fw->bss[j]);
		}
	}

	/* Load the Read-Only area. */
	offset = cpu_reg->spad_base +
		(fw->rodata_addr - cpu_reg->mips_view_base);
	if (fw->rodata) {
		int j;

		for (j = 0; j < (fw->rodata_len / 4); j++, offset += 4) {
			REG_WR_IND(bp, offset, fw->rodata[j]);
		}
	}

	/* Clear the pre-fetch instruction. */
	REG_WR_IND(bp, cpu_reg->inst, 0);
	REG_WR_IND(bp, cpu_reg->pc, fw->start_addr);

	/* Start the CPU. */
	val = REG_RD_IND(bp, cpu_reg->mode);
	val &= ~cpu_reg->mode_value_halt;
	REG_WR_IND(bp, cpu_reg->state, cpu_reg->state_value_clear);
	REG_WR_IND(bp, cpu_reg->mode, val);

	return 0;
}
2381
/* Load firmware into all on-chip processors: the two RV2P engines,
 * then the RX, TX, TX patch-up, completion, and (on 5709 only) command
 * processors.  A local cpu_reg struct is refilled with each CPU's
 * register addresses before the corresponding load_cpu_fw() call.
 * Returns 0 on success or the first error encountered; the gunzip
 * state is torn down on all paths.
 */
static int
bnx2_init_cpus(struct bnx2 *bp)
{
	struct cpu_reg cpu_reg;
	struct fw_info *fw;
	int rc = 0;
	void *text;
	u32 text_len;

	if ((rc = bnx2_gunzip_init(bp)) != 0)
		return rc;

	/* Initialize the RV2P processor. */
	rc = bnx2_gunzip(bp, bnx2_rv2p_proc1, sizeof(bnx2_rv2p_proc1), &text,
			 &text_len);
	if (rc)
		goto init_cpu_err;

	load_rv2p_fw(bp, text, text_len, RV2P_PROC1);

	rc = bnx2_gunzip(bp, bnx2_rv2p_proc2, sizeof(bnx2_rv2p_proc2), &text,
			 &text_len);
	if (rc)
		goto init_cpu_err;

	load_rv2p_fw(bp, text, text_len, RV2P_PROC2);

	/* Initialize the RX Processor. */
	cpu_reg.mode = BNX2_RXP_CPU_MODE;
	cpu_reg.mode_value_halt = BNX2_RXP_CPU_MODE_SOFT_HALT;
	cpu_reg.mode_value_sstep = BNX2_RXP_CPU_MODE_STEP_ENA;
	cpu_reg.state = BNX2_RXP_CPU_STATE;
	cpu_reg.state_value_clear = 0xffffff;
	cpu_reg.gpr0 = BNX2_RXP_CPU_REG_FILE;
	cpu_reg.evmask = BNX2_RXP_CPU_EVENT_MASK;
	cpu_reg.pc = BNX2_RXP_CPU_PROGRAM_COUNTER;
	cpu_reg.inst = BNX2_RXP_CPU_INSTRUCTION;
	cpu_reg.bp = BNX2_RXP_CPU_HW_BREAKPOINT;
	cpu_reg.spad_base = BNX2_RXP_SCRATCH;
	cpu_reg.mips_view_base = 0x8000000;

	/* Each CPU has a chip-revision-specific firmware image. */
	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		fw = &bnx2_rxp_fw_09;
	else
		fw = &bnx2_rxp_fw_06;

	rc = load_cpu_fw(bp, &cpu_reg, fw);
	if (rc)
		goto init_cpu_err;

	/* Initialize the TX Processor. */
	cpu_reg.mode = BNX2_TXP_CPU_MODE;
	cpu_reg.mode_value_halt = BNX2_TXP_CPU_MODE_SOFT_HALT;
	cpu_reg.mode_value_sstep = BNX2_TXP_CPU_MODE_STEP_ENA;
	cpu_reg.state = BNX2_TXP_CPU_STATE;
	cpu_reg.state_value_clear = 0xffffff;
	cpu_reg.gpr0 = BNX2_TXP_CPU_REG_FILE;
	cpu_reg.evmask = BNX2_TXP_CPU_EVENT_MASK;
	cpu_reg.pc = BNX2_TXP_CPU_PROGRAM_COUNTER;
	cpu_reg.inst = BNX2_TXP_CPU_INSTRUCTION;
	cpu_reg.bp = BNX2_TXP_CPU_HW_BREAKPOINT;
	cpu_reg.spad_base = BNX2_TXP_SCRATCH;
	cpu_reg.mips_view_base = 0x8000000;

	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		fw = &bnx2_txp_fw_09;
	else
		fw = &bnx2_txp_fw_06;

	rc = load_cpu_fw(bp, &cpu_reg, fw);
	if (rc)
		goto init_cpu_err;

	/* Initialize the TX Patch-up Processor. */
	cpu_reg.mode = BNX2_TPAT_CPU_MODE;
	cpu_reg.mode_value_halt = BNX2_TPAT_CPU_MODE_SOFT_HALT;
	cpu_reg.mode_value_sstep = BNX2_TPAT_CPU_MODE_STEP_ENA;
	cpu_reg.state = BNX2_TPAT_CPU_STATE;
	cpu_reg.state_value_clear = 0xffffff;
	cpu_reg.gpr0 = BNX2_TPAT_CPU_REG_FILE;
	cpu_reg.evmask = BNX2_TPAT_CPU_EVENT_MASK;
	cpu_reg.pc = BNX2_TPAT_CPU_PROGRAM_COUNTER;
	cpu_reg.inst = BNX2_TPAT_CPU_INSTRUCTION;
	cpu_reg.bp = BNX2_TPAT_CPU_HW_BREAKPOINT;
	cpu_reg.spad_base = BNX2_TPAT_SCRATCH;
	cpu_reg.mips_view_base = 0x8000000;

	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		fw = &bnx2_tpat_fw_09;
	else
		fw = &bnx2_tpat_fw_06;

	rc = load_cpu_fw(bp, &cpu_reg, fw);
	if (rc)
		goto init_cpu_err;

	/* Initialize the Completion Processor. */
	cpu_reg.mode = BNX2_COM_CPU_MODE;
	cpu_reg.mode_value_halt = BNX2_COM_CPU_MODE_SOFT_HALT;
	cpu_reg.mode_value_sstep = BNX2_COM_CPU_MODE_STEP_ENA;
	cpu_reg.state = BNX2_COM_CPU_STATE;
	cpu_reg.state_value_clear = 0xffffff;
	cpu_reg.gpr0 = BNX2_COM_CPU_REG_FILE;
	cpu_reg.evmask = BNX2_COM_CPU_EVENT_MASK;
	cpu_reg.pc = BNX2_COM_CPU_PROGRAM_COUNTER;
	cpu_reg.inst = BNX2_COM_CPU_INSTRUCTION;
	cpu_reg.bp = BNX2_COM_CPU_HW_BREAKPOINT;
	cpu_reg.spad_base = BNX2_COM_SCRATCH;
	cpu_reg.mips_view_base = 0x8000000;

	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		fw = &bnx2_com_fw_09;
	else
		fw = &bnx2_com_fw_06;

	rc = load_cpu_fw(bp, &cpu_reg, fw);
	if (rc)
		goto init_cpu_err;

	/* Initialize the Command Processor. */
	/* Only the 5709 has a CP firmware image to load. */
	cpu_reg.mode = BNX2_CP_CPU_MODE;
	cpu_reg.mode_value_halt = BNX2_CP_CPU_MODE_SOFT_HALT;
	cpu_reg.mode_value_sstep = BNX2_CP_CPU_MODE_STEP_ENA;
	cpu_reg.state = BNX2_CP_CPU_STATE;
	cpu_reg.state_value_clear = 0xffffff;
	cpu_reg.gpr0 = BNX2_CP_CPU_REG_FILE;
	cpu_reg.evmask = BNX2_CP_CPU_EVENT_MASK;
	cpu_reg.pc = BNX2_CP_CPU_PROGRAM_COUNTER;
	cpu_reg.inst = BNX2_CP_CPU_INSTRUCTION;
	cpu_reg.bp = BNX2_CP_CPU_HW_BREAKPOINT;
	cpu_reg.spad_base = BNX2_CP_SCRATCH;
	cpu_reg.mips_view_base = 0x8000000;

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		fw = &bnx2_cp_fw_09;

		rc = load_cpu_fw(bp, &cpu_reg, fw);
		if (rc)
			goto init_cpu_err;
	}
init_cpu_err:
	bnx2_gunzip_end(bp);
	return rc;
}
2526
/* Move the device between PCI power states D0 and D3hot.
 *
 * For D0: clears the PM state bits and PME status, waits out the
 * mandatory delay when leaving D3hot, and disables the magic-packet /
 * ACPI wake-up paths in the EMAC and RPM blocks.
 *
 * For D3hot: if Wake-on-LAN is enabled, forces the copper PHY to
 * autoneg 10/100 (link must survive at low power), re-programs the MAC
 * address and wake-up filters, enables the EMAC/RPM blocks needed for
 * WOL, then tells the firmware and writes the PM control register.
 *
 * Returns 0 on success, -EINVAL for any other requested state.
 */
static int
bnx2_set_power_state(struct bnx2 *bp, pci_power_t state)
{
	u16 pmcsr;

	pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);

	switch (state) {
	case PCI_D0: {
		u32 val;

		/* Clear the state field (-> D0) and write-1-to-clear any
		 * pending PME status.
		 */
		pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
			(pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
			PCI_PM_CTRL_PME_STATUS);

		if (pmcsr & PCI_PM_CTRL_STATE_MASK)
			/* delay required during transition out of D3hot */
			msleep(20);

		/* Ack any received wake-up frames and turn off
		 * magic-packet detection now that we are awake.
		 */
		val = REG_RD(bp, BNX2_EMAC_MODE);
		val |= BNX2_EMAC_MODE_MPKT_RCVD | BNX2_EMAC_MODE_ACPI_RCVD;
		val &= ~BNX2_EMAC_MODE_MPKT;
		REG_WR(bp, BNX2_EMAC_MODE, val);

		/* Disable the ACPI pattern-match wake-up path. */
		val = REG_RD(bp, BNX2_RPM_CONFIG);
		val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
		REG_WR(bp, BNX2_RPM_CONFIG, val);
		break;
	}
	case PCI_D3hot: {
		int i;
		u32 val, wol_msg;

		if (bp->wol) {
			u32 advertising;
			u8 autoneg;

			/* Temporarily force 10/100 autoneg so the link
			 * stays up in low power, then restore the user's
			 * settings for the next bring-up.
			 */
			autoneg = bp->autoneg;
			advertising = bp->advertising;

			bp->autoneg = AUTONEG_SPEED;
			bp->advertising = ADVERTISED_10baseT_Half |
				ADVERTISED_10baseT_Full |
				ADVERTISED_100baseT_Half |
				ADVERTISED_100baseT_Full |
				ADVERTISED_Autoneg;

			bnx2_setup_copper_phy(bp);

			bp->autoneg = autoneg;
			bp->advertising = advertising;

			/* Re-program the MAC address used for wake-up
			 * frame matching.
			 */
			bnx2_set_mac_addr(bp);

			val = REG_RD(bp, BNX2_EMAC_MODE);

			/* Enable port mode. */
			val &= ~BNX2_EMAC_MODE_PORT;
			val |= BNX2_EMAC_MODE_PORT_MII |
			       BNX2_EMAC_MODE_MPKT_RCVD |
			       BNX2_EMAC_MODE_ACPI_RCVD |
			       BNX2_EMAC_MODE_MPKT;

			REG_WR(bp, BNX2_EMAC_MODE, val);

			/* receive all multicast */
			for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
				REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
				       0xffffffff);
			}
			REG_WR(bp, BNX2_EMAC_RX_MODE,
			       BNX2_EMAC_RX_MODE_SORT_MODE);

			/* Sort rule: unicast entry 1 plus broadcast and
			 * multicast; written as 0, value, value|ENA —
			 * presumably the hardware's required programming
			 * sequence (TODO confirm against chip docs).
			 */
			val = 1 | BNX2_RPM_SORT_USER0_BC_EN |
			      BNX2_RPM_SORT_USER0_MC_EN;
			REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
			REG_WR(bp, BNX2_RPM_SORT_USER0, val);
			REG_WR(bp, BNX2_RPM_SORT_USER0, val |
			       BNX2_RPM_SORT_USER0_ENA);

			/* Need to enable EMAC and RPM for WOL. */
			REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
			       BNX2_MISC_ENABLE_SET_BITS_RX_PARSER_MAC_ENABLE |
			       BNX2_MISC_ENABLE_SET_BITS_TX_HEADER_Q_ENABLE |
			       BNX2_MISC_ENABLE_SET_BITS_EMAC_ENABLE);

			val = REG_RD(bp, BNX2_RPM_CONFIG);
			val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
			REG_WR(bp, BNX2_RPM_CONFIG, val);

			wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
		}
		else {
			wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
		}

		/* Let the bootcode firmware know we are suspending. */
		if (!(bp->flags & NO_WOL_FLAG))
			bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT3 | wol_msg, 0);

		pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
		/* On 5706 A0/A1 only enter D3hot (state bits = 3) when WOL
		 * is armed; otherwise the chip is left in D0 — looks like
		 * an early-silicon workaround (NOTE(review): confirm
		 * against the 5706 errata).
		 */
		if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
		    (CHIP_ID(bp) == CHIP_ID_5706_A1)) {

			if (bp->wol)
				pmcsr |= 3;
		}
		else {
			pmcsr |= 3;
		}
		if (bp->wol) {
			pmcsr |= PCI_PM_CTRL_PME_ENABLE;
		}
		pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
				      pmcsr);

		/* No more memory access after this point until
		 * device is brought back to D0.
		 */
		udelay(50);
		break;
	}
	default:
		return -EINVAL;
	}
	return 0;
}
2653
2654 static int
2655 bnx2_acquire_nvram_lock(struct bnx2 *bp)
2656 {
2657         u32 val;
2658         int j;
2659
2660         /* Request access to the flash interface. */
2661         REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_SET2);
2662         for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
2663                 val = REG_RD(bp, BNX2_NVM_SW_ARB);
2664                 if (val & BNX2_NVM_SW_ARB_ARB_ARB2)
2665                         break;
2666
2667                 udelay(5);
2668         }
2669
2670         if (j >= NVRAM_TIMEOUT_COUNT)
2671                 return -EBUSY;
2672
2673         return 0;
2674 }
2675
2676 static int
2677 bnx2_release_nvram_lock(struct bnx2 *bp)
2678 {
2679         int j;
2680         u32 val;
2681
2682         /* Relinquish nvram interface. */
2683         REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_CLR2);
2684
2685         for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
2686                 val = REG_RD(bp, BNX2_NVM_SW_ARB);
2687                 if (!(val & BNX2_NVM_SW_ARB_ARB_ARB2))
2688                         break;
2689
2690                 udelay(5);
2691         }
2692
2693         if (j >= NVRAM_TIMEOUT_COUNT)
2694                 return -EBUSY;
2695
2696         return 0;
2697 }
2698
2699
2700 static int
2701 bnx2_enable_nvram_write(struct bnx2 *bp)
2702 {
2703         u32 val;
2704
2705         val = REG_RD(bp, BNX2_MISC_CFG);
2706         REG_WR(bp, BNX2_MISC_CFG, val | BNX2_MISC_CFG_NVM_WR_EN_PCI);
2707
2708         if (!bp->flash_info->buffered) {
2709                 int j;
2710
2711                 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
2712                 REG_WR(bp, BNX2_NVM_COMMAND,
2713                        BNX2_NVM_COMMAND_WREN | BNX2_NVM_COMMAND_DOIT);
2714
2715                 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
2716                         udelay(5);
2717
2718                         val = REG_RD(bp, BNX2_NVM_COMMAND);
2719                         if (val & BNX2_NVM_COMMAND_DONE)
2720                                 break;
2721                 }
2722
2723                 if (j >= NVRAM_TIMEOUT_COUNT)
2724                         return -EBUSY;
2725         }
2726         return 0;
2727 }
2728
2729 static void
2730 bnx2_disable_nvram_write(struct bnx2 *bp)
2731 {
2732         u32 val;
2733
2734         val = REG_RD(bp, BNX2_MISC_CFG);
2735         REG_WR(bp, BNX2_MISC_CFG, val & ~BNX2_MISC_CFG_NVM_WR_EN);
2736 }
2737
2738
2739 static void
2740 bnx2_enable_nvram_access(struct bnx2 *bp)
2741 {
2742         u32 val;
2743
2744         val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
2745         /* Enable both bits, even on read. */
2746         REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
2747                val | BNX2_NVM_ACCESS_ENABLE_EN | BNX2_NVM_ACCESS_ENABLE_WR_EN);
2748 }
2749
2750 static void
2751 bnx2_disable_nvram_access(struct bnx2 *bp)
2752 {
2753         u32 val;
2754
2755         val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
2756         /* Disable both bits, even after read. */
2757         REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
2758                 val & ~(BNX2_NVM_ACCESS_ENABLE_EN |
2759                         BNX2_NVM_ACCESS_ENABLE_WR_EN));
2760 }
2761
2762 static int
2763 bnx2_nvram_erase_page(struct bnx2 *bp, u32 offset)
2764 {
2765         u32 cmd;
2766         int j;
2767
2768         if (bp->flash_info->buffered)
2769                 /* Buffered flash, no erase needed */
2770                 return 0;
2771
2772         /* Build an erase command */
2773         cmd = BNX2_NVM_COMMAND_ERASE | BNX2_NVM_COMMAND_WR |
2774               BNX2_NVM_COMMAND_DOIT;
2775
2776         /* Need to clear DONE bit separately. */
2777         REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
2778
2779         /* Address of the NVRAM to read from. */
2780         REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
2781
2782         /* Issue an erase command. */
2783         REG_WR(bp, BNX2_NVM_COMMAND, cmd);
2784
2785         /* Wait for completion. */
2786         for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
2787                 u32 val;
2788
2789                 udelay(5);
2790
2791                 val = REG_RD(bp, BNX2_NVM_COMMAND);
2792                 if (val & BNX2_NVM_COMMAND_DONE)
2793                         break;
2794         }
2795
2796         if (j >= NVRAM_TIMEOUT_COUNT)
2797                 return -EBUSY;
2798
2799         return 0;
2800 }
2801
2802 static int
2803 bnx2_nvram_read_dword(struct bnx2 *bp, u32 offset, u8 *ret_val, u32 cmd_flags)
2804 {
2805         u32 cmd;
2806         int j;
2807
2808         /* Build the command word. */
2809         cmd = BNX2_NVM_COMMAND_DOIT | cmd_flags;
2810
2811         /* Calculate an offset of a buffered flash. */
2812         if (bp->flash_info->buffered) {
2813                 offset = ((offset / bp->flash_info->page_size) <<
2814                            bp->flash_info->page_bits) +
2815                           (offset % bp->flash_info->page_size);
2816         }
2817
2818         /* Need to clear DONE bit separately. */
2819         REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
2820
2821         /* Address of the NVRAM to read from. */
2822         REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
2823
2824         /* Issue a read command. */
2825         REG_WR(bp, BNX2_NVM_COMMAND, cmd);
2826
2827         /* Wait for completion. */
2828         for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
2829                 u32 val;
2830
2831                 udelay(5);
2832
2833                 val = REG_RD(bp, BNX2_NVM_COMMAND);
2834                 if (val & BNX2_NVM_COMMAND_DONE) {
2835                         val = REG_RD(bp, BNX2_NVM_READ);
2836
2837                         val = be32_to_cpu(val);
2838                         memcpy(ret_val, &val, 4);
2839                         break;
2840                 }
2841         }
2842         if (j >= NVRAM_TIMEOUT_COUNT)
2843                 return -EBUSY;
2844
2845         return 0;
2846 }
2847
2848
2849 static int
2850 bnx2_nvram_write_dword(struct bnx2 *bp, u32 offset, u8 *val, u32 cmd_flags)
2851 {
2852         u32 cmd, val32;
2853         int j;
2854
2855         /* Build the command word. */
2856         cmd = BNX2_NVM_COMMAND_DOIT | BNX2_NVM_COMMAND_WR | cmd_flags;
2857
2858         /* Calculate an offset of a buffered flash. */
2859         if (bp->flash_info->buffered) {
2860                 offset = ((offset / bp->flash_info->page_size) <<
2861                           bp->flash_info->page_bits) +
2862                          (offset % bp->flash_info->page_size);
2863         }
2864
2865         /* Need to clear DONE bit separately. */
2866         REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
2867
2868         memcpy(&val32, val, 4);
2869         val32 = cpu_to_be32(val32);
2870
2871         /* Write the data. */
2872         REG_WR(bp, BNX2_NVM_WRITE, val32);
2873
2874         /* Address of the NVRAM to write to. */
2875         REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
2876
2877         /* Issue the write command. */
2878         REG_WR(bp, BNX2_NVM_COMMAND, cmd);
2879
2880         /* Wait for completion. */
2881         for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
2882                 udelay(5);
2883
2884                 if (REG_RD(bp, BNX2_NVM_COMMAND) & BNX2_NVM_COMMAND_DONE)
2885                         break;
2886         }
2887         if (j >= NVRAM_TIMEOUT_COUNT)
2888                 return -EBUSY;
2889
2890         return 0;
2891 }
2892
/* Identify the attached flash/EEPROM part and record it in
 * bp->flash_info, reprogramming the NVM configuration registers if the
 * interface has not yet been set up.  Also determines the usable flash
 * size (shared-memory override wins over the table's total_size).
 *
 * Returns 0 on success, -ENODEV if no table entry matches, or an error
 * from bnx2_acquire_nvram_lock().
 */
static int
bnx2_init_nvram(struct bnx2 *bp)
{
	u32 val;
	int j, entry_count, rc;
	struct flash_spec *flash;

	/* Determine the selected interface. */
	val = REG_RD(bp, BNX2_NVM_CFG1);

	entry_count = sizeof(flash_table) / sizeof(struct flash_spec);

	rc = 0;
	/* Bit 30 indicates the flash interface has already been
	 * reconfigured by an earlier pass or the bootcode.
	 */
	if (val & 0x40000000) {

		/* Flash interface has been reconfigured */
		for (j = 0, flash = &flash_table[0]; j < entry_count;
		     j++, flash++) {
			/* Match on the backup-strap bits of config1. */
			if ((val & FLASH_BACKUP_STRAP_MASK) ==
			    (flash->config1 & FLASH_BACKUP_STRAP_MASK)) {
				bp->flash_info = flash;
				break;
			}
		}
	}
	else {
		u32 mask;
		/* Not yet been reconfigured */

		/* Bit 23 selects which strap field to match against. */
		if (val & (1 << 23))
			mask = FLASH_BACKUP_STRAP_MASK;
		else
			mask = FLASH_STRAP_MASK;

		for (j = 0, flash = &flash_table[0]; j < entry_count;
			j++, flash++) {

			if ((val & mask) == (flash->strapping & mask)) {
				bp->flash_info = flash;

				/* Request access to the flash interface. */
				if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
					return rc;

				/* Enable access to flash interface */
				bnx2_enable_nvram_access(bp);

				/* Reconfigure the flash interface */
				REG_WR(bp, BNX2_NVM_CFG1, flash->config1);
				REG_WR(bp, BNX2_NVM_CFG2, flash->config2);
				REG_WR(bp, BNX2_NVM_CFG3, flash->config3);
				REG_WR(bp, BNX2_NVM_WRITE1, flash->write1);

				/* Disable access to flash interface */
				bnx2_disable_nvram_access(bp);
				bnx2_release_nvram_lock(bp);

				break;
			}
		}
	} /* if (val & 0x40000000) */

	/* Both loops above fall through with j == entry_count when no
	 * table entry matched the strapping.
	 */
	if (j == entry_count) {
		bp->flash_info = NULL;
		printk(KERN_ALERT PFX "Unknown flash/EEPROM type.\n");
		return -ENODEV;
	}

	/* Prefer the NVM size advertised in shared memory; fall back to
	 * the table entry's total size when it is zero/absent.
	 */
	val = REG_RD_IND(bp, bp->shmem_base + BNX2_SHARED_HW_CFG_CONFIG2);
	val &= BNX2_SHARED_HW_CFG2_NVM_SIZE_MASK;
	if (val)
		bp->flash_size = val;
	else
		bp->flash_size = bp->flash_info->total_size;

	return rc;
}
2970
2971 static int
2972 bnx2_nvram_read(struct bnx2 *bp, u32 offset, u8 *ret_buf,
2973                 int buf_size)
2974 {
2975         int rc = 0;
2976         u32 cmd_flags, offset32, len32, extra;
2977
2978         if (buf_size == 0)
2979                 return 0;
2980
2981         /* Request access to the flash interface. */
2982         if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
2983                 return rc;
2984
2985         /* Enable access to flash interface */
2986         bnx2_enable_nvram_access(bp);
2987
2988         len32 = buf_size;
2989         offset32 = offset;
2990         extra = 0;
2991
2992         cmd_flags = 0;
2993
2994         if (offset32 & 3) {
2995                 u8 buf[4];
2996                 u32 pre_len;
2997
2998                 offset32 &= ~3;
2999                 pre_len = 4 - (offset & 3);
3000
3001                 if (pre_len >= len32) {
3002                         pre_len = len32;
3003                         cmd_flags = BNX2_NVM_COMMAND_FIRST |
3004                                     BNX2_NVM_COMMAND_LAST;
3005                 }
3006                 else {
3007                         cmd_flags = BNX2_NVM_COMMAND_FIRST;
3008                 }
3009
3010                 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
3011
3012                 if (rc)
3013                         return rc;
3014
3015                 memcpy(ret_buf, buf + (offset & 3), pre_len);
3016
3017                 offset32 += 4;
3018                 ret_buf += pre_len;
3019                 len32 -= pre_len;
3020         }
3021         if (len32 & 3) {
3022                 extra = 4 - (len32 & 3);
3023                 len32 = (len32 + 4) & ~3;
3024         }
3025
3026         if (len32 == 4) {
3027                 u8 buf[4];
3028
3029                 if (cmd_flags)
3030                         cmd_flags = BNX2_NVM_COMMAND_LAST;
3031                 else
3032                         cmd_flags = BNX2_NVM_COMMAND_FIRST |
3033                                     BNX2_NVM_COMMAND_LAST;
3034
3035                 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
3036
3037                 memcpy(ret_buf, buf, 4 - extra);
3038         }
3039         else if (len32 > 0) {
3040                 u8 buf[4];
3041
3042                 /* Read the first word. */
3043                 if (cmd_flags)
3044                         cmd_flags = 0;
3045                 else
3046                         cmd_flags = BNX2_NVM_COMMAND_FIRST;
3047
3048                 rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, cmd_flags);
3049
3050                 /* Advance to the next dword. */
3051                 offset32 += 4;
3052                 ret_buf += 4;
3053                 len32 -= 4;
3054
3055                 while (len32 > 4 && rc == 0) {
3056                         rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, 0);
3057
3058                         /* Advance to the next dword. */
3059                         offset32 += 4;
3060                         ret_buf += 4;
3061                         len32 -= 4;
3062                 }
3063
3064                 if (rc)
3065                         return rc;
3066
3067                 cmd_flags = BNX2_NVM_COMMAND_LAST;
3068                 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
3069
3070                 memcpy(ret_buf, buf, 4 - extra);
3071         }
3072
3073         /* Disable access to flash interface */
3074         bnx2_disable_nvram_access(bp);
3075
3076         bnx2_release_nvram_lock(bp);
3077
3078         return rc;
3079 }
3080
3081 static int
3082 bnx2_nvram_write(struct bnx2 *bp, u32 offset, u8 *data_buf,
3083                 int buf_size)
3084 {
3085         u32 written, offset32, len32;
3086         u8 *buf, start[4], end[4], *align_buf = NULL, *flash_buffer = NULL;
3087         int rc = 0;
3088         int align_start, align_end;
3089
3090         buf = data_buf;
3091         offset32 = offset;
3092         len32 = buf_size;
3093         align_start = align_end = 0;
3094
3095         if ((align_start = (offset32 & 3))) {
3096                 offset32 &= ~3;
3097                 len32 += (4 - align_start);
3098                 if ((rc = bnx2_nvram_read(bp, offset32, start, 4)))
3099                         return rc;
3100         }
3101
3102         if (len32 & 3) {
3103                 if ((len32 > 4) || !align_start) {
3104                         align_end = 4 - (len32 & 3);
3105                         len32 += align_end;
3106                         if ((rc = bnx2_nvram_read(bp, offset32 + len32 - 4,
3107                                 end, 4))) {
3108                                 return rc;
3109                         }
3110                 }
3111         }
3112
3113         if (align_start || align_end) {
3114                 align_buf = kmalloc(len32, GFP_KERNEL);
3115                 if (align_buf == NULL)
3116                         return -ENOMEM;
3117                 if (align_start) {
3118                         memcpy(align_buf, start, 4);
3119                 }
3120                 if (align_end) {
3121                         memcpy(align_buf + len32 - 4, end, 4);
3122                 }
3123                 memcpy(align_buf + align_start, data_buf, buf_size);
3124                 buf = align_buf;
3125         }
3126
3127         if (bp->flash_info->buffered == 0) {
3128                 flash_buffer = kmalloc(264, GFP_KERNEL);
3129                 if (flash_buffer == NULL) {
3130                         rc = -ENOMEM;
3131                         goto nvram_write_end;
3132                 }
3133         }
3134
3135         written = 0;
3136         while ((written < len32) && (rc == 0)) {
3137                 u32 page_start, page_end, data_start, data_end;
3138                 u32 addr, cmd_flags;
3139                 int i;
3140
3141                 /* Find the page_start addr */
3142                 page_start = offset32 + written;
3143                 page_start -= (page_start % bp->flash_info->page_size);
3144                 /* Find the page_end addr */
3145                 page_end = page_start + bp->flash_info->page_size;
3146                 /* Find the data_start addr */
3147                 data_start = (written == 0) ? offset32 : page_start;
3148                 /* Find the data_end addr */
3149                 data_end = (page_end > offset32 + len32) ?
3150                         (offset32 + len32) : page_end;
3151
3152                 /* Request access to the flash interface. */
3153                 if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
3154                         goto nvram_write_end;
3155
3156                 /* Enable access to flash interface */
3157                 bnx2_enable_nvram_access(bp);
3158
3159                 cmd_flags = BNX2_NVM_COMMAND_FIRST;
3160                 if (bp->flash_info->buffered == 0) {
3161                         int j;
3162
3163                         /* Read the whole page into the buffer
3164                          * (non-buffer flash only) */
3165                         for (j = 0; j < bp->flash_info->page_size; j += 4) {
3166                                 if (j == (bp->flash_info->page_size - 4)) {
3167                                         cmd_flags |= BNX2_NVM_COMMAND_LAST;
3168                                 }
3169                                 rc = bnx2_nvram_read_dword(bp,
3170                                         page_start + j,
3171                                         &flash_buffer[j],
3172                                         cmd_flags);
3173
3174                                 if (rc)
3175                                         goto nvram_write_end;
3176
3177                                 cmd_flags = 0;
3178                         }
3179                 }
3180
3181                 /* Enable writes to flash interface (unlock write-protect) */
3182                 if ((rc = bnx2_enable_nvram_write(bp)) != 0)
3183                         goto nvram_write_end;
3184
3185                 /* Erase the page */
3186                 if ((rc = bnx2_nvram_erase_page(bp, page_start)) != 0)
3187                         goto nvram_write_end;
3188
3189                 /* Re-enable the write again for the actual write */
3190                 bnx2_enable_nvram_write(bp);
3191
3192                 /* Loop to write back the buffer data from page_start to
3193                  * data_start */
3194                 i = 0;
3195                 if (bp->flash_info->buffered == 0) {
3196                         for (addr = page_start; addr < data_start;
3197                                 addr += 4, i += 4) {
3198
3199                                 rc = bnx2_nvram_write_dword(bp, addr,
3200                                         &flash_buffer[i], cmd_flags);
3201
3202                                 if (rc != 0)
3203                                         goto nvram_write_end;
3204
3205                                 cmd_flags = 0;
3206                         }
3207                 }
3208
3209                 /* Loop to write the new data from data_start to data_end */
3210                 for (addr = data_start; addr < data_end; addr += 4, i += 4) {
3211                         if ((addr == page_end - 4) ||
3212                                 ((bp->flash_info->buffered) &&
3213                                  (addr == data_end - 4))) {
3214
3215                                 cmd_flags |= BNX2_NVM_COMMAND_LAST;
3216                         }
3217                         rc = bnx2_nvram_write_dword(bp, addr, buf,
3218                                 cmd_flags);
3219
3220                         if (rc != 0)
3221                                 goto nvram_write_end;
3222
3223                         cmd_flags = 0;
3224                         buf += 4;
3225                 }
3226
3227                 /* Loop to write back the buffer data from data_end
3228                  * to page_end */
3229                 if (bp->flash_info->buffered == 0) {
3230                         for (addr = data_end; addr < page_end;
3231                                 addr += 4, i += 4) {
3232
3233                                 if (addr == page_end-4) {
3234                                         cmd_flags = BNX2_NVM_COMMAND_LAST;
3235                                 }
3236                                 rc = bnx2_nvram_write_dword(bp, addr,
3237                                         &flash_buffer[i], cmd_flags);
3238
3239                                 if (rc != 0)
3240                                         goto nvram_write_end;
3241
3242                                 cmd_flags = 0;
3243                         }
3244                 }
3245