Pull percpu-dtc into release branch
[sfrench/cifs-2.6.git] / drivers / net / bnx2.c
1 /* bnx2.c: Broadcom NX2 network driver.
2  *
3  * Copyright (c) 2004, 2005, 2006 Broadcom Corporation
4  *
5  * This program is free software; you can redistribute it and/or modify
6  * it under the terms of the GNU General Public License as published by
7  * the Free Software Foundation.
8  *
9  * Written by: Michael Chan  (mchan@broadcom.com)
10  */
11
12
13 #include <linux/module.h>
14 #include <linux/moduleparam.h>
15
16 #include <linux/kernel.h>
17 #include <linux/timer.h>
18 #include <linux/errno.h>
19 #include <linux/ioport.h>
20 #include <linux/slab.h>
21 #include <linux/vmalloc.h>
22 #include <linux/interrupt.h>
23 #include <linux/pci.h>
24 #include <linux/init.h>
25 #include <linux/netdevice.h>
26 #include <linux/etherdevice.h>
27 #include <linux/skbuff.h>
28 #include <linux/dma-mapping.h>
29 #include <asm/bitops.h>
30 #include <asm/io.h>
31 #include <asm/irq.h>
32 #include <linux/delay.h>
33 #include <asm/byteorder.h>
34 #include <asm/page.h>
35 #include <linux/time.h>
36 #include <linux/ethtool.h>
37 #include <linux/mii.h>
38 #ifdef NETIF_F_HW_VLAN_TX
39 #include <linux/if_vlan.h>
40 #define BCM_VLAN 1
41 #endif
42 #include <net/ip.h>
43 #include <net/tcp.h>
44 #include <net/checksum.h>
45 #include <linux/workqueue.h>
46 #include <linux/crc32.h>
47 #include <linux/prefetch.h>
48 #include <linux/cache.h>
49 #include <linux/zlib.h>
50
51 #include "bnx2.h"
52 #include "bnx2_fw.h"
53 #include "bnx2_fw2.h"
54
/* Driver identification strings used in log messages and ethtool output. */
#define DRV_MODULE_NAME         "bnx2"
#define PFX DRV_MODULE_NAME     ": "
#define DRV_MODULE_VERSION      "1.5.8"
#define DRV_MODULE_RELDATE      "April 24, 2007"

/* Convert a relative delay in jiffies to an absolute expiry time. */
#define RUN_AT(x) (jiffies + (x))

/* Time in jiffies before concluding the transmitter is hung. */
#define TX_TIMEOUT  (5*HZ)

/* One-line banner printed once at probe time; discarded after init. */
static const char version[] __devinitdata =
        "Broadcom NetXtreme II Gigabit Ethernet Driver " DRV_MODULE_NAME " v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Michael Chan <mchan@broadcom.com>");
MODULE_DESCRIPTION("Broadcom NetXtreme II BCM5706/5708 Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);

/* Module parameter: set non-zero to force legacy INTx instead of MSI. */
static int disable_msi = 0;

module_param(disable_msi, int, 0);
MODULE_PARM_DESC(disable_msi, "Disable Message Signaled Interrupt (MSI)");
77
/* Supported board types.  Used as an index into board_info[] below and
 * carried in the driver_data field of each bnx2_pci_tbl[] entry.
 */
typedef enum {
        BCM5706 = 0,
        NC370T,
        NC370I,
        BCM5706S,
        NC370F,
        BCM5708,
        BCM5708S,
        BCM5709,
} board_t;
88
/* Human-readable board names, indexed by board_t, above.  The order of
 * these entries must match the enum exactly.
 */
static const struct {
        char *name;
} board_info[] __devinitdata = {
        { "Broadcom NetXtreme II BCM5706 1000Base-T" },
        { "HP NC370T Multifunction Gigabit Server Adapter" },
        { "HP NC370i Multifunction Gigabit Server Adapter" },
        { "Broadcom NetXtreme II BCM5706 1000Base-SX" },
        { "HP NC370F Multifunction Gigabit Server Adapter" },
        { "Broadcom NetXtreme II BCM5708 1000Base-T" },
        { "Broadcom NetXtreme II BCM5708 1000Base-SX" },
        { "Broadcom NetXtreme II BCM5709 1000Base-T" },
};
102
/* PCI IDs claimed by this driver.  HP OEM variants (specific subsystem
 * vendor/device IDs) must appear before the PCI_ANY_ID catch-all entries
 * for the same chip so they match first.
 */
static struct pci_device_id bnx2_pci_tbl[] = {
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
          PCI_VENDOR_ID_HP, 0x3101, 0, 0, NC370T },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
          PCI_VENDOR_ID_HP, 0x3106, 0, 0, NC370I },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706 },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708 },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
          PCI_VENDOR_ID_HP, 0x3102, 0, 0, NC370F },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706S },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708S,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708S },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709 },
        { 0, }
};
122
/* NVRAM device descriptors.  At init time the chip's flash strapping
 * value is matched against the first field of each entry to select the
 * access parameters (page size, address mask, total size) for the
 * attached EEPROM or flash part.  "Expansion" entries are placeholders
 * for straps with no known part.
 */
static struct flash_spec flash_table[] =
{
        /* Slow EEPROM */
        {0x00000000, 0x40830380, 0x009f0081, 0xa184a053, 0xaf000400,
         1, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
         SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
         "EEPROM - slow"},
        /* Expansion entry 0001 */
        {0x08000002, 0x4b808201, 0x00050081, 0x03840253, 0xaf020406,
         0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
         SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
         "Entry 0001"},
        /* Saifun SA25F010 (non-buffered flash) */
        /* strap, cfg1, & write1 need updates */
        {0x04000001, 0x47808201, 0x00050081, 0x03840253, 0xaf020406,
         0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
         SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*2,
         "Non-buffered flash (128kB)"},
        /* Saifun SA25F020 (non-buffered flash) */
        /* strap, cfg1, & write1 need updates */
        {0x0c000003, 0x4f808201, 0x00050081, 0x03840253, 0xaf020406,
         0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
         SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*4,
         "Non-buffered flash (256kB)"},
        /* Expansion entry 0100 */
        {0x11000000, 0x53808201, 0x00050081, 0x03840253, 0xaf020406,
         0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
         SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
         "Entry 0100"},
        /* Entry 0101: ST M45PE10 (non-buffered flash, TetonII B0) */
        {0x19000002, 0x5b808201, 0x000500db, 0x03840253, 0xaf020406,
         0, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
         ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*2,
         "Entry 0101: ST M45PE10 (128kB non-bufferred)"},
        /* Entry 0110: ST M45PE20 (non-buffered flash)*/
        {0x15000001, 0x57808201, 0x000500db, 0x03840253, 0xaf020406,
         0, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
         ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*4,
         "Entry 0110: ST M45PE20 (256kB non-bufferred)"},
        /* Saifun SA25F005 (non-buffered flash) */
        /* strap, cfg1, & write1 need updates */
        {0x1d000003, 0x5f808201, 0x00050081, 0x03840253, 0xaf020406,
         0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
         SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE,
         "Non-buffered flash (64kB)"},
        /* Fast EEPROM */
        {0x22000000, 0x62808380, 0x009f0081, 0xa184a053, 0xaf000400,
         1, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
         SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
         "EEPROM - fast"},
        /* Expansion entry 1001 */
        {0x2a000002, 0x6b808201, 0x00050081, 0x03840253, 0xaf020406,
         0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
         SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
         "Entry 1001"},
        /* Expansion entry 1010 */
        {0x26000001, 0x67808201, 0x00050081, 0x03840253, 0xaf020406,
         0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
         SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
         "Entry 1010"},
        /* ATMEL AT45DB011B (buffered flash) */
        {0x2e000003, 0x6e808273, 0x00570081, 0x68848353, 0xaf000400,
         1, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
         BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE,
         "Buffered flash (128kB)"},
        /* Expansion entry 1100 */
        {0x33000000, 0x73808201, 0x00050081, 0x03840253, 0xaf020406,
         0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
         SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
         "Entry 1100"},
        /* Expansion entry 1101 */
        {0x3b000002, 0x7b808201, 0x00050081, 0x03840253, 0xaf020406,
         0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
         SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
         "Entry 1101"},
        /* Ateml Expansion entry 1110 */
        {0x37000001, 0x76808273, 0x00570081, 0x68848353, 0xaf000400,
         1, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
         BUFFERED_FLASH_BYTE_ADDR_MASK, 0,
         "Entry 1110 (Atmel)"},
        /* ATMEL AT45DB021B (buffered flash) */
        {0x3f000003, 0x7e808273, 0x00570081, 0x68848353, 0xaf000400,
         1, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
         BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE*2,
         "Buffered flash (256kB)"},
};
209
210 MODULE_DEVICE_TABLE(pci, bnx2_pci_tbl);
211
212 static inline u32 bnx2_tx_avail(struct bnx2 *bp)
213 {
214         u32 diff;
215
216         smp_mb();
217
218         /* The ring uses 256 indices for 255 entries, one of them
219          * needs to be skipped.
220          */
221         diff = bp->tx_prod - bp->tx_cons;
222         if (unlikely(diff >= TX_DESC_CNT)) {
223                 diff &= 0xffff;
224                 if (diff == TX_DESC_CNT)
225                         diff = MAX_TX_DESC_CNT;
226         }
227         return (bp->tx_ring_size - diff);
228 }
229
/* Read a register through the PCI config window.  Sets the indirect
 * address register, then reads the data register.  Caller must hold
 * whatever lock protects the shared config window.
 */
static u32
bnx2_reg_rd_ind(struct bnx2 *bp, u32 offset)
{
        REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
        return (REG_RD(bp, BNX2_PCICFG_REG_WINDOW));
}
236
/* Write a register through the PCI config window.  Counterpart of
 * bnx2_reg_rd_ind(); same locking requirement on the shared window.
 */
static void
bnx2_reg_wr_ind(struct bnx2 *bp, u32 offset, u32 val)
{
        REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
        REG_WR(bp, BNX2_PCICFG_REG_WINDOW, val);
}
243
244 static void
245 bnx2_ctx_wr(struct bnx2 *bp, u32 cid_addr, u32 offset, u32 val)
246 {
247         offset += cid_addr;
248         if (CHIP_NUM(bp) == CHIP_NUM_5709) {
249                 int i;
250
251                 REG_WR(bp, BNX2_CTX_CTX_DATA, val);
252                 REG_WR(bp, BNX2_CTX_CTX_CTRL,
253                        offset | BNX2_CTX_CTX_CTRL_WRITE_REQ);
254                 for (i = 0; i < 5; i++) {
255                         u32 val;
256                         val = REG_RD(bp, BNX2_CTX_CTX_CTRL);
257                         if ((val & BNX2_CTX_CTX_CTRL_WRITE_REQ) == 0)
258                                 break;
259                         udelay(5);
260                 }
261         } else {
262                 REG_WR(bp, BNX2_CTX_DATA_ADR, offset);
263                 REG_WR(bp, BNX2_CTX_DATA, val);
264         }
265 }
266
/* Read PHY register @reg over the MDIO bus into *@val.
 *
 * Returns 0 on success, -EBUSY if the MDIO transaction does not
 * complete within ~500us (in which case *val is set to 0).
 */
static int
bnx2_read_phy(struct bnx2 *bp, u32 reg, u32 *val)
{
        u32 val1;
        int i, ret;

        /* Hardware autopolling owns the MDIO bus; disable it for the
         * duration of our transaction.  The read-back flushes the
         * posted write before the settle delay.
         */
        if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
                val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
                val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;

                REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
                REG_RD(bp, BNX2_EMAC_MDIO_MODE);

                udelay(40);
        }

        /* Start the read: PHY address, register, and command bits. */
        val1 = (bp->phy_addr << 21) | (reg << 16) |
                BNX2_EMAC_MDIO_COMM_COMMAND_READ | BNX2_EMAC_MDIO_COMM_DISEXT |
                BNX2_EMAC_MDIO_COMM_START_BUSY;
        REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);

        /* Poll up to 50 x 10us for START_BUSY to clear, then re-read
         * the register to fetch the returned data bits.
         */
        for (i = 0; i < 50; i++) {
                udelay(10);

                val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
                if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
                        udelay(5);

                        val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
                        val1 &= BNX2_EMAC_MDIO_COMM_DATA;

                        break;
                }
        }

        /* If START_BUSY is still set we timed out. */
        if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY) {
                *val = 0x0;
                ret = -EBUSY;
        }
        else {
                *val = val1;
                ret = 0;
        }

        /* Restore hardware autopolling if we disabled it above. */
        if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
                val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
                val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;

                REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
                REG_RD(bp, BNX2_EMAC_MDIO_MODE);

                udelay(40);
        }

        return ret;
}
323
/* Write @val to PHY register @reg over the MDIO bus.
 *
 * Returns 0 on success, -EBUSY if the MDIO transaction does not
 * complete within ~500us.  Mirrors bnx2_read_phy(): autopolling is
 * disabled around the transaction and restored afterwards.
 */
static int
bnx2_write_phy(struct bnx2 *bp, u32 reg, u32 val)
{
        u32 val1;
        int i, ret;

        /* Take the MDIO bus away from hardware autopolling. */
        if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
                val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
                val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;

                REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
                REG_RD(bp, BNX2_EMAC_MDIO_MODE);

                udelay(40);
        }

        /* Start the write: PHY address, register, data and command bits. */
        val1 = (bp->phy_addr << 21) | (reg << 16) | val |
                BNX2_EMAC_MDIO_COMM_COMMAND_WRITE |
                BNX2_EMAC_MDIO_COMM_START_BUSY | BNX2_EMAC_MDIO_COMM_DISEXT;
        REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);

        /* Poll up to 50 x 10us for the transaction to complete. */
        for (i = 0; i < 50; i++) {
                udelay(10);

                val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
                if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
                        udelay(5);
                        break;
                }
        }

        if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)
                ret = -EBUSY;
        else
                ret = 0;

        /* Give the bus back to hardware autopolling. */
        if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
                val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
                val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;

                REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
                REG_RD(bp, BNX2_EMAC_MDIO_MODE);

                udelay(40);
        }

        return ret;
}
372
/* Mask chip interrupts.  The read-back flushes the posted write so the
 * mask is guaranteed to have reached the device before we return.
 */
static void
bnx2_disable_int(struct bnx2 *bp)
{
        REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
               BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
        REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD);
}
380
/* Unmask chip interrupts.  The index is acked first with the mask bit
 * still set, then again with interrupts enabled; finally COAL_NOW
 * forces an immediate interrupt so no events are missed across the
 * disabled window.
 */
static void
bnx2_enable_int(struct bnx2 *bp)
{
        REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
               BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
               BNX2_PCICFG_INT_ACK_CMD_MASK_INT | bp->last_status_idx);

        REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
               BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID | bp->last_status_idx);

        REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
}
393
/* Disable interrupts and wait for any in-flight handler to finish.
 * intr_sem is bumped first so the ISR sees the disable in progress;
 * bnx2_netif_start() drops it again.
 */
static void
bnx2_disable_int_sync(struct bnx2 *bp)
{
        atomic_inc(&bp->intr_sem);
        bnx2_disable_int(bp);
        synchronize_irq(bp->pdev->irq);
}
401
/* Quiesce the interface: disable interrupts synchronously, then stop
 * NAPI polling and the tx queue.  trans_start is refreshed so the
 * watchdog does not fire a spurious tx timeout while we are stopped.
 */
static void
bnx2_netif_stop(struct bnx2 *bp)
{
        bnx2_disable_int_sync(bp);
        if (netif_running(bp->dev)) {
                netif_poll_disable(bp->dev);
                netif_tx_disable(bp->dev);
                bp->dev->trans_start = jiffies; /* prevent tx timeout */
        }
}
412
/* Undo bnx2_netif_stop().  Only the call that drops intr_sem to zero
 * re-enables the datapath, so nested stop/start pairs balance.
 */
static void
bnx2_netif_start(struct bnx2 *bp)
{
        if (atomic_dec_and_test(&bp->intr_sem)) {
                if (netif_running(bp->dev)) {
                        netif_wake_queue(bp->dev);
                        netif_poll_enable(bp->dev);
                        bnx2_enable_int(bp);
                }
        }
}
424
/* Release all DMA and host memory allocated by bnx2_alloc_mem().
 * Safe to call on a partially-allocated state (every pointer is
 * checked and NULLed), which is how the alloc error path uses it.
 */
static void
bnx2_free_mem(struct bnx2 *bp)
{
        int i;

        /* 5709-only context memory pages. */
        for (i = 0; i < bp->ctx_pages; i++) {
                if (bp->ctx_blk[i]) {
                        pci_free_consistent(bp->pdev, BCM_PAGE_SIZE,
                                            bp->ctx_blk[i],
                                            bp->ctx_blk_mapping[i]);
                        bp->ctx_blk[i] = NULL;
                }
        }
        /* Status and statistics blocks share one allocation; stats_blk
         * points into it, so only status_blk is freed.
         */
        if (bp->status_blk) {
                pci_free_consistent(bp->pdev, bp->status_stats_size,
                                    bp->status_blk, bp->status_blk_mapping);
                bp->status_blk = NULL;
                bp->stats_blk = NULL;
        }
        if (bp->tx_desc_ring) {
                pci_free_consistent(bp->pdev,
                                    sizeof(struct tx_bd) * TX_DESC_CNT,
                                    bp->tx_desc_ring, bp->tx_desc_mapping);
                bp->tx_desc_ring = NULL;
        }
        /* kfree/vfree accept NULL, so no guard is needed here. */
        kfree(bp->tx_buf_ring);
        bp->tx_buf_ring = NULL;
        for (i = 0; i < bp->rx_max_ring; i++) {
                if (bp->rx_desc_ring[i])
                        pci_free_consistent(bp->pdev,
                                            sizeof(struct rx_bd) * RX_DESC_CNT,
                                            bp->rx_desc_ring[i],
                                            bp->rx_desc_mapping[i]);
                bp->rx_desc_ring[i] = NULL;
        }
        vfree(bp->rx_buf_ring);
        bp->rx_buf_ring = NULL;
}
463
/* Allocate all rings, the status/statistics block and (on 5709) the
 * context memory.  Returns 0 on success or -ENOMEM; on failure every
 * partial allocation is released via bnx2_free_mem().
 */
static int
bnx2_alloc_mem(struct bnx2 *bp)
{
        int i, status_blk_size;

        /* Host-side tx shadow ring (sw_bd per descriptor). */
        bp->tx_buf_ring = kzalloc(sizeof(struct sw_bd) * TX_DESC_CNT,
                                  GFP_KERNEL);
        if (bp->tx_buf_ring == NULL)
                return -ENOMEM;

        /* DMA-coherent tx descriptor ring. */
        bp->tx_desc_ring = pci_alloc_consistent(bp->pdev,
                                                sizeof(struct tx_bd) *
                                                TX_DESC_CNT,
                                                &bp->tx_desc_mapping);
        if (bp->tx_desc_ring == NULL)
                goto alloc_mem_err;

        /* Host-side rx shadow rings; vmalloc because this can span
         * multiple pages when rx_max_ring > 1.
         */
        bp->rx_buf_ring = vmalloc(sizeof(struct sw_bd) * RX_DESC_CNT *
                                  bp->rx_max_ring);
        if (bp->rx_buf_ring == NULL)
                goto alloc_mem_err;

        memset(bp->rx_buf_ring, 0, sizeof(struct sw_bd) * RX_DESC_CNT *
                                   bp->rx_max_ring);

        /* One DMA-coherent rx descriptor ring per page of the ring. */
        for (i = 0; i < bp->rx_max_ring; i++) {
                bp->rx_desc_ring[i] =
                        pci_alloc_consistent(bp->pdev,
                                             sizeof(struct rx_bd) * RX_DESC_CNT,
                                             &bp->rx_desc_mapping[i]);
                if (bp->rx_desc_ring[i] == NULL)
                        goto alloc_mem_err;

        }

        /* Combine status and statistics blocks into one allocation. */
        status_blk_size = L1_CACHE_ALIGN(sizeof(struct status_block));
        bp->status_stats_size = status_blk_size +
                                sizeof(struct statistics_block);

        bp->status_blk = pci_alloc_consistent(bp->pdev, bp->status_stats_size,
                                              &bp->status_blk_mapping);
        if (bp->status_blk == NULL)
                goto alloc_mem_err;

        memset(bp->status_blk, 0, bp->status_stats_size);

        /* stats_blk points into the tail of the status allocation. */
        bp->stats_blk = (void *) ((unsigned long) bp->status_blk +
                                  status_blk_size);

        bp->stats_blk_mapping = bp->status_blk_mapping + status_blk_size;

        /* 5709 needs 8kB of context memory, in BCM_PAGE_SIZE chunks. */
        if (CHIP_NUM(bp) == CHIP_NUM_5709) {
                bp->ctx_pages = 0x2000 / BCM_PAGE_SIZE;
                if (bp->ctx_pages == 0)
                        bp->ctx_pages = 1;
                for (i = 0; i < bp->ctx_pages; i++) {
                        bp->ctx_blk[i] = pci_alloc_consistent(bp->pdev,
                                                BCM_PAGE_SIZE,
                                                &bp->ctx_blk_mapping[i]);
                        if (bp->ctx_blk[i] == NULL)
                                goto alloc_mem_err;
                }
        }
        return 0;

alloc_mem_err:
        bnx2_free_mem(bp);
        return -ENOMEM;
}
534
/* Publish the current link state to the bootcode via shared memory so
 * firmware (e.g. for management traffic) tracks the driver's view.
 */
static void
bnx2_report_fw_link(struct bnx2 *bp)
{
        u32 fw_link_status = 0;

        if (bp->link_up) {
                u32 bmsr;

                /* Encode speed + duplex; unknown speeds leave the
                 * field at 0 and only LINK_UP is reported.
                 */
                switch (bp->line_speed) {
                case SPEED_10:
                        if (bp->duplex == DUPLEX_HALF)
                                fw_link_status = BNX2_LINK_STATUS_10HALF;
                        else
                                fw_link_status = BNX2_LINK_STATUS_10FULL;
                        break;
                case SPEED_100:
                        if (bp->duplex == DUPLEX_HALF)
                                fw_link_status = BNX2_LINK_STATUS_100HALF;
                        else
                                fw_link_status = BNX2_LINK_STATUS_100FULL;
                        break;
                case SPEED_1000:
                        if (bp->duplex == DUPLEX_HALF)
                                fw_link_status = BNX2_LINK_STATUS_1000HALF;
                        else
                                fw_link_status = BNX2_LINK_STATUS_1000FULL;
                        break;
                case SPEED_2500:
                        if (bp->duplex == DUPLEX_HALF)
                                fw_link_status = BNX2_LINK_STATUS_2500HALF;
                        else
                                fw_link_status = BNX2_LINK_STATUS_2500FULL;
                        break;
                }

                fw_link_status |= BNX2_LINK_STATUS_LINK_UP;

                if (bp->autoneg) {
                        fw_link_status |= BNX2_LINK_STATUS_AN_ENABLED;

                        /* BMSR is latched; read twice to get the
                         * current value rather than a stale latch.
                         */
                        bnx2_read_phy(bp, MII_BMSR, &bmsr);
                        bnx2_read_phy(bp, MII_BMSR, &bmsr);

                        if (!(bmsr & BMSR_ANEGCOMPLETE) ||
                            bp->phy_flags & PHY_PARALLEL_DETECT_FLAG)
                                fw_link_status |= BNX2_LINK_STATUS_PARALLEL_DET;
                        else
                                fw_link_status |= BNX2_LINK_STATUS_AN_COMPLETE;
                }
        }
        else
                fw_link_status = BNX2_LINK_STATUS_LINK_DOWN;

        REG_WR_IND(bp, bp->shmem_base + BNX2_LINK_STATUS, fw_link_status);
}
590
/* Log the link state change, update the carrier flag, and forward the
 * new state to firmware via bnx2_report_fw_link().
 */
static void
bnx2_report_link(struct bnx2 *bp)
{
        if (bp->link_up) {
                netif_carrier_on(bp->dev);
                printk(KERN_INFO PFX "%s NIC Link is Up, ", bp->dev->name);

                printk("%d Mbps ", bp->line_speed);

                if (bp->duplex == DUPLEX_FULL)
                        printk("full duplex");
                else
                        printk("half duplex");

                /* Describe the negotiated pause configuration, if any. */
                if (bp->flow_ctrl) {
                        if (bp->flow_ctrl & FLOW_CTRL_RX) {
                                printk(", receive ");
                                if (bp->flow_ctrl & FLOW_CTRL_TX)
                                        printk("& transmit ");
                        }
                        else {
                                printk(", transmit ");
                        }
                        printk("flow control ON");
                }
                printk("\n");
        }
        else {
                netif_carrier_off(bp->dev);
                printk(KERN_ERR PFX "%s NIC Link is Down\n", bp->dev->name);
        }

        bnx2_report_fw_link(bp);
}
625
/* Resolve the pause (flow control) configuration after link-up.
 *
 * If flow control was not fully autonegotiated, the requested setting
 * is applied directly (full duplex only).  Otherwise the local and
 * remote pause advertisements are combined per IEEE 802.3 Table 28B-3.
 * Result is stored in bp->flow_ctrl.
 */
static void
bnx2_resolve_flow_ctrl(struct bnx2 *bp)
{
        u32 local_adv, remote_adv;

        bp->flow_ctrl = 0;
        /* Not autonegotiating both speed and pause: use forced setting. */
        if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
                (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {

                if (bp->duplex == DUPLEX_FULL) {
                        bp->flow_ctrl = bp->req_flow_ctrl;
                }
                return;
        }

        /* Pause is only defined for full duplex links. */
        if (bp->duplex != DUPLEX_FULL) {
                return;
        }

        /* 5708 SerDes reports the resolved pause state directly. */
        if ((bp->phy_flags & PHY_SERDES_FLAG) &&
            (CHIP_NUM(bp) == CHIP_NUM_5708)) {
                u32 val;

                bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
                if (val & BCM5708S_1000X_STAT1_TX_PAUSE)
                        bp->flow_ctrl |= FLOW_CTRL_TX;
                if (val & BCM5708S_1000X_STAT1_RX_PAUSE)
                        bp->flow_ctrl |= FLOW_CTRL_RX;
                return;
        }

        bnx2_read_phy(bp, MII_ADVERTISE, &local_adv);
        bnx2_read_phy(bp, MII_LPA, &remote_adv);

        /* SerDes uses 1000Base-X pause bits; translate them to the
         * copper ADVERTISE_PAUSE_* layout so one resolution path works
         * for both media.
         */
        if (bp->phy_flags & PHY_SERDES_FLAG) {
                u32 new_local_adv = 0;
                u32 new_remote_adv = 0;

                if (local_adv & ADVERTISE_1000XPAUSE)
                        new_local_adv |= ADVERTISE_PAUSE_CAP;
                if (local_adv & ADVERTISE_1000XPSE_ASYM)
                        new_local_adv |= ADVERTISE_PAUSE_ASYM;
                if (remote_adv & ADVERTISE_1000XPAUSE)
                        new_remote_adv |= ADVERTISE_PAUSE_CAP;
                if (remote_adv & ADVERTISE_1000XPSE_ASYM)
                        new_remote_adv |= ADVERTISE_PAUSE_ASYM;

                local_adv = new_local_adv;
                remote_adv = new_remote_adv;
        }

        /* See Table 28B-3 of 802.3ab-1999 spec. */
        if (local_adv & ADVERTISE_PAUSE_CAP) {
                if(local_adv & ADVERTISE_PAUSE_ASYM) {
                        if (remote_adv & ADVERTISE_PAUSE_CAP) {
                                bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
                        }
                        else if (remote_adv & ADVERTISE_PAUSE_ASYM) {
                                bp->flow_ctrl = FLOW_CTRL_RX;
                        }
                }
                else {
                        if (remote_adv & ADVERTISE_PAUSE_CAP) {
                                bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
                        }
                }
        }
        else if (local_adv & ADVERTISE_PAUSE_ASYM) {
                if ((remote_adv & ADVERTISE_PAUSE_CAP) &&
                        (remote_adv & ADVERTISE_PAUSE_ASYM)) {

                        bp->flow_ctrl = FLOW_CTRL_TX;
                }
        }
}
701
702 static int
703 bnx2_5708s_linkup(struct bnx2 *bp)
704 {
705         u32 val;
706
707         bp->link_up = 1;
708         bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
709         switch (val & BCM5708S_1000X_STAT1_SPEED_MASK) {
710                 case BCM5708S_1000X_STAT1_SPEED_10:
711                         bp->line_speed = SPEED_10;
712                         break;
713                 case BCM5708S_1000X_STAT1_SPEED_100:
714                         bp->line_speed = SPEED_100;
715                         break;
716                 case BCM5708S_1000X_STAT1_SPEED_1G:
717                         bp->line_speed = SPEED_1000;
718                         break;
719                 case BCM5708S_1000X_STAT1_SPEED_2G5:
720                         bp->line_speed = SPEED_2500;
721                         break;
722         }
723         if (val & BCM5708S_1000X_STAT1_FD)
724                 bp->duplex = DUPLEX_FULL;
725         else
726                 bp->duplex = DUPLEX_HALF;
727
728         return 0;
729 }
730
731 static int
732 bnx2_5706s_linkup(struct bnx2 *bp)
733 {
734         u32 bmcr, local_adv, remote_adv, common;
735
736         bp->link_up = 1;
737         bp->line_speed = SPEED_1000;
738
739         bnx2_read_phy(bp, MII_BMCR, &bmcr);
740         if (bmcr & BMCR_FULLDPLX) {
741                 bp->duplex = DUPLEX_FULL;
742         }
743         else {
744                 bp->duplex = DUPLEX_HALF;
745         }
746
747         if (!(bmcr & BMCR_ANENABLE)) {
748                 return 0;
749         }
750
751         bnx2_read_phy(bp, MII_ADVERTISE, &local_adv);
752         bnx2_read_phy(bp, MII_LPA, &remote_adv);
753
754         common = local_adv & remote_adv;
755         if (common & (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL)) {
756
757                 if (common & ADVERTISE_1000XFULL) {
758                         bp->duplex = DUPLEX_FULL;
759                 }
760                 else {
761                         bp->duplex = DUPLEX_HALF;
762                 }
763         }
764
765         return 0;
766 }
767
/* Determine line speed and duplex for a copper PHY that reports link.
 *
 * With autoneg enabled, the 1000Base-T status registers are checked
 * first, then the 10/100 advertisement intersection; if nothing was
 * negotiated in common, the link is declared down.  With autoneg
 * disabled, speed/duplex come straight from BMCR.  Always returns 0.
 */
static int
bnx2_copper_linkup(struct bnx2 *bp)
{
        u32 bmcr;

        bnx2_read_phy(bp, MII_BMCR, &bmcr);
        if (bmcr & BMCR_ANENABLE) {
                u32 local_adv, remote_adv, common;

                bnx2_read_phy(bp, MII_CTRL1000, &local_adv);
                bnx2_read_phy(bp, MII_STAT1000, &remote_adv);

                /* STAT1000 ability bits are shifted by 2 relative to
                 * the CTRL1000 advertisement bits; align before ANDing.
                 */
                common = local_adv & (remote_adv >> 2);
                if (common & ADVERTISE_1000FULL) {
                        bp->line_speed = SPEED_1000;
                        bp->duplex = DUPLEX_FULL;
                }
                else if (common & ADVERTISE_1000HALF) {
                        bp->line_speed = SPEED_1000;
                        bp->duplex = DUPLEX_HALF;
                }
                else {
                        /* No gigabit match; fall back to 10/100 bits. */
                        bnx2_read_phy(bp, MII_ADVERTISE, &local_adv);
                        bnx2_read_phy(bp, MII_LPA, &remote_adv);

                        common = local_adv & remote_adv;
                        if (common & ADVERTISE_100FULL) {
                                bp->line_speed = SPEED_100;
                                bp->duplex = DUPLEX_FULL;
                        }
                        else if (common & ADVERTISE_100HALF) {
                                bp->line_speed = SPEED_100;
                                bp->duplex = DUPLEX_HALF;
                        }
                        else if (common & ADVERTISE_10FULL) {
                                bp->line_speed = SPEED_10;
                                bp->duplex = DUPLEX_FULL;
                        }
                        else if (common & ADVERTISE_10HALF) {
                                bp->line_speed = SPEED_10;
                                bp->duplex = DUPLEX_HALF;
                        }
                        else {
                                bp->line_speed = 0;
                                bp->link_up = 0;
                        }
                }
        }
        else {
                /* Autoneg disabled: speed and duplex are forced in BMCR. */
                if (bmcr & BMCR_SPEED100) {
                        bp->line_speed = SPEED_100;
                }
                else {
                        bp->line_speed = SPEED_10;
                }
                if (bmcr & BMCR_FULLDPLX) {
                        bp->duplex = DUPLEX_FULL;
                }
                else {
                        bp->duplex = DUPLEX_HALF;
                }
        }

        return 0;
}
833
/* Program the EMAC to match the resolved link parameters in *bp
 * (link_up, line_speed, duplex, flow_ctrl) and acknowledge the EMAC
 * link-change interrupt.  Called after link resolution.  Returns 0.
 */
static int
bnx2_set_mac_link(struct bnx2 *bp)
{
	u32 val;

	/* Default TX lengths (slot time / IPG); gigabit half duplex
	 * needs the larger value. */
	REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x2620);
	if (bp->link_up && (bp->line_speed == SPEED_1000) &&
		(bp->duplex == DUPLEX_HALF)) {
		REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x26ff);
	}

	/* Configure the EMAC mode register. */
	val = REG_RD(bp, BNX2_EMAC_MODE);

	val &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
		BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
		BNX2_EMAC_MODE_25G_MODE);

	if (bp->link_up) {
		switch (bp->line_speed) {
			case SPEED_10:
				/* 5706 has no dedicated 10M MII mode;
				 * fall through to plain MII for it. */
				if (CHIP_NUM(bp) != CHIP_NUM_5706) {
					val |= BNX2_EMAC_MODE_PORT_MII_10M;
					break;
				}
				/* fall through */
			case SPEED_100:
				val |= BNX2_EMAC_MODE_PORT_MII;
				break;
			case SPEED_2500:
				/* 2.5G is GMII plus the 25G mode bit. */
				val |= BNX2_EMAC_MODE_25G_MODE;
				/* fall through */
			case SPEED_1000:
				val |= BNX2_EMAC_MODE_PORT_GMII;
				break;
		}
	}
	else {
		/* No link: leave the port in GMII mode. */
		val |= BNX2_EMAC_MODE_PORT_GMII;
	}

	/* Set the MAC to operate in the appropriate duplex mode. */
	if (bp->duplex == DUPLEX_HALF)
		val |= BNX2_EMAC_MODE_HALF_DUPLEX;
	REG_WR(bp, BNX2_EMAC_MODE, val);

	/* Enable/disable rx PAUSE. */
	bp->rx_mode &= ~BNX2_EMAC_RX_MODE_FLOW_EN;

	if (bp->flow_ctrl & FLOW_CTRL_RX)
		bp->rx_mode |= BNX2_EMAC_RX_MODE_FLOW_EN;
	REG_WR(bp, BNX2_EMAC_RX_MODE, bp->rx_mode);

	/* Enable/disable tx PAUSE. */
	val = REG_RD(bp, BNX2_EMAC_TX_MODE);
	val &= ~BNX2_EMAC_TX_MODE_FLOW_EN;

	if (bp->flow_ctrl & FLOW_CTRL_TX)
		val |= BNX2_EMAC_TX_MODE_FLOW_EN;
	REG_WR(bp, BNX2_EMAC_TX_MODE, val);

	/* Acknowledge the interrupt. */
	REG_WR(bp, BNX2_EMAC_STATUS, BNX2_EMAC_STATUS_LINK_CHANGE);

	return 0;
}
900
/* Re-evaluate the PHY link state, update bp->link_up / line_speed /
 * duplex / flow control via the chip-specific linkup helpers, report
 * link transitions, and reprogram the MAC.  Returns 0.
 * NOTE(review): appears to run under bp->phy_lock (the serdes setup
 * path drops/retakes it) — confirm at call sites.
 */
static int
bnx2_set_link(struct bnx2 *bp)
{
	u32 bmsr;
	u8 link_up;

	/* In loopback the link is forced up; nothing to resolve. */
	if (bp->loopback == MAC_LOOPBACK || bp->loopback == PHY_LOOPBACK) {
		bp->link_up = 1;
		return 0;
	}

	link_up = bp->link_up;

	/* BMSR latches link-down events; read twice so the second
	 * read reflects the current link state. */
	bnx2_read_phy(bp, MII_BMSR, &bmsr);
	bnx2_read_phy(bp, MII_BMSR, &bmsr);

	/* On 5706 serdes, the EMAC link status is authoritative;
	 * override the BMSR link bit with it. */
	if ((bp->phy_flags & PHY_SERDES_FLAG) &&
	    (CHIP_NUM(bp) == CHIP_NUM_5706)) {
		u32 val;

		val = REG_RD(bp, BNX2_EMAC_STATUS);
		if (val & BNX2_EMAC_STATUS_LINK)
			bmsr |= BMSR_LSTATUS;
		else
			bmsr &= ~BMSR_LSTATUS;
	}

	if (bmsr & BMSR_LSTATUS) {
		bp->link_up = 1;

		/* Resolve speed/duplex with the media/chip-specific
		 * helper. */
		if (bp->phy_flags & PHY_SERDES_FLAG) {
			if (CHIP_NUM(bp) == CHIP_NUM_5706)
				bnx2_5706s_linkup(bp);
			else if (CHIP_NUM(bp) == CHIP_NUM_5708)
				bnx2_5708s_linkup(bp);
		}
		else {
			bnx2_copper_linkup(bp);
		}
		bnx2_resolve_flow_ctrl(bp);
	}
	else {
		/* Link down: if a forced-2.5G serdes setting left
		 * autoneg disabled, re-enable autoneg. */
		if ((bp->phy_flags & PHY_SERDES_FLAG) &&
			(bp->autoneg & AUTONEG_SPEED)) {

			u32 bmcr;

			bnx2_read_phy(bp, MII_BMCR, &bmcr);
			bmcr &= ~BCM5708S_BMCR_FORCE_2500;
			if (!(bmcr & BMCR_ANENABLE)) {
				bnx2_write_phy(bp, MII_BMCR, bmcr |
					BMCR_ANENABLE);
			}
		}
		bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG;
		bp->link_up = 0;
	}

	/* Only log real link transitions. */
	if (bp->link_up != link_up) {
		bnx2_report_link(bp);
	}

	bnx2_set_mac_link(bp);

	return 0;
}
967
968 static int
969 bnx2_reset_phy(struct bnx2 *bp)
970 {
971         int i;
972         u32 reg;
973
974         bnx2_write_phy(bp, MII_BMCR, BMCR_RESET);
975
976 #define PHY_RESET_MAX_WAIT 100
977         for (i = 0; i < PHY_RESET_MAX_WAIT; i++) {
978                 udelay(10);
979
980                 bnx2_read_phy(bp, MII_BMCR, &reg);
981                 if (!(reg & BMCR_RESET)) {
982                         udelay(20);
983                         break;
984                 }
985         }
986         if (i == PHY_RESET_MAX_WAIT) {
987                 return -EBUSY;
988         }
989         return 0;
990 }
991
992 static u32
993 bnx2_phy_get_pause_adv(struct bnx2 *bp)
994 {
995         u32 adv = 0;
996
997         if ((bp->req_flow_ctrl & (FLOW_CTRL_RX | FLOW_CTRL_TX)) ==
998                 (FLOW_CTRL_RX | FLOW_CTRL_TX)) {
999
1000                 if (bp->phy_flags & PHY_SERDES_FLAG) {
1001                         adv = ADVERTISE_1000XPAUSE;
1002                 }
1003                 else {
1004                         adv = ADVERTISE_PAUSE_CAP;
1005                 }
1006         }
1007         else if (bp->req_flow_ctrl & FLOW_CTRL_TX) {
1008                 if (bp->phy_flags & PHY_SERDES_FLAG) {
1009                         adv = ADVERTISE_1000XPSE_ASYM;
1010                 }
1011                 else {
1012                         adv = ADVERTISE_PAUSE_ASYM;
1013                 }
1014         }
1015         else if (bp->req_flow_ctrl & FLOW_CTRL_RX) {
1016                 if (bp->phy_flags & PHY_SERDES_FLAG) {
1017                         adv = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
1018                 }
1019                 else {
1020                         adv = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
1021                 }
1022         }
1023         return adv;
1024 }
1025
/* Configure the serdes PHY according to bp->autoneg / req_line_speed /
 * req_duplex / advertising.  Forced-speed mode programs BMCR directly
 * (including the 5708 2.5G force bit); autoneg mode updates the
 * advertisement and restarts autonegotiation.  May briefly drop
 * bp->phy_lock around an msleep when forcing the link down.
 * Returns 0.
 */
static int
bnx2_setup_serdes_phy(struct bnx2 *bp)
{
	u32 adv, bmcr, up1;
	u32 new_adv = 0;

	if (!(bp->autoneg & AUTONEG_SPEED)) {
		/* Forced speed/duplex path. */
		u32 new_bmcr;
		int force_link_down = 0;

		bnx2_read_phy(bp, MII_ADVERTISE, &adv);
		adv &= ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF);

		bnx2_read_phy(bp, MII_BMCR, &bmcr);
		new_bmcr = bmcr & ~(BMCR_ANENABLE | BCM5708S_BMCR_FORCE_2500);
		new_bmcr |= BMCR_SPEED1000;
		if (bp->req_line_speed == SPEED_2500) {
			/* Force 2.5G and make sure the UP1 2.5G enable
			 * bit is set; toggling it requires a link
			 * bounce. */
			new_bmcr |= BCM5708S_BMCR_FORCE_2500;
			bnx2_read_phy(bp, BCM5708S_UP1, &up1);
			if (!(up1 & BCM5708S_UP1_2G5)) {
				up1 |= BCM5708S_UP1_2G5;
				bnx2_write_phy(bp, BCM5708S_UP1, up1);
				force_link_down = 1;
			}
		} else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
			/* Not forcing 2.5G: clear the 2.5G enable on
			 * 5708, again bouncing the link if it changes. */
			bnx2_read_phy(bp, BCM5708S_UP1, &up1);
			if (up1 & BCM5708S_UP1_2G5) {
				up1 &= ~BCM5708S_UP1_2G5;
				bnx2_write_phy(bp, BCM5708S_UP1, up1);
				force_link_down = 1;
			}
		}

		if (bp->req_duplex == DUPLEX_FULL) {
			adv |= ADVERTISE_1000XFULL;
			new_bmcr |= BMCR_FULLDPLX;
		}
		else {
			adv |= ADVERTISE_1000XHALF;
			new_bmcr &= ~BMCR_FULLDPLX;
		}
		if ((new_bmcr != bmcr) || (force_link_down)) {
			/* Force a link down visible on the other side */
			if (bp->link_up) {
				bnx2_write_phy(bp, MII_ADVERTISE, adv &
					       ~(ADVERTISE_1000XFULL |
						 ADVERTISE_1000XHALF));
				bnx2_write_phy(bp, MII_BMCR, bmcr |
					BMCR_ANRESTART | BMCR_ANENABLE);

				bp->link_up = 0;
				netif_carrier_off(bp->dev);
				bnx2_write_phy(bp, MII_BMCR, new_bmcr);
				bnx2_report_link(bp);
			}
			bnx2_write_phy(bp, MII_ADVERTISE, adv);
			bnx2_write_phy(bp, MII_BMCR, new_bmcr);
		}
		return 0;
	}

	/* Autoneg path.  Enable 2.5G advertisement when capable. */
	if (bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) {
		bnx2_read_phy(bp, BCM5708S_UP1, &up1);
		up1 |= BCM5708S_UP1_2G5;
		bnx2_write_phy(bp, BCM5708S_UP1, up1);
	}

	if (bp->advertising & ADVERTISED_1000baseT_Full)
		new_adv |= ADVERTISE_1000XFULL;

	new_adv |= bnx2_phy_get_pause_adv(bp);

	bnx2_read_phy(bp, MII_ADVERTISE, &adv);
	bnx2_read_phy(bp, MII_BMCR, &bmcr);

	bp->serdes_an_pending = 0;
	/* Restart autoneg only if the advertisement changed or autoneg
	 * was disabled. */
	if ((adv != new_adv) || ((bmcr & BMCR_ANENABLE) == 0)) {
		/* Force a link down visible on the other side */
		if (bp->link_up) {
			bnx2_write_phy(bp, MII_BMCR, BMCR_LOOPBACK);
			spin_unlock_bh(&bp->phy_lock);
			msleep(20);
			spin_lock_bh(&bp->phy_lock);
		}

		bnx2_write_phy(bp, MII_ADVERTISE, new_adv);
		bnx2_write_phy(bp, MII_BMCR, bmcr | BMCR_ANRESTART |
			BMCR_ANENABLE);
		/* Speed up link-up time when the link partner
		 * does not autonegotiate which is very common
		 * in blade servers. Some blade servers use
		 * IPMI for keyboard input and it's important
		 * to minimize link disruptions. Autoneg. involves
		 * exchanging base pages plus 3 next pages and
		 * normally completes in about 120 msec.
		 */
		bp->current_interval = SERDES_AN_TIMEOUT;
		bp->serdes_an_pending = 1;
		mod_timer(&bp->timer, jiffies + bp->current_interval);
	}

	return 0;
}
1129
/* ethtool advertisement masks: every speed this driver supports on
 * fibre and on copper media, respectively. */
#define ETHTOOL_ALL_FIBRE_SPEED						\
	(ADVERTISED_1000baseT_Full)

#define ETHTOOL_ALL_COPPER_SPEED					\
	(ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |		\
	ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |		\
	ADVERTISED_1000baseT_Full)

/* MII advertisement-register masks covering all 10/100 bits (plus the
 * mandatory CSMA selector) and all 1000 bits. */
#define PHY_ALL_10_100_SPEED (ADVERTISE_10HALF | ADVERTISE_10FULL | \
	ADVERTISE_100HALF | ADVERTISE_100FULL | ADVERTISE_CSMA)

#define PHY_ALL_1000_SPEED (ADVERTISE_1000HALF | ADVERTISE_1000FULL)
1142
/* Configure the copper PHY from bp->autoneg / advertising /
 * req_line_speed / req_duplex.  Autoneg mode rewrites the 10/100 and
 * 1000 advertisement registers and restarts autoneg only when they
 * changed; forced mode programs BMCR, bouncing the link if it was up.
 * May briefly drop bp->phy_lock around an msleep.  Returns 0.
 */
static int
bnx2_setup_copper_phy(struct bnx2 *bp)
{
	u32 bmcr;
	u32 new_bmcr;

	bnx2_read_phy(bp, MII_BMCR, &bmcr);

	if (bp->autoneg & AUTONEG_SPEED) {
		u32 adv_reg, adv1000_reg;
		u32 new_adv_reg = 0;
		u32 new_adv1000_reg = 0;

		/* Keep only the speed/pause bits we manage so the
		 * comparison below is meaningful. */
		bnx2_read_phy(bp, MII_ADVERTISE, &adv_reg);
		adv_reg &= (PHY_ALL_10_100_SPEED | ADVERTISE_PAUSE_CAP |
			ADVERTISE_PAUSE_ASYM);

		bnx2_read_phy(bp, MII_CTRL1000, &adv1000_reg);
		adv1000_reg &= PHY_ALL_1000_SPEED;

		/* Build the desired advertisement from ethtool bits. */
		if (bp->advertising & ADVERTISED_10baseT_Half)
			new_adv_reg |= ADVERTISE_10HALF;
		if (bp->advertising & ADVERTISED_10baseT_Full)
			new_adv_reg |= ADVERTISE_10FULL;
		if (bp->advertising & ADVERTISED_100baseT_Half)
			new_adv_reg |= ADVERTISE_100HALF;
		if (bp->advertising & ADVERTISED_100baseT_Full)
			new_adv_reg |= ADVERTISE_100FULL;
		if (bp->advertising & ADVERTISED_1000baseT_Full)
			new_adv1000_reg |= ADVERTISE_1000FULL;

		new_adv_reg |= ADVERTISE_CSMA;

		new_adv_reg |= bnx2_phy_get_pause_adv(bp);

		if ((adv1000_reg != new_adv1000_reg) ||
			(adv_reg != new_adv_reg) ||
			((bmcr & BMCR_ANENABLE) == 0)) {

			bnx2_write_phy(bp, MII_ADVERTISE, new_adv_reg);
			bnx2_write_phy(bp, MII_CTRL1000, new_adv1000_reg);
			bnx2_write_phy(bp, MII_BMCR, BMCR_ANRESTART |
				BMCR_ANENABLE);
		}
		else if (bp->link_up) {
			/* Flow ctrl may have changed from auto to forced */
			/* or vice-versa. */

			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);
		}
		return 0;
	}

	/* Forced speed/duplex path. */
	new_bmcr = 0;
	if (bp->req_line_speed == SPEED_100) {
		new_bmcr |= BMCR_SPEED100;
	}
	if (bp->req_duplex == DUPLEX_FULL) {
		new_bmcr |= BMCR_FULLDPLX;
	}
	if (new_bmcr != bmcr) {
		u32 bmsr;

		/* BMSR latches link-down; read twice for current
		 * state. */
		bnx2_read_phy(bp, MII_BMSR, &bmsr);
		bnx2_read_phy(bp, MII_BMSR, &bmsr);

		if (bmsr & BMSR_LSTATUS) {
			/* Force link down */
			bnx2_write_phy(bp, MII_BMCR, BMCR_LOOPBACK);
			spin_unlock_bh(&bp->phy_lock);
			msleep(50);
			spin_lock_bh(&bp->phy_lock);

			bnx2_read_phy(bp, MII_BMSR, &bmsr);
			bnx2_read_phy(bp, MII_BMSR, &bmsr);
		}

		bnx2_write_phy(bp, MII_BMCR, new_bmcr);

		/* Normally, the new speed is setup after the link has
		 * gone down and up again. In some cases, link will not go
		 * down so we need to set up the new speed here.
		 */
		if (bmsr & BMSR_LSTATUS) {
			bp->line_speed = bp->req_line_speed;
			bp->duplex = bp->req_duplex;
			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);
		}
	}
	return 0;
}
1236
1237 static int
1238 bnx2_setup_phy(struct bnx2 *bp)
1239 {
1240         if (bp->loopback == MAC_LOOPBACK)
1241                 return 0;
1242
1243         if (bp->phy_flags & PHY_SERDES_FLAG) {
1244                 return (bnx2_setup_serdes_phy(bp));
1245         }
1246         else {
1247                 return (bnx2_setup_copper_phy(bp));
1248         }
1249 }
1250
/* One-time initialization of the 5708 serdes PHY: select IEEE
 * register mapping, enable fiber mode with auto-detect, enable
 * parallel detect, optionally enable 2.5G, and apply board-specific
 * TX amplitude/control fixups from NVRAM config.  Returns 0.
 */
static int
bnx2_init_5708s_phy(struct bnx2 *bp)
{
	u32 val;

	/* Use the IEEE-style register map (DIG3 block). */
	bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG3);
	bnx2_write_phy(bp, BCM5708S_DIG_3_0, BCM5708S_DIG_3_0_USE_IEEE);
	bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);

	/* Fiber mode with medium auto-detection. */
	bnx2_read_phy(bp, BCM5708S_1000X_CTL1, &val);
	val |= BCM5708S_1000X_CTL1_FIBER_MODE | BCM5708S_1000X_CTL1_AUTODET_EN;
	bnx2_write_phy(bp, BCM5708S_1000X_CTL1, val);

	/* Enable parallel detection. */
	bnx2_read_phy(bp, BCM5708S_1000X_CTL2, &val);
	val |= BCM5708S_1000X_CTL2_PLLEL_DET_EN;
	bnx2_write_phy(bp, BCM5708S_1000X_CTL2, val);

	if (bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) {
		bnx2_read_phy(bp, BCM5708S_UP1, &val);
		val |= BCM5708S_UP1_2G5;
		bnx2_write_phy(bp, BCM5708S_UP1, val);
	}

	/* Early 5708 revisions need a stronger TX signal. */
	if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_B1)) {
		/* increase tx signal amplitude */
		bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
			       BCM5708S_BLK_ADDR_TX_MISC);
		bnx2_read_phy(bp, BCM5708S_TX_ACTL1, &val);
		val &= ~BCM5708S_TX_ACTL1_DRIVER_VCM;
		bnx2_write_phy(bp, BCM5708S_TX_ACTL1, val);
		bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);
	}

	/* Board-specific TX control value from shared memory; only
	 * applied on backplane designs. */
	val = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_CONFIG) &
	      BNX2_PORT_HW_CFG_CFG_TXCTL3_MASK;

	if (val) {
		u32 is_backplane;

		is_backplane = REG_RD_IND(bp, bp->shmem_base +
					  BNX2_SHARED_HW_CFG_CONFIG);
		if (is_backplane & BNX2_SHARED_HW_CFG_PHY_BACKPLANE) {
			bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
				       BCM5708S_BLK_ADDR_TX_MISC);
			bnx2_write_phy(bp, BCM5708S_TX_ACTL3, val);
			bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
				       BCM5708S_BLK_ADDR_DIG);
		}
	}
	return 0;
}
1304
/* One-time initialization of the 5706 serdes PHY.  Adjusts the PHY's
 * packet-length handling depending on whether jumbo frames (MTU >
 * 1500) are in use.  Registers 0x18/0x1c are vendor shadow/expansion
 * registers; the magic values are presumably from Broadcom reference
 * code — do not change without hardware documentation.  Returns 0.
 */
static int
bnx2_init_5706s_phy(struct bnx2 *bp)
{
	bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG;

	if (CHIP_NUM(bp) == CHIP_NUM_5706)
		REG_WR(bp, BNX2_MISC_GP_HW_CTL0, 0x300);

	if (bp->dev->mtu > 1500) {
		u32 val;

		/* Set extended packet length bit */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, (val & 0xfff8) | 0x4000);

		bnx2_write_phy(bp, 0x1c, 0x6c00);
		bnx2_read_phy(bp, 0x1c, &val);
		bnx2_write_phy(bp, 0x1c, (val & 0x3ff) | 0xec02);
	}
	else {
		u32 val;

		/* Clear the extended packet length bit for normal
		 * MTU. */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, val & ~0x4007);

		bnx2_write_phy(bp, 0x1c, 0x6c00);
		bnx2_read_phy(bp, 0x1c, &val);
		bnx2_write_phy(bp, 0x1c, (val & 0x3fd) | 0xec00);
	}

	return 0;
}
1339
/* One-time initialization of the copper PHY: apply the CRC fix and
 * early-DAC workarounds when flagged, set up extended packet length
 * for jumbo MTU, and enable ethernet@wirespeed.  The 0x10/0x15/0x17/
 * 0x18 registers are vendor shadow registers with values presumably
 * from Broadcom reference code.  Returns 0.
 */
static int
bnx2_init_copper_phy(struct bnx2 *bp)
{
	u32 val;

	/* Workaround sequence for a CRC-related PHY erratum. */
	if (bp->phy_flags & PHY_CRC_FIX_FLAG) {
		bnx2_write_phy(bp, 0x18, 0x0c00);
		bnx2_write_phy(bp, 0x17, 0x000a);
		bnx2_write_phy(bp, 0x15, 0x310b);
		bnx2_write_phy(bp, 0x17, 0x201f);
		bnx2_write_phy(bp, 0x15, 0x9506);
		bnx2_write_phy(bp, 0x17, 0x401f);
		bnx2_write_phy(bp, 0x15, 0x14e2);
		bnx2_write_phy(bp, 0x18, 0x0400);
	}

	/* Disable early DAC via the DSP expansion register. */
	if (bp->phy_flags & PHY_DIS_EARLY_DAC_FLAG) {
		bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS,
			       MII_BNX2_DSP_EXPAND_REG | 0x8);
		bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &val);
		val &= ~(1 << 8);
		bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val);
	}

	if (bp->dev->mtu > 1500) {
		/* Set extended packet length bit */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, val | 0x4000);

		bnx2_read_phy(bp, 0x10, &val);
		bnx2_write_phy(bp, 0x10, val | 0x1);
	}
	else {
		/* Clear the extended packet length bits for normal
		 * MTU. */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, val & ~0x4007);

		bnx2_read_phy(bp, 0x10, &val);
		bnx2_write_phy(bp, 0x10, val & ~0x1);
	}

	/* ethernet@wirespeed */
	bnx2_write_phy(bp, 0x18, 0x7007);
	bnx2_read_phy(bp, 0x18, &val);
	bnx2_write_phy(bp, 0x18, val | (1 << 15) | (1 << 4));
	return 0;
}
1388
1389
/* Full PHY bring-up: set link-ready interrupt mode, reset the PHY,
 * read and cache the PHY ID, run the chip/media-specific init, then
 * apply the current speed/duplex/autoneg configuration.  Returns the
 * status of the media-specific init (bnx2_reset_phy/bnx2_setup_phy
 * errors are not propagated).
 */
static int
bnx2_init_phy(struct bnx2 *bp)
{
	u32 val;
	int rc = 0;

	bp->phy_flags &= ~PHY_INT_MODE_MASK_FLAG;
	bp->phy_flags |= PHY_INT_MODE_LINK_READY_FLAG;

	/* Enable link-change attention from the EMAC. */
	REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);

	bnx2_reset_phy(bp);

	/* Cache the 32-bit PHY ID (PHYSID1 high, PHYSID2 low). */
	bnx2_read_phy(bp, MII_PHYSID1, &val);
	bp->phy_id = val << 16;
	bnx2_read_phy(bp, MII_PHYSID2, &val);
	bp->phy_id |= val & 0xffff;

	if (bp->phy_flags & PHY_SERDES_FLAG) {
		if (CHIP_NUM(bp) == CHIP_NUM_5706)
			rc = bnx2_init_5706s_phy(bp);
		else if (CHIP_NUM(bp) == CHIP_NUM_5708)
			rc = bnx2_init_5708s_phy(bp);
	}
	else {
		rc = bnx2_init_copper_phy(bp);
	}

	bnx2_setup_phy(bp);

	return rc;
}
1422
1423 static int
1424 bnx2_set_mac_loopback(struct bnx2 *bp)
1425 {
1426         u32 mac_mode;
1427
1428         mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
1429         mac_mode &= ~BNX2_EMAC_MODE_PORT;
1430         mac_mode |= BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK;
1431         REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
1432         bp->link_up = 1;
1433         return 0;
1434 }
1435
1436 static int bnx2_test_link(struct bnx2 *);
1437
/* Put the PHY into loopback at 1000/full for self-test, wait (up to
 * ~1 s) for the link to come up, then force the EMAC into GMII mode
 * with the link marked up.  Returns 0, or the error from the BMCR
 * write.
 */
static int
bnx2_set_phy_loopback(struct bnx2 *bp)
{
	u32 mac_mode;
	int rc, i;

	spin_lock_bh(&bp->phy_lock);
	rc = bnx2_write_phy(bp, MII_BMCR, BMCR_LOOPBACK | BMCR_FULLDPLX |
			    BMCR_SPEED1000);
	spin_unlock_bh(&bp->phy_lock);
	if (rc)
		return rc;

	/* Poll for link; 10 tries at 100 ms each. */
	for (i = 0; i < 10; i++) {
		if (bnx2_test_link(bp) == 0)
			break;
		msleep(100);
	}

	/* Clear any loopback/force bits in the EMAC and select GMII. */
	mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
	mac_mode &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
		      BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
		      BNX2_EMAC_MODE_25G_MODE);

	mac_mode |= BNX2_EMAC_MODE_PORT_GMII;
	REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
	bp->link_up = 1;
	return 0;
}
1467
/* Send a driver->firmware mailbox message (tagged with an incrementing
 * sequence number) and wait for the firmware to acknowledge it.
 * WAIT0-class messages return success without requiring an ack.  On
 * timeout, notifies the firmware and returns -EBUSY (silently if
 * @silent); returns -EIO if the firmware reports a bad status, else 0.
 */
static int
bnx2_fw_sync(struct bnx2 *bp, u32 msg_data, int silent)
{
	int i;
	u32 val;

	/* Tag the message with the next sequence number so the ack
	 * can be matched below. */
	bp->fw_wr_seq++;
	msg_data |= bp->fw_wr_seq;

	REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_MB, msg_data);

	/* wait for an acknowledgement. */
	for (i = 0; i < (FW_ACK_TIME_OUT_MS / 10); i++) {
		msleep(10);

		val = REG_RD_IND(bp, bp->shmem_base + BNX2_FW_MB);

		if ((val & BNX2_FW_MSG_ACK) == (msg_data & BNX2_DRV_MSG_SEQ))
			break;
	}
	/* WAIT0 messages don't need an ack or status check. */
	if ((msg_data & BNX2_DRV_MSG_DATA) == BNX2_DRV_MSG_DATA_WAIT0)
		return 0;

	/* If we timed out, inform the firmware that this is the case. */
	if ((val & BNX2_FW_MSG_ACK) != (msg_data & BNX2_DRV_MSG_SEQ)) {
		if (!silent)
			printk(KERN_ERR PFX "fw sync timeout, reset code = "
					    "%x\n", msg_data);

		msg_data &= ~BNX2_DRV_MSG_CODE;
		msg_data |= BNX2_DRV_MSG_CODE_FW_TIMEOUT;

		REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_MB, msg_data);

		return -EBUSY;
	}

	if ((val & BNX2_FW_MSG_STATUS_MASK) != BNX2_FW_MSG_STATUS_OK)
		return -EIO;

	return 0;
}
1510
/* Program the 5709's host page table with the DMA addresses of the
 * pre-allocated context pages (bp->ctx_blk_mapping) and enable the
 * context engine.  Each page-table write is polled for completion.
 * Returns 0 on success, -EBUSY if a write request never completes.
 */
static int
bnx2_init_5709_context(struct bnx2 *bp)
{
	int i, ret = 0;
	u32 val;

	/* Enable the context engine, request memory init, and encode
	 * the page size (relative to 256 bytes) in bits 16+. */
	val = BNX2_CTX_COMMAND_ENABLED | BNX2_CTX_COMMAND_MEM_INIT | (1 << 12);
	val |= (BCM_PAGE_BITS - 8) << 16;
	REG_WR(bp, BNX2_CTX_COMMAND, val);
	for (i = 0; i < bp->ctx_pages; i++) {
		int j;

		/* Low 32 bits of the page DMA address + valid bit,
		 * then the high 32 bits. */
		REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA0,
		       (bp->ctx_blk_mapping[i] & 0xffffffff) |
		       BNX2_CTX_HOST_PAGE_TBL_DATA0_VALID);
		REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA1,
		       (u64) bp->ctx_blk_mapping[i] >> 32);
		REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL, i |
		       BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ);
		/* Poll (up to 10 x 5 us) for the write to complete. */
		for (j = 0; j < 10; j++) {

			val = REG_RD(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL);
			if (!(val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ))
				break;
			udelay(5);
		}
		if (val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ) {
			ret = -EBUSY;
			break;
		}
	}
	return ret;
}
1544
/* Zero out the on-chip context memory for all 96 connection IDs
 * (non-5709 chips).  5706 A0 has a CID addressing erratum that
 * requires remapping certain virtual CIDs to different physical
 * CIDs before zeroing.
 */
static void
bnx2_init_context(struct bnx2 *bp)
{
	u32 vcid;

	vcid = 96;
	while (vcid) {
		u32 vcid_addr, pcid_addr, offset;

		vcid--;

		if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
			/* A0 erratum: CIDs with bit 3 set map to a
			 * different physical CID. */
			u32 new_vcid;

			vcid_addr = GET_PCID_ADDR(vcid);
			if (vcid & 0x8) {
				new_vcid = 0x60 + (vcid & 0xf0) + (vcid & 0x7);
			}
			else {
				new_vcid = vcid;
			}
			pcid_addr = GET_PCID_ADDR(new_vcid);
		}
		else {
			vcid_addr = GET_CID_ADDR(vcid);
			pcid_addr = vcid_addr;
		}

		/* Map the context window to this CID's page. */
		REG_WR(bp, BNX2_CTX_VIRT_ADDR, 0x00);
		REG_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr);

		/* Zero out the context. */
		for (offset = 0; offset < PHY_CTX_SIZE; offset += 4) {
			CTX_WR(bp, 0x00, offset, 0);
		}

		REG_WR(bp, BNX2_CTX_VIRT_ADDR, vcid_addr);
		REG_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr);
	}
}
1585
/* Hardware workaround: drain the chip's internal RX buffer (mbuf)
 * pool, discard buffers flagged bad (bit 9 of the allocated value),
 * and return only the good ones to the pool.  Returns 0 on success,
 * -ENOMEM if the temporary array cannot be allocated.
 * NOTE(review): good_mbuf holds at most 512 entries and the loop is
 * bounded only by the hardware free count — presumably the pool never
 * exceeds 512 buffers; confirm against chip documentation.
 */
static int
bnx2_alloc_bad_rbuf(struct bnx2 *bp)
{
	u16 *good_mbuf;
	u32 good_mbuf_cnt;
	u32 val;

	good_mbuf = kmalloc(512 * sizeof(u16), GFP_KERNEL);
	if (good_mbuf == NULL) {
		printk(KERN_ERR PFX "Failed to allocate memory in "
				    "bnx2_alloc_bad_rbuf\n");
		return -ENOMEM;
	}

	REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
		BNX2_MISC_ENABLE_SET_BITS_RX_MBUF_ENABLE);

	good_mbuf_cnt = 0;

	/* Allocate a bunch of mbufs and save the good ones in an array. */
	val = REG_RD_IND(bp, BNX2_RBUF_STATUS1);
	while (val & BNX2_RBUF_STATUS1_FREE_COUNT) {
		REG_WR_IND(bp, BNX2_RBUF_COMMAND, BNX2_RBUF_COMMAND_ALLOC_REQ);

		val = REG_RD_IND(bp, BNX2_RBUF_FW_BUF_ALLOC);

		val &= BNX2_RBUF_FW_BUF_ALLOC_VALUE;

		/* The addresses with Bit 9 set are bad memory blocks. */
		if (!(val & (1 << 9))) {
			good_mbuf[good_mbuf_cnt] = (u16) val;
			good_mbuf_cnt++;
		}

		val = REG_RD_IND(bp, BNX2_RBUF_STATUS1);
	}

	/* Free the good ones back to the mbuf pool thus discarding
	 * all the bad ones. */
	while (good_mbuf_cnt) {
		good_mbuf_cnt--;

		val = good_mbuf[good_mbuf_cnt];
		/* Encode the buffer value in the format the free
		 * register expects. */
		val = (val << 9) | val | 1;

		REG_WR_IND(bp, BNX2_RBUF_FW_BUF_FREE, val);
	}
	kfree(good_mbuf);
	return 0;
}
1636
1637 static void
1638 bnx2_set_mac_addr(struct bnx2 *bp)
1639 {
1640         u32 val;
1641         u8 *mac_addr = bp->dev->dev_addr;
1642
1643         val = (mac_addr[0] << 8) | mac_addr[1];
1644
1645         REG_WR(bp, BNX2_EMAC_MAC_MATCH0, val);
1646
1647         val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
1648                 (mac_addr[4] << 8) | mac_addr[5];
1649
1650         REG_WR(bp, BNX2_EMAC_MAC_MATCH1, val);
1651 }
1652
/* Allocate an skb for RX ring slot @index, align its data buffer to
 * BNX2_RX_ALIGN, DMA-map it, and publish the mapping in both the
 * software ring entry and the hardware rx_bd.  Advances
 * rx_prod_bseq.  Returns 0 on success, -ENOMEM if the skb
 * allocation fails.
 */
static inline int
bnx2_alloc_rx_skb(struct bnx2 *bp, u16 index)
{
	struct sk_buff *skb;
	struct sw_bd *rx_buf = &bp->rx_buf_ring[index];
	dma_addr_t mapping;
	struct rx_bd *rxbd = &bp->rx_desc_ring[RX_RING(index)][RX_IDX(index)];
	unsigned long align;

	skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
	if (skb == NULL) {
		return -ENOMEM;
	}

	/* Align skb->data to BNX2_RX_ALIGN if it isn't already. */
	if (unlikely((align = (unsigned long) skb->data & (BNX2_RX_ALIGN - 1))))
		skb_reserve(skb, BNX2_RX_ALIGN - align);

	mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_use_size,
		PCI_DMA_FROMDEVICE);

	rx_buf->skb = skb;
	pci_unmap_addr_set(rx_buf, mapping, mapping);

	/* Hardware descriptor takes the DMA address split into
	 * high/low 32-bit halves. */
	rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
	rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;

	bp->rx_prod_bseq += bp->rx_buf_use_size;

	return 0;
}
1683
1684 static void
1685 bnx2_phy_int(struct bnx2 *bp)
1686 {
1687         u32 new_link_state, old_link_state;
1688
1689         new_link_state = bp->status_blk->status_attn_bits &
1690                 STATUS_ATTN_BITS_LINK_STATE;
1691         old_link_state = bp->status_blk->status_attn_bits_ack &
1692                 STATUS_ATTN_BITS_LINK_STATE;
1693         if (new_link_state != old_link_state) {
1694                 if (new_link_state) {
1695                         REG_WR(bp, BNX2_PCICFG_STATUS_BIT_SET_CMD,
1696                                 STATUS_ATTN_BITS_LINK_STATE);
1697                 }
1698                 else {
1699                         REG_WR(bp, BNX2_PCICFG_STATUS_BIT_CLEAR_CMD,
1700                                 STATUS_ATTN_BITS_LINK_STATE);
1701                 }
1702                 bnx2_set_link(bp);
1703         }
1704 }
1705
/* Reclaim completed transmit descriptors (runs from NAPI poll).
 *
 * Walks the tx ring from the driver's consumer index up to the
 * hardware consumer index reported in the status block, unmaps each
 * packet's DMA buffers and frees the skb, then wakes the tx queue if
 * it was stopped and enough descriptors have been freed.
 */
static void
bnx2_tx_int(struct bnx2 *bp)
{
        struct status_block *sblk = bp->status_blk;
        u16 hw_cons, sw_cons, sw_ring_cons;
        int tx_free_bd = 0;

        hw_cons = bp->hw_tx_cons = sblk->status_tx_quick_consumer_index0;
        /* The last index of each ring page is never a completion (the
         * pages are chained), so step past it.
         */
        if ((hw_cons & MAX_TX_DESC_CNT) == MAX_TX_DESC_CNT) {
                hw_cons++;
        }
        sw_cons = bp->tx_cons;

        while (sw_cons != hw_cons) {
                struct sw_bd *tx_buf;
                struct sk_buff *skb;
                int i, last;

                sw_ring_cons = TX_RING_IDX(sw_cons);

                tx_buf = &bp->tx_buf_ring[sw_ring_cons];
                skb = tx_buf->skb;

                /* partial BD completions possible with TSO packets */
                if (skb_is_gso(skb)) {
                        u16 last_idx, last_ring_idx;

                        last_idx = sw_cons +
                                skb_shinfo(skb)->nr_frags + 1;
                        last_ring_idx = sw_ring_cons +
                                skb_shinfo(skb)->nr_frags + 1;
                        if (unlikely(last_ring_idx >= MAX_TX_DESC_CNT)) {
                                last_idx++;
                        }
                        /* If the packet's final BD has not completed
                         * yet, leave the whole packet on the ring and
                         * retry on the next completion.
                         */
                        if (((s16) ((s16) last_idx - (s16) hw_cons)) > 0) {
                                break;
                        }
                }

                pci_unmap_single(bp->pdev, pci_unmap_addr(tx_buf, mapping),
                        skb_headlen(skb), PCI_DMA_TODEVICE);

                tx_buf->skb = NULL;
                last = skb_shinfo(skb)->nr_frags;

                /* Unmap the fragment BDs that follow the head BD. */
                for (i = 0; i < last; i++) {
                        sw_cons = NEXT_TX_BD(sw_cons);

                        pci_unmap_page(bp->pdev,
                                pci_unmap_addr(
                                        &bp->tx_buf_ring[TX_RING_IDX(sw_cons)],
                                        mapping),
                                skb_shinfo(skb)->frags[i].size,
                                PCI_DMA_TODEVICE);
                }

                sw_cons = NEXT_TX_BD(sw_cons);

                tx_free_bd += last + 1;

                dev_kfree_skb(skb);

                /* Re-read the hardware index; more packets may have
                 * completed while this one was being freed.
                 */
                hw_cons = bp->hw_tx_cons =
                        sblk->status_tx_quick_consumer_index0;

                if ((hw_cons & MAX_TX_DESC_CNT) == MAX_TX_DESC_CNT) {
                        hw_cons++;
                }
        }

        bp->tx_cons = sw_cons;
        /* Need to make the tx_cons update visible to bnx2_start_xmit()
         * before checking for netif_queue_stopped().  Without the
         * memory barrier, there is a small possibility that bnx2_start_xmit()
         * will miss it and cause the queue to be stopped forever.
         */
        smp_mb();

        if (unlikely(netif_queue_stopped(bp->dev)) &&
                     (bnx2_tx_avail(bp) > bp->tx_wake_thresh)) {
                netif_tx_lock(bp->dev);
                /* Re-check under the tx lock to close the race with a
                 * concurrent bnx2_start_xmit() stopping the queue.
                 */
                if ((netif_queue_stopped(bp->dev)) &&
                    (bnx2_tx_avail(bp) > bp->tx_wake_thresh))
                        netif_wake_queue(bp->dev);
                netif_tx_unlock(bp->dev);
        }
}
1793
/* Recycle an rx buffer whose packet was not passed up the stack.
 *
 * Moves the skb (and its DMA mapping) from the consumer slot back to
 * the current producer slot so the hardware can refill it.  Only the
 * header area the CPU may have touched is synced back to the device.
 */
static inline void
bnx2_reuse_rx_skb(struct bnx2 *bp, struct sk_buff *skb,
        u16 cons, u16 prod)
{
        struct sw_bd *cons_rx_buf, *prod_rx_buf;
        struct rx_bd *cons_bd, *prod_bd;

        cons_rx_buf = &bp->rx_buf_ring[cons];
        prod_rx_buf = &bp->rx_buf_ring[prod];

        pci_dma_sync_single_for_device(bp->pdev,
                pci_unmap_addr(cons_rx_buf, mapping),
                bp->rx_offset + RX_COPY_THRESH, PCI_DMA_FROMDEVICE);

        /* The producer byte sequence advances even when cons == prod. */
        bp->rx_prod_bseq += bp->rx_buf_use_size;

        prod_rx_buf->skb = skb;

        /* Same slot: the mapping and BD address are already correct. */
        if (cons == prod)
                return;

        pci_unmap_addr_set(prod_rx_buf, mapping,
                        pci_unmap_addr(cons_rx_buf, mapping));

        cons_bd = &bp->rx_desc_ring[RX_RING(cons)][RX_IDX(cons)];
        prod_bd = &bp->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
        prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
        prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;
}
1823
/* NAPI receive processing: handle up to @budget completed rx packets.
 *
 * For each completed BD, either hand the skb up the stack (allocating
 * a replacement buffer), copy small packets into a fresh skb, or
 * recycle the buffer on error.  Returns the number of packets
 * processed.
 */
static int
bnx2_rx_int(struct bnx2 *bp, int budget)
{
        struct status_block *sblk = bp->status_blk;
        u16 hw_cons, sw_cons, sw_ring_cons, sw_prod, sw_ring_prod;
        struct l2_fhdr *rx_hdr;
        int rx_pkt = 0;

        hw_cons = bp->hw_rx_cons = sblk->status_rx_quick_consumer_index0;
        /* The last index of each ring page is never a completion (the
         * pages are chained), so step past it.
         */
        if ((hw_cons & MAX_RX_DESC_CNT) == MAX_RX_DESC_CNT) {
                hw_cons++;
        }
        sw_cons = bp->rx_cons;
        sw_prod = bp->rx_prod;

        /* Memory barrier necessary as speculative reads of the rx
         * buffer can be ahead of the index in the status block
         */
        rmb();
        while (sw_cons != hw_cons) {
                unsigned int len;
                u32 status;
                struct sw_bd *rx_buf;
                struct sk_buff *skb;
                dma_addr_t dma_addr;

                sw_ring_cons = RX_RING_IDX(sw_cons);
                sw_ring_prod = RX_RING_IDX(sw_prod);

                rx_buf = &bp->rx_buf_ring[sw_ring_cons];
                skb = rx_buf->skb;

                rx_buf->skb = NULL;

                dma_addr = pci_unmap_addr(rx_buf, mapping);

                /* Sync only the frame header area; the full buffer is
                 * unmapped later if the skb is passed up.
                 */
                pci_dma_sync_single_for_cpu(bp->pdev, dma_addr,
                        bp->rx_offset + RX_COPY_THRESH, PCI_DMA_FROMDEVICE);

                /* The hardware prepends an l2_fhdr status header to the
                 * packet data.
                 */
                rx_hdr = (struct l2_fhdr *) skb->data;
                /* Hardware length includes the 4-byte frame CRC; drop it. */
                len = rx_hdr->l2_fhdr_pkt_len - 4;

                if ((status = rx_hdr->l2_fhdr_status) &
                        (L2_FHDR_ERRORS_BAD_CRC |
                        L2_FHDR_ERRORS_PHY_DECODE |
                        L2_FHDR_ERRORS_ALIGNMENT |
                        L2_FHDR_ERRORS_TOO_SHORT |
                        L2_FHDR_ERRORS_GIANT_FRAME)) {

                        goto reuse_rx;
                }

                /* Since we don't have a jumbo ring, copy small packets
                 * if mtu > 1500
                 */
                if ((bp->dev->mtu > 1500) && (len <= RX_COPY_THRESH)) {
                        struct sk_buff *new_skb;

                        new_skb = netdev_alloc_skb(bp->dev, len + 2);
                        if (new_skb == NULL)
                                goto reuse_rx;

                        /* aligned copy */
                        skb_copy_from_linear_data_offset(skb, bp->rx_offset - 2,
                                      new_skb->data, len + 2);
                        skb_reserve(new_skb, 2);
                        skb_put(new_skb, len);

                        /* The original buffer goes back to the ring. */
                        bnx2_reuse_rx_skb(bp, skb,
                                sw_ring_cons, sw_ring_prod);

                        skb = new_skb;
                }
                else if (bnx2_alloc_rx_skb(bp, sw_ring_prod) == 0) {
                        /* Replacement buffer allocated: unmap the full
                         * buffer and hand this skb up the stack.
                         */
                        pci_unmap_single(bp->pdev, dma_addr,
                                bp->rx_buf_use_size, PCI_DMA_FROMDEVICE);

                        skb_reserve(skb, bp->rx_offset);
                        skb_put(skb, len);
                }
                else {
reuse_rx:
                        /* Error or allocation failure: recycle the
                         * buffer and drop the packet.
                         */
                        bnx2_reuse_rx_skb(bp, skb,
                                sw_ring_cons, sw_ring_prod);
                        goto next_rx;
                }

                skb->protocol = eth_type_trans(skb, bp->dev);

                /* Drop oversize frames unless they are VLAN tagged
                 * (0x8100), which legitimately adds 4 bytes.
                 */
                if ((len > (bp->dev->mtu + ETH_HLEN)) &&
                        (ntohs(skb->protocol) != 0x8100)) {

                        dev_kfree_skb(skb);
                        goto next_rx;

                }

                skb->ip_summed = CHECKSUM_NONE;
                if (bp->rx_csum &&
                        (status & (L2_FHDR_STATUS_TCP_SEGMENT |
                        L2_FHDR_STATUS_UDP_DATAGRAM))) {

                        if (likely((status & (L2_FHDR_ERRORS_TCP_XSUM |
                                              L2_FHDR_ERRORS_UDP_XSUM)) == 0))
                                skb->ip_summed = CHECKSUM_UNNECESSARY;
                }

#ifdef BCM_VLAN
                if ((status & L2_FHDR_STATUS_L2_VLAN_TAG) && (bp->vlgrp != 0)) {
                        vlan_hwaccel_receive_skb(skb, bp->vlgrp,
                                rx_hdr->l2_fhdr_vlan_tag);
                }
                else
#endif
                        netif_receive_skb(skb);

                bp->dev->last_rx = jiffies;
                rx_pkt++;

next_rx:
                sw_cons = NEXT_RX_BD(sw_cons);
                sw_prod = NEXT_RX_BD(sw_prod);

                if ((rx_pkt == budget))
                        break;

                /* Refresh hw_cons to see if there is new work */
                if (sw_cons == hw_cons) {
                        hw_cons = bp->hw_rx_cons =
                                sblk->status_rx_quick_consumer_index0;
                        if ((hw_cons & MAX_RX_DESC_CNT) == MAX_RX_DESC_CNT)
                                hw_cons++;
                        rmb();
                }
        }
        bp->rx_cons = sw_cons;
        bp->rx_prod = sw_prod;

        /* Tell the hardware about the new producer index and byte
         * sequence so it can refill the returned buffers.
         */
        REG_WR16(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BDIDX, sw_prod);

        REG_WR(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BSEQ, bp->rx_prod_bseq);

        mmiowb();

        return rx_pkt;

}
1971
1972 /* MSI ISR - The only difference between this and the INTx ISR
1973  * is that the MSI interrupt is always serviced.
1974  */
1975 static irqreturn_t
1976 bnx2_msi(int irq, void *dev_instance)
1977 {
1978         struct net_device *dev = dev_instance;
1979         struct bnx2 *bp = netdev_priv(dev);
1980
1981         prefetch(bp->status_blk);
1982         REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
1983                 BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
1984                 BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
1985
1986         /* Return here if interrupt is disabled. */
1987         if (unlikely(atomic_read(&bp->intr_sem) != 0))
1988                 return IRQ_HANDLED;
1989
1990         netif_rx_schedule(dev);
1991
1992         return IRQ_HANDLED;
1993 }
1994
1995 static irqreturn_t
1996 bnx2_interrupt(int irq, void *dev_instance)
1997 {
1998         struct net_device *dev = dev_instance;
1999         struct bnx2 *bp = netdev_priv(dev);
2000
2001         /* When using INTx, it is possible for the interrupt to arrive
2002          * at the CPU before the status block posted prior to the
2003          * interrupt. Reading a register will flush the status block.
2004          * When using MSI, the MSI message will always complete after
2005          * the status block write.
2006          */
2007         if ((bp->status_blk->status_idx == bp->last_status_idx) &&
2008             (REG_RD(bp, BNX2_PCICFG_MISC_STATUS) &
2009              BNX2_PCICFG_MISC_STATUS_INTA_VALUE))
2010                 return IRQ_NONE;
2011
2012         REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
2013                 BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
2014                 BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
2015
2016         /* Return here if interrupt is shared and is disabled. */
2017         if (unlikely(atomic_read(&bp->intr_sem) != 0))
2018                 return IRQ_HANDLED;
2019
2020         netif_rx_schedule(dev);
2021
2022         return IRQ_HANDLED;
2023 }
2024
2025 static inline int
2026 bnx2_has_work(struct bnx2 *bp)
2027 {
2028         struct status_block *sblk = bp->status_blk;
2029
2030         if ((sblk->status_rx_quick_consumer_index0 != bp->hw_rx_cons) ||
2031             (sblk->status_tx_quick_consumer_index0 != bp->hw_tx_cons))
2032                 return 1;
2033
2034         if ((sblk->status_attn_bits & STATUS_ATTN_BITS_LINK_STATE) !=
2035             (sblk->status_attn_bits_ack & STATUS_ATTN_BITS_LINK_STATE))
2036                 return 1;
2037
2038         return 0;
2039 }
2040
/* NAPI poll callback (old-style NAPI: returns 0 when done, 1 to be
 * polled again).  Handles link attentions, tx completions, and up to
 * the smaller of *budget and dev->quota rx packets.
 */
static int
bnx2_poll(struct net_device *dev, int *budget)
{
        struct bnx2 *bp = netdev_priv(dev);

        /* Attention bit differing from its ack bit means the link
         * state changed since it was last serviced.
         */
        if ((bp->status_blk->status_attn_bits &
                STATUS_ATTN_BITS_LINK_STATE) !=
                (bp->status_blk->status_attn_bits_ack &
                STATUS_ATTN_BITS_LINK_STATE)) {

                spin_lock(&bp->phy_lock);
                bnx2_phy_int(bp);
                spin_unlock(&bp->phy_lock);

                /* This is needed to take care of transient status
                 * during link changes.
                 */
                REG_WR(bp, BNX2_HC_COMMAND,
                       bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
                REG_RD(bp, BNX2_HC_COMMAND);
        }

        if (bp->status_blk->status_tx_quick_consumer_index0 != bp->hw_tx_cons)
                bnx2_tx_int(bp);

        if (bp->status_blk->status_rx_quick_consumer_index0 != bp->hw_rx_cons) {
                int orig_budget = *budget;
                int work_done;

                if (orig_budget > dev->quota)
                        orig_budget = dev->quota;

                work_done = bnx2_rx_int(bp, orig_budget);
                *budget -= work_done;
                dev->quota -= work_done;
        }

        /* Record the status index we have serviced; the ISR uses it to
         * detect spurious INTx interrupts.
         */
        bp->last_status_idx = bp->status_blk->status_idx;
        rmb();

        if (!bnx2_has_work(bp)) {
                netif_rx_complete(dev);
                /* Re-enable interrupts.  With MSI a single ack write
                 * suffices.
                 */
                if (likely(bp->flags & USING_MSI_FLAG)) {
                        REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
                               BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
                               bp->last_status_idx);
                        return 0;
                }
                /* INTx path writes twice -- first with the mask bit,
                 * then without -- presumably a hardware ordering
                 * workaround; confirm against the programming guide.
                 */
                REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
                       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
                       BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
                       bp->last_status_idx);

                REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
                       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
                       bp->last_status_idx);
                return 0;
        }

        return 1;
}
2102
/* Called with rtnl_lock from vlan functions and also netif_tx_lock
 * from set_multicast.
 *
 * Programs the EMAC receive mode (promiscuous / VLAN tag stripping)
 * and the 256-bit multicast hash filter, then arms the RPM sort rules.
 */
static void
bnx2_set_rx_mode(struct net_device *dev)
{
        struct bnx2 *bp = netdev_priv(dev);
        u32 rx_mode, sort_mode;
        int i;

        spin_lock_bh(&bp->phy_lock);

        rx_mode = bp->rx_mode & ~(BNX2_EMAC_RX_MODE_PROMISCUOUS |
                                  BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG);
        sort_mode = 1 | BNX2_RPM_SORT_USER0_BC_EN;
#ifdef BCM_VLAN
        /* Keep (don't strip) VLAN tags only when no vlan group is
         * registered and ASF management firmware is not active.
         */
        if (!bp->vlgrp && !(bp->flags & ASF_ENABLE_FLAG))
                rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
#else
        if (!(bp->flags & ASF_ENABLE_FLAG))
                rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
#endif
        if (dev->flags & IFF_PROMISC) {
                /* Promiscuous mode. */
                rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
                sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN |
                             BNX2_RPM_SORT_USER0_PROM_VLAN;
        }
        else if (dev->flags & IFF_ALLMULTI) {
                /* Accept all multicast: set every hash bit. */
                for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
                        REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
                               0xffffffff);
                }
                sort_mode |= BNX2_RPM_SORT_USER0_MC_EN;
        }
        else {
                /* Accept one or more multicast(s). */
                struct dev_mc_list *mclist;
                u32 mc_filter[NUM_MC_HASH_REGISTERS];
                u32 regidx;
                u32 bit;
                u32 crc;

                memset(mc_filter, 0, 4 * NUM_MC_HASH_REGISTERS);

                for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
                     i++, mclist = mclist->next) {

                        /* Low 8 CRC bits select one of 256 hash bits:
                         * bits 7:5 pick the register, bits 4:0 the bit.
                         */
                        crc = ether_crc_le(ETH_ALEN, mclist->dmi_addr);
                        bit = crc & 0xff;
                        regidx = (bit & 0xe0) >> 5;
                        bit &= 0x1f;
                        mc_filter[regidx] |= (1 << bit);
                }

                for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
                        REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
                               mc_filter[i]);
                }

                sort_mode |= BNX2_RPM_SORT_USER0_MC_HSH_EN;
        }

        if (rx_mode != bp->rx_mode) {
                bp->rx_mode = rx_mode;
                REG_WR(bp, BNX2_EMAC_RX_MODE, rx_mode);
        }

        /* Disarm, reprogram, then re-arm the sort rules. */
        REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
        REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode);
        REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode | BNX2_RPM_SORT_USER0_ENA);

        spin_unlock_bh(&bp->phy_lock);
}
2177
2178 #define FW_BUF_SIZE     0x8000
2179
2180 static int
2181 bnx2_gunzip_init(struct bnx2 *bp)
2182 {
2183         if ((bp->gunzip_buf = vmalloc(FW_BUF_SIZE)) == NULL)
2184                 goto gunzip_nomem1;
2185
2186         if ((bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL)) == NULL)
2187                 goto gunzip_nomem2;
2188
2189         bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(), GFP_KERNEL);
2190         if (bp->strm->workspace == NULL)
2191                 goto gunzip_nomem3;
2192
2193         return 0;
2194
2195 gunzip_nomem3:
2196         kfree(bp->strm);
2197         bp->strm = NULL;
2198
2199 gunzip_nomem2:
2200         vfree(bp->gunzip_buf);
2201         bp->gunzip_buf = NULL;
2202
2203 gunzip_nomem1:
2204         printk(KERN_ERR PFX "%s: Cannot allocate firmware buffer for "
2205                             "uncompression.\n", bp->dev->name);
2206         return -ENOMEM;
2207 }
2208
2209 static void
2210 bnx2_gunzip_end(struct bnx2 *bp)
2211 {
2212         kfree(bp->strm->workspace);
2213
2214         kfree(bp->strm);
2215         bp->strm = NULL;
2216
2217         if (bp->gunzip_buf) {
2218                 vfree(bp->gunzip_buf);
2219                 bp->gunzip_buf = NULL;
2220         }
2221 }
2222
2223 static int
2224 bnx2_gunzip(struct bnx2 *bp, u8 *zbuf, int len, void **outbuf, int *outlen)
2225 {
2226         int n, rc;
2227
2228         /* check gzip header */
2229         if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED))
2230                 return -EINVAL;
2231
2232         n = 10;
2233
2234 #define FNAME   0x8
2235         if (zbuf[3] & FNAME)
2236                 while ((zbuf[n++] != 0) && (n < len));
2237
2238         bp->strm->next_in = zbuf + n;
2239         bp->strm->avail_in = len - n;
2240         bp->strm->next_out = bp->gunzip_buf;
2241         bp->strm->avail_out = FW_BUF_SIZE;
2242
2243         rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
2244         if (rc != Z_OK)
2245                 return rc;
2246
2247         rc = zlib_inflate(bp->strm, Z_FINISH);
2248
2249         *outlen = FW_BUF_SIZE - bp->strm->avail_out;
2250         *outbuf = bp->gunzip_buf;
2251
2252         if ((rc != Z_OK) && (rc != Z_STREAM_END))
2253                 printk(KERN_ERR PFX "%s: Firmware decompression error: %s\n",
2254                        bp->dev->name, bp->strm->msg);
2255
2256         zlib_inflateEnd(bp->strm);
2257
2258         if (rc == Z_STREAM_END)
2259                 return 0;
2260
2261         return rc;
2262 }
2263
2264 static void
2265 load_rv2p_fw(struct bnx2 *bp, u32 *rv2p_code, u32 rv2p_code_len,
2266         u32 rv2p_proc)
2267 {
2268         int i;
2269         u32 val;
2270
2271
2272         for (i = 0; i < rv2p_code_len; i += 8) {
2273                 REG_WR(bp, BNX2_RV2P_INSTR_HIGH, cpu_to_le32(*rv2p_code));
2274                 rv2p_code++;
2275                 REG_WR(bp, BNX2_RV2P_INSTR_LOW, cpu_to_le32(*rv2p_code));
2276                 rv2p_code++;
2277
2278                 if (rv2p_proc == RV2P_PROC1) {
2279                         val = (i / 8) | BNX2_RV2P_PROC1_ADDR_CMD_RDWR;
2280                         REG_WR(bp, BNX2_RV2P_PROC1_ADDR_CMD, val);
2281                 }
2282                 else {
2283                         val = (i / 8) | BNX2_RV2P_PROC2_ADDR_CMD_RDWR;
2284                         REG_WR(bp, BNX2_RV2P_PROC2_ADDR_CMD, val);
2285                 }
2286         }
2287
2288         /* Reset the processor, un-stall is done later. */
2289         if (rv2p_proc == RV2P_PROC1) {
2290                 REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC1_RESET);
2291         }
2292         else {
2293                 REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC2_RESET);
2294         }
2295 }
2296
2297 static int
2298 load_cpu_fw(struct bnx2 *bp, struct cpu_reg *cpu_reg, struct fw_info *fw)
2299 {
2300         u32 offset;
2301         u32 val;
2302         int rc;
2303
2304         /* Halt the CPU. */
2305         val = REG_RD_IND(bp, cpu_reg->mode);
2306         val |= cpu_reg->mode_value_halt;
2307         REG_WR_IND(bp, cpu_reg->mode, val);
2308         REG_WR_IND(bp, cpu_reg->state, cpu_reg->state_value_clear);
2309
2310         /* Load the Text area. */
2311         offset = cpu_reg->spad_base + (fw->text_addr - cpu_reg->mips_view_base);
2312         if (fw->gz_text) {
2313                 u32 text_len;
2314                 void *text;
2315
2316                 rc = bnx2_gunzip(bp, fw->gz_text, fw->gz_text_len, &text,
2317                                  &text_len);
2318                 if (rc)
2319                         return rc;
2320
2321                 fw->text = text;
2322         }
2323         if (fw->gz_text) {
2324                 int j;
2325
2326                 for (j = 0; j < (fw->text_len / 4); j++, offset += 4) {
2327                         REG_WR_IND(bp, offset, cpu_to_le32(fw->text[j]));
2328                 }
2329         }
2330
2331         /* Load the Data area. */
2332         offset = cpu_reg->spad_base + (fw->data_addr - cpu_reg->mips_view_base);
2333         if (fw->data) {
2334                 int j;
2335
2336                 for (j = 0; j < (fw->data_len / 4); j++, offset += 4) {
2337                         REG_WR_IND(bp, offset, fw->data[j]);
2338                 }
2339         }
2340
2341         /* Load the SBSS area. */
2342         offset = cpu_reg->spad_base + (fw->sbss_addr - cpu_reg->mips_view_base);
2343         if (fw->sbss) {
2344                 int j;
2345
2346                 for (j = 0; j < (fw->sbss_len / 4); j++, offset += 4) {
2347                         REG_WR_IND(bp, offset, fw->sbss[j]);
2348                 }
2349         }
2350
2351         /* Load the BSS area. */
2352         offset = cpu_reg->spad_base + (fw->bss_addr - cpu_reg->mips_view_base);
2353         if (fw->bss) {
2354                 int j;
2355
2356                 for (j = 0; j < (fw->bss_len/4); j++, offset += 4) {
2357                         REG_WR_IND(bp, offset, fw->bss[j]);
2358                 }
2359         }
2360
2361         /* Load the Read-Only area. */
2362         offset = cpu_reg->spad_base +
2363                 (fw->rodata_addr - cpu_reg->mips_view_base);
2364         if (fw->rodata) {
2365                 int j;
2366
2367                 for (j = 0; j < (fw->rodata_len / 4); j++, offset += 4) {
2368                         REG_WR_IND(bp, offset, fw->rodata[j]);
2369                 }
2370         }
2371
2372         /* Clear the pre-fetch instruction. */
2373         REG_WR_IND(bp, cpu_reg->inst, 0);
2374         REG_WR_IND(bp, cpu_reg->pc, fw->start_addr);
2375
2376         /* Start the CPU. */
2377         val = REG_RD_IND(bp, cpu_reg->mode);
2378         val &= ~cpu_reg->mode_value_halt;
2379         REG_WR_IND(bp, cpu_reg->state, cpu_reg->state_value_clear);
2380         REG_WR_IND(bp, cpu_reg->mode, val);
2381
2382         return 0;
2383 }
2384
/* Load firmware into all on-chip processors: the two RV2P engines and
 * the RX, TX, TX patch-up, completion, and (5709 only) command CPUs.
 * The cpu_reg structure is refilled with each processor's register
 * addresses before every load_cpu_fw() call.  The gunzip scratch
 * buffers are set up once here and torn down on every exit path.
 * Returns 0 on success or a negative error from decompression/loading.
 */
static int
bnx2_init_cpus(struct bnx2 *bp)
{
        struct cpu_reg cpu_reg;
        struct fw_info *fw;
        int rc = 0;
        void *text;
        u32 text_len;

        if ((rc = bnx2_gunzip_init(bp)) != 0)
                return rc;

        /* Initialize the RV2P processor. */
        rc = bnx2_gunzip(bp, bnx2_rv2p_proc1, sizeof(bnx2_rv2p_proc1), &text,
                         &text_len);
        if (rc)
                goto init_cpu_err;

        load_rv2p_fw(bp, text, text_len, RV2P_PROC1);

        rc = bnx2_gunzip(bp, bnx2_rv2p_proc2, sizeof(bnx2_rv2p_proc2), &text,
                         &text_len);
        if (rc)
                goto init_cpu_err;

        load_rv2p_fw(bp, text, text_len, RV2P_PROC2);

        /* Initialize the RX Processor. */
        cpu_reg.mode = BNX2_RXP_CPU_MODE;
        cpu_reg.mode_value_halt = BNX2_RXP_CPU_MODE_SOFT_HALT;
        cpu_reg.mode_value_sstep = BNX2_RXP_CPU_MODE_STEP_ENA;
        cpu_reg.state = BNX2_RXP_CPU_STATE;
        cpu_reg.state_value_clear = 0xffffff;
        cpu_reg.gpr0 = BNX2_RXP_CPU_REG_FILE;
        cpu_reg.evmask = BNX2_RXP_CPU_EVENT_MASK;
        cpu_reg.pc = BNX2_RXP_CPU_PROGRAM_COUNTER;
        cpu_reg.inst = BNX2_RXP_CPU_INSTRUCTION;
        cpu_reg.bp = BNX2_RXP_CPU_HW_BREAKPOINT;
        cpu_reg.spad_base = BNX2_RXP_SCRATCH;
        cpu_reg.mips_view_base = 0x8000000;

        /* Each processor has chip-revision-specific firmware. */
        if (CHIP_NUM(bp) == CHIP_NUM_5709)
                fw = &bnx2_rxp_fw_09;
        else
                fw = &bnx2_rxp_fw_06;

        rc = load_cpu_fw(bp, &cpu_reg, fw);
        if (rc)
                goto init_cpu_err;

        /* Initialize the TX Processor. */
        cpu_reg.mode = BNX2_TXP_CPU_MODE;
        cpu_reg.mode_value_halt = BNX2_TXP_CPU_MODE_SOFT_HALT;
        cpu_reg.mode_value_sstep = BNX2_TXP_CPU_MODE_STEP_ENA;
        cpu_reg.state = BNX2_TXP_CPU_STATE;
        cpu_reg.state_value_clear = 0xffffff;
        cpu_reg.gpr0 = BNX2_TXP_CPU_REG_FILE;
        cpu_reg.evmask = BNX2_TXP_CPU_EVENT_MASK;
        cpu_reg.pc = BNX2_TXP_CPU_PROGRAM_COUNTER;
        cpu_reg.inst = BNX2_TXP_CPU_INSTRUCTION;
        cpu_reg.bp = BNX2_TXP_CPU_HW_BREAKPOINT;
        cpu_reg.spad_base = BNX2_TXP_SCRATCH;
        cpu_reg.mips_view_base = 0x8000000;

        if (CHIP_NUM(bp) == CHIP_NUM_5709)
                fw = &bnx2_txp_fw_09;
        else
                fw = &bnx2_txp_fw_06;

        rc = load_cpu_fw(bp, &cpu_reg, fw);
        if (rc)
                goto init_cpu_err;

        /* Initialize the TX Patch-up Processor. */
        cpu_reg.mode = BNX2_TPAT_CPU_MODE;
        cpu_reg.mode_value_halt = BNX2_TPAT_CPU_MODE_SOFT_HALT;
        cpu_reg.mode_value_sstep = BNX2_TPAT_CPU_MODE_STEP_ENA;
        cpu_reg.state = BNX2_TPAT_CPU_STATE;
        cpu_reg.state_value_clear = 0xffffff;
        cpu_reg.gpr0 = BNX2_TPAT_CPU_REG_FILE;
        cpu_reg.evmask = BNX2_TPAT_CPU_EVENT_MASK;
        cpu_reg.pc = BNX2_TPAT_CPU_PROGRAM_COUNTER;
        cpu_reg.inst = BNX2_TPAT_CPU_INSTRUCTION;
        cpu_reg.bp = BNX2_TPAT_CPU_HW_BREAKPOINT;
        cpu_reg.spad_base = BNX2_TPAT_SCRATCH;
        cpu_reg.mips_view_base = 0x8000000;

        if (CHIP_NUM(bp) == CHIP_NUM_5709)
                fw = &bnx2_tpat_fw_09;
        else
                fw = &bnx2_tpat_fw_06;

        rc = load_cpu_fw(bp, &cpu_reg, fw);
        if (rc)
                goto init_cpu_err;

        /* Initialize the Completion Processor. */
        cpu_reg.mode = BNX2_COM_CPU_MODE;
        cpu_reg.mode_value_halt = BNX2_COM_CPU_MODE_SOFT_HALT;
        cpu_reg.mode_value_sstep = BNX2_COM_CPU_MODE_STEP_ENA;
        cpu_reg.state = BNX2_COM_CPU_STATE;
        cpu_reg.state_value_clear = 0xffffff;
        cpu_reg.gpr0 = BNX2_COM_CPU_REG_FILE;
        cpu_reg.evmask = BNX2_COM_CPU_EVENT_MASK;
        cpu_reg.pc = BNX2_COM_CPU_PROGRAM_COUNTER;
        cpu_reg.inst = BNX2_COM_CPU_INSTRUCTION;
        cpu_reg.bp = BNX2_COM_CPU_HW_BREAKPOINT;
        cpu_reg.spad_base = BNX2_COM_SCRATCH;
        cpu_reg.mips_view_base = 0x8000000;

        if (CHIP_NUM(bp) == CHIP_NUM_5709)
                fw = &bnx2_com_fw_09;
        else
                fw = &bnx2_com_fw_06;

        rc = load_cpu_fw(bp, &cpu_reg, fw);
        if (rc)
                goto init_cpu_err;

        /* Initialize the Command Processor (5709 only; earlier chips
         * have no CP firmware to load).
         */
        cpu_reg.mode = BNX2_CP_CPU_MODE;
        cpu_reg.mode_value_halt = BNX2_CP_CPU_MODE_SOFT_HALT;
        cpu_reg.mode_value_sstep = BNX2_CP_CPU_MODE_STEP_ENA;
        cpu_reg.state = BNX2_CP_CPU_STATE;
        cpu_reg.state_value_clear = 0xffffff;
        cpu_reg.gpr0 = BNX2_CP_CPU_REG_FILE;
        cpu_reg.evmask = BNX2_CP_CPU_EVENT_MASK;
        cpu_reg.pc = BNX2_CP_CPU_PROGRAM_COUNTER;
        cpu_reg.inst = BNX2_CP_CPU_INSTRUCTION;
        cpu_reg.bp = BNX2_CP_CPU_HW_BREAKPOINT;
        cpu_reg.spad_base = BNX2_CP_SCRATCH;
        cpu_reg.mips_view_base = 0x8000000;

        if (CHIP_NUM(bp) == CHIP_NUM_5709) {
                fw = &bnx2_cp_fw_09;

                rc = load_cpu_fw(bp, &cpu_reg, fw);
                if (rc)
                        goto init_cpu_err;
        }
init_cpu_err:
        /* Reached on both success and failure: release gunzip buffers. */
        bnx2_gunzip_end(bp);
        return rc;
}
2529
/* Move the device between PCI power states D0 and D3hot.
 *
 * @bp:    driver private state
 * @state: PCI_D0 to power up, PCI_D3hot to power down; any other
 *         state returns -EINVAL.
 *
 * When entering D3hot with Wake-on-LAN configured (bp->wol), the PHY
 * is renegotiated down to 10/100 and the MAC is programmed to detect
 * magic/ACPI wake packets before power is removed.  Returns 0 on
 * success.
 */
static int
bnx2_set_power_state(struct bnx2 *bp, pci_power_t state)
{
	u16 pmcsr;

	pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);

	switch (state) {
	case PCI_D0: {
		u32 val;

		/* Clear the power-state field (back to D0) and ack any
		 * pending PME status (write-one-to-clear). */
		pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
			(pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
			PCI_PM_CTRL_PME_STATUS);

		if (pmcsr & PCI_PM_CTRL_STATE_MASK)
			/* delay required during transition out of D3hot */
			msleep(20);

		/* Ack any received wake-up packets and turn off further
		 * magic-packet detection now that we are awake. */
		val = REG_RD(bp, BNX2_EMAC_MODE);
		val |= BNX2_EMAC_MODE_MPKT_RCVD | BNX2_EMAC_MODE_ACPI_RCVD;
		val &= ~BNX2_EMAC_MODE_MPKT;
		REG_WR(bp, BNX2_EMAC_MODE, val);

		/* Disable ACPI pattern matching in the receive path. */
		val = REG_RD(bp, BNX2_RPM_CONFIG);
		val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
		REG_WR(bp, BNX2_RPM_CONFIG, val);
		break;
	}
	case PCI_D3hot: {
		int i;
		u32 val, wol_msg;

		if (bp->wol) {
			u32 advertising;
			u8 autoneg;

			/* Temporarily force 10/100 autoneg for the
			 * low-power link, then restore the user's
			 * settings for the next bring-up. */
			autoneg = bp->autoneg;
			advertising = bp->advertising;

			bp->autoneg = AUTONEG_SPEED;
			bp->advertising = ADVERTISED_10baseT_Half |
				ADVERTISED_10baseT_Full |
				ADVERTISED_100baseT_Half |
				ADVERTISED_100baseT_Full |
				ADVERTISED_Autoneg;

			bnx2_setup_copper_phy(bp);

			bp->autoneg = autoneg;
			bp->advertising = advertising;

			bnx2_set_mac_addr(bp);

			val = REG_RD(bp, BNX2_EMAC_MODE);

			/* Enable port mode. */
			val &= ~BNX2_EMAC_MODE_PORT;
			val |= BNX2_EMAC_MODE_PORT_MII |
			       BNX2_EMAC_MODE_MPKT_RCVD |
			       BNX2_EMAC_MODE_ACPI_RCVD |
			       BNX2_EMAC_MODE_MPKT;

			REG_WR(bp, BNX2_EMAC_MODE, val);

			/* receive all multicast */
			for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
				REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
				       0xffffffff);
			}
			REG_WR(bp, BNX2_EMAC_RX_MODE,
			       BNX2_EMAC_RX_MODE_SORT_MODE);

			/* Sort rule 0: accept broadcast and multicast.
			 * NOTE(review): the low '1' bit presumably selects
			 * the rule input -- confirm against the RPM
			 * register documentation. */
			val = 1 | BNX2_RPM_SORT_USER0_BC_EN |
			      BNX2_RPM_SORT_USER0_MC_EN;
			REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
			REG_WR(bp, BNX2_RPM_SORT_USER0, val);
			REG_WR(bp, BNX2_RPM_SORT_USER0, val |
			       BNX2_RPM_SORT_USER0_ENA);

			/* Need to enable EMAC and RPM for WOL. */
			REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
			       BNX2_MISC_ENABLE_SET_BITS_RX_PARSER_MAC_ENABLE |
			       BNX2_MISC_ENABLE_SET_BITS_TX_HEADER_Q_ENABLE |
			       BNX2_MISC_ENABLE_SET_BITS_EMAC_ENABLE);

			val = REG_RD(bp, BNX2_RPM_CONFIG);
			val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
			REG_WR(bp, BNX2_RPM_CONFIG, val);

			wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
		}
		else {
			wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
		}

		/* Tell the bootcode we are suspending, unless WOL is
		 * known not to work on this board (NO_WOL_FLAG). */
		if (!(bp->flags & NO_WOL_FLAG))
			bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT3 | wol_msg, 0);

		pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
		if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
		    (CHIP_ID(bp) == CHIP_ID_5706_A1)) {

			/* 5706 A0/A1 only enter D3hot (state value 3)
			 * when WOL is enabled -- presumably a chip
			 * erratum; confirm against the errata sheet. */
			if (bp->wol)
				pmcsr |= 3;
		}
		else {
			pmcsr |= 3;
		}
		if (bp->wol) {
			pmcsr |= PCI_PM_CTRL_PME_ENABLE;
		}
		pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
				      pmcsr);

		/* No more memory access after this point until
		 * device is brought back to D0.
		 */
		udelay(50);
		break;
	}
	default:
		return -EINVAL;
	}
	return 0;
}
2656
2657 static int
2658 bnx2_acquire_nvram_lock(struct bnx2 *bp)
2659 {
2660         u32 val;
2661         int j;
2662
2663         /* Request access to the flash interface. */
2664         REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_SET2);
2665         for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
2666                 val = REG_RD(bp, BNX2_NVM_SW_ARB);
2667                 if (val & BNX2_NVM_SW_ARB_ARB_ARB2)
2668                         break;
2669
2670                 udelay(5);
2671         }
2672
2673         if (j >= NVRAM_TIMEOUT_COUNT)
2674                 return -EBUSY;
2675
2676         return 0;
2677 }
2678
2679 static int
2680 bnx2_release_nvram_lock(struct bnx2 *bp)
2681 {
2682         int j;
2683         u32 val;
2684
2685         /* Relinquish nvram interface. */
2686         REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_CLR2);
2687
2688         for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
2689                 val = REG_RD(bp, BNX2_NVM_SW_ARB);
2690                 if (!(val & BNX2_NVM_SW_ARB_ARB_ARB2))
2691                         break;
2692
2693                 udelay(5);
2694         }
2695
2696         if (j >= NVRAM_TIMEOUT_COUNT)
2697                 return -EBUSY;
2698
2699         return 0;
2700 }
2701
2702
2703 static int
2704 bnx2_enable_nvram_write(struct bnx2 *bp)
2705 {
2706         u32 val;
2707
2708         val = REG_RD(bp, BNX2_MISC_CFG);
2709         REG_WR(bp, BNX2_MISC_CFG, val | BNX2_MISC_CFG_NVM_WR_EN_PCI);
2710
2711         if (!bp->flash_info->buffered) {
2712                 int j;
2713
2714                 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
2715                 REG_WR(bp, BNX2_NVM_COMMAND,
2716                        BNX2_NVM_COMMAND_WREN | BNX2_NVM_COMMAND_DOIT);
2717
2718                 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
2719                         udelay(5);
2720
2721                         val = REG_RD(bp, BNX2_NVM_COMMAND);
2722                         if (val & BNX2_NVM_COMMAND_DONE)
2723                                 break;
2724                 }
2725
2726                 if (j >= NVRAM_TIMEOUT_COUNT)
2727                         return -EBUSY;
2728         }
2729         return 0;
2730 }
2731
2732 static void
2733 bnx2_disable_nvram_write(struct bnx2 *bp)
2734 {
2735         u32 val;
2736
2737         val = REG_RD(bp, BNX2_MISC_CFG);
2738         REG_WR(bp, BNX2_MISC_CFG, val & ~BNX2_MISC_CFG_NVM_WR_EN);
2739 }
2740
2741
2742 static void
2743 bnx2_enable_nvram_access(struct bnx2 *bp)
2744 {
2745         u32 val;
2746
2747         val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
2748         /* Enable both bits, even on read. */
2749         REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
2750                val | BNX2_NVM_ACCESS_ENABLE_EN | BNX2_NVM_ACCESS_ENABLE_WR_EN);
2751 }
2752
2753 static void
2754 bnx2_disable_nvram_access(struct bnx2 *bp)
2755 {
2756         u32 val;
2757
2758         val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
2759         /* Disable both bits, even after read. */
2760         REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
2761                 val & ~(BNX2_NVM_ACCESS_ENABLE_EN |
2762                         BNX2_NVM_ACCESS_ENABLE_WR_EN));
2763 }
2764
2765 static int
2766 bnx2_nvram_erase_page(struct bnx2 *bp, u32 offset)
2767 {
2768         u32 cmd;
2769         int j;
2770
2771         if (bp->flash_info->buffered)
2772                 /* Buffered flash, no erase needed */
2773                 return 0;
2774
2775         /* Build an erase command */
2776         cmd = BNX2_NVM_COMMAND_ERASE | BNX2_NVM_COMMAND_WR |
2777               BNX2_NVM_COMMAND_DOIT;
2778
2779         /* Need to clear DONE bit separately. */
2780         REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
2781
2782         /* Address of the NVRAM to read from. */
2783         REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
2784
2785         /* Issue an erase command. */
2786         REG_WR(bp, BNX2_NVM_COMMAND, cmd);
2787
2788         /* Wait for completion. */
2789         for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
2790                 u32 val;
2791
2792                 udelay(5);
2793
2794                 val = REG_RD(bp, BNX2_NVM_COMMAND);
2795                 if (val & BNX2_NVM_COMMAND_DONE)
2796                         break;
2797         }
2798
2799         if (j >= NVRAM_TIMEOUT_COUNT)
2800                 return -EBUSY;
2801
2802         return 0;
2803 }
2804
/* Read one 32-bit word from NVRAM.
 *
 * @bp:        driver private state
 * @offset:    byte offset into the flash (dword aligned)
 * @ret_val:   receives the 4 data bytes
 * @cmd_flags: BNX2_NVM_COMMAND_FIRST/LAST framing flags from the caller
 *
 * Caller must already hold the NVRAM lock and have access enabled.
 * Returns 0 on success, -EBUSY if the controller never signals DONE.
 */
static int
bnx2_nvram_read_dword(struct bnx2 *bp, u32 offset, u8 *ret_val, u32 cmd_flags)
{
	u32 cmd;
	int j;

	/* Build the command word. */
	cmd = BNX2_NVM_COMMAND_DOIT | cmd_flags;

	/* Calculate an offset of a buffered flash. */
	if (bp->flash_info->buffered) {
		/* Buffered flash addresses are page number in the high
		 * bits plus the byte offset within the page. */
		offset = ((offset / bp->flash_info->page_size) <<
			   bp->flash_info->page_bits) +
			  (offset % bp->flash_info->page_size);
	}

	/* Need to clear DONE bit separately. */
	REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);

	/* Address of the NVRAM to read from. */
	REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);

	/* Issue a read command. */
	REG_WR(bp, BNX2_NVM_COMMAND, cmd);

	/* Wait for completion. */
	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
		u32 val;

		udelay(5);

		val = REG_RD(bp, BNX2_NVM_COMMAND);
		if (val & BNX2_NVM_COMMAND_DONE) {
			val = REG_RD(bp, BNX2_NVM_READ);

			/* Swap from the big-endian register layout so
			 * ret_val holds the bytes in on-flash order,
			 * mirroring the cpu_to_be32 in the write path. */
			val = be32_to_cpu(val);
			memcpy(ret_val, &val, 4);
			break;
		}
	}
	if (j >= NVRAM_TIMEOUT_COUNT)
		return -EBUSY;

	return 0;
}
2850
2851
2852 static int
2853 bnx2_nvram_write_dword(struct bnx2 *bp, u32 offset, u8 *val, u32 cmd_flags)
2854 {
2855         u32 cmd, val32;
2856         int j;
2857
2858         /* Build the command word. */
2859         cmd = BNX2_NVM_COMMAND_DOIT | BNX2_NVM_COMMAND_WR | cmd_flags;
2860
2861         /* Calculate an offset of a buffered flash. */
2862         if (bp->flash_info->buffered) {
2863                 offset = ((offset / bp->flash_info->page_size) <<
2864                           bp->flash_info->page_bits) +
2865                          (offset % bp->flash_info->page_size);
2866         }
2867
2868         /* Need to clear DONE bit separately. */
2869         REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
2870
2871         memcpy(&val32, val, 4);
2872         val32 = cpu_to_be32(val32);
2873
2874         /* Write the data. */
2875         REG_WR(bp, BNX2_NVM_WRITE, val32);
2876
2877         /* Address of the NVRAM to write to. */
2878         REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
2879
2880         /* Issue the write command. */
2881         REG_WR(bp, BNX2_NVM_COMMAND, cmd);
2882
2883         /* Wait for completion. */
2884         for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
2885                 udelay(5);
2886
2887                 if (REG_RD(bp, BNX2_NVM_COMMAND) & BNX2_NVM_COMMAND_DONE)
2888                         break;
2889         }
2890         if (j >= NVRAM_TIMEOUT_COUNT)
2891                 return -EBUSY;
2892
2893         return 0;
2894 }
2895
/* Identify the attached flash/EEPROM and record it in bp->flash_info.
 *
 * Reads the NVM_CFG1 strapping, matches it against flash_table[], and,
 * when the interface has not yet been reconfigured by the bootcode,
 * programs the flash config registers for the detected part.  Also
 * determines bp->flash_size.  Returns 0 on success, -ENODEV when the
 * strapping matches no table entry, or an NVRAM-lock error code.
 */
static int
bnx2_init_nvram(struct bnx2 *bp)
{
	u32 val;
	int j, entry_count, rc;
	struct flash_spec *flash;

	/* Determine the selected interface. */
	val = REG_RD(bp, BNX2_NVM_CFG1);

	entry_count = sizeof(flash_table) / sizeof(struct flash_spec);

	rc = 0;
	/* Bit 30 set means the bootcode has already reconfigured the
	 * flash interface; only match, do not reprogram. */
	if (val & 0x40000000) {

		/* Flash interface has been reconfigured */
		for (j = 0, flash = &flash_table[0]; j < entry_count;
		     j++, flash++) {
			if ((val & FLASH_BACKUP_STRAP_MASK) ==
			    (flash->config1 & FLASH_BACKUP_STRAP_MASK)) {
				bp->flash_info = flash;
				break;
			}
		}
	}
	else {
		u32 mask;
		/* Not yet been reconfigured */

		/* Bit 23 selects the backup strapping pins. */
		if (val & (1 << 23))
			mask = FLASH_BACKUP_STRAP_MASK;
		else
			mask = FLASH_STRAP_MASK;

		for (j = 0, flash = &flash_table[0]; j < entry_count;
			j++, flash++) {

			if ((val & mask) == (flash->strapping & mask)) {
				bp->flash_info = flash;

				/* Request access to the flash interface. */
				if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
					return rc;

				/* Enable access to flash interface */
				bnx2_enable_nvram_access(bp);

				/* Reconfigure the flash interface */
				REG_WR(bp, BNX2_NVM_CFG1, flash->config1);
				REG_WR(bp, BNX2_NVM_CFG2, flash->config2);
				REG_WR(bp, BNX2_NVM_CFG3, flash->config3);
				REG_WR(bp, BNX2_NVM_WRITE1, flash->write1);

				/* Disable access to flash interface */
				bnx2_disable_nvram_access(bp);
				bnx2_release_nvram_lock(bp);

				break;
			}
		}
	} /* if (val & 0x40000000) */

	/* Both loops leave j == entry_count when nothing matched. */
	if (j == entry_count) {
		bp->flash_info = NULL;
		printk(KERN_ALERT PFX "Unknown flash/EEPROM type.\n");
		return -ENODEV;
	}

	/* Prefer the size the bootcode reports in shared memory; fall
	 * back to the table's total_size when it reports none. */
	val = REG_RD_IND(bp, bp->shmem_base + BNX2_SHARED_HW_CFG_CONFIG2);
	val &= BNX2_SHARED_HW_CFG2_NVM_SIZE_MASK;
	if (val)
		bp->flash_size = val;
	else
		bp->flash_size = bp->flash_info->total_size;

	return rc;
}
2973
2974 static int
2975 bnx2_nvram_read(struct bnx2 *bp, u32 offset, u8 *ret_buf,
2976                 int buf_size)
2977 {
2978         int rc = 0;
2979         u32 cmd_flags, offset32, len32, extra;
2980
2981         if (buf_size == 0)
2982                 return 0;
2983
2984         /* Request access to the flash interface. */
2985         if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
2986                 return rc;
2987
2988         /* Enable access to flash interface */
2989         bnx2_enable_nvram_access(bp);
2990
2991         len32 = buf_size;
2992         offset32 = offset;
2993         extra = 0;
2994
2995         cmd_flags = 0;
2996
2997         if (offset32 & 3) {
2998                 u8 buf[4];
2999                 u32 pre_len;
3000
3001                 offset32 &= ~3;
3002                 pre_len = 4 - (offset & 3);
3003
3004                 if (pre_len >= len32) {
3005                         pre_len = len32;
3006                         cmd_flags = BNX2_NVM_COMMAND_FIRST |
3007                                     BNX2_NVM_COMMAND_LAST;
3008                 }
3009                 else {
3010                         cmd_flags = BNX2_NVM_COMMAND_FIRST;
3011                 }
3012
3013                 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
3014
3015                 if (rc)
3016                         return rc;
3017
3018                 memcpy(ret_buf, buf + (offset & 3), pre_len);
3019
3020                 offset32 += 4;
3021                 ret_buf += pre_len;
3022                 len32 -= pre_len;
3023         }
3024         if (len32 & 3) {
3025                 extra = 4 - (len32 & 3);
3026                 len32 = (len32 + 4) & ~3;
3027         }
3028
3029         if (len32 == 4) {
3030                 u8 buf[4];
3031
3032                 if (cmd_flags)
3033                         cmd_flags = BNX2_NVM_COMMAND_LAST;
3034                 else
3035                         cmd_flags = BNX2_NVM_COMMAND_FIRST |
3036                                     BNX2_NVM_COMMAND_LAST;
3037
3038                 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
3039
3040                 memcpy(ret_buf, buf, 4 - extra);
3041         }
3042         else if (len32 > 0) {
3043                 u8 buf[4];
3044
3045                 /* Read the first word. */
3046                 if (cmd_flags)
3047                         cmd_flags = 0;
3048                 else
3049                         cmd_flags = BNX2_NVM_COMMAND_FIRST;
3050
3051                 rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, cmd_flags);
3052
3053                 /* Advance to the next dword. */
3054                 offset32 += 4;
3055                 ret_buf += 4;
3056                 len32 -= 4;
3057
3058                 while (len32 > 4 && rc == 0) {
3059                         rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, 0);
3060
3061                         /* Advance to the next dword. */
3062                         offset32 += 4;
3063                         ret_buf += 4;
3064                         len32 -= 4;
3065                 }
3066
3067                 if (rc)
3068                         return rc;
3069
3070                 cmd_flags = BNX2_NVM_COMMAND_LAST;
3071                 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
3072
3073                 memcpy(ret_buf, buf, 4 - extra);
3074         }
3075
3076         /* Disable access to flash interface */
3077         bnx2_disable_nvram_access(bp);
3078
3079         bnx2_release_nvram_lock(bp);
3080
3081         return rc;
3082 }
3083
3084 static int
3085 bnx2_nvram_write(struct bnx2 *bp, u32 offset, u8 *data_buf,
3086                 int buf_size)
3087 {
3088         u32 written, offset32, len32;
3089         u8 *buf, start[4], end[4], *align_buf = NULL, *flash_buffer = NULL;
3090         int rc = 0;
3091         int align_start, align_end;
3092
3093         buf = data_buf;
3094         offset32 = offset;
3095         len32 = buf_size;
3096         align_start = align_end = 0;
3097
3098         if ((align_start = (offset32 & 3))) {
3099                 offset32 &= ~3;
3100                 len32 += align_start;
3101                 if (len32 < 4)
3102                         len32 = 4;
3103                 if ((rc = bnx2_nvram_read(bp, offset32, start, 4)))
3104                         return rc;
3105         }
3106
3107         if (len32 & 3) {
3108                 align_end = 4 - (len32 & 3);
3109                 len32 += align_end;
3110                 if ((rc = bnx2_nvram_read(bp, offset32 + len32 - 4, end, 4)))
3111                         return rc;
3112         }
3113
3114         if (align_start || align_end) {
3115                 align_buf = kmalloc(len32, GFP_KERNEL);
3116                 if (align_buf == NULL)
3117                         return -ENOMEM;
3118                 if (align_start) {
3119                         memcpy(align_buf, start, 4);
3120                 }
3121                 if (align_end) {
3122                         memcpy(align_buf + len32 - 4, end, 4);
3123                 }
3124                 memcpy(align_buf + align_start, data_buf, buf_size);
3125                 buf = align_buf;
3126         }
3127
3128         if (bp->flash_info->buffered == 0) {
3129                 flash_buffer = kmalloc(264, GFP_KERNEL);
3130                 if (flash_buffer == NULL) {
3131                         rc = -ENOMEM;
3132                         goto nvram_write_end;
3133                 }
3134         }
3135
3136         written = 0;
3137         while ((written < len32) && (rc == 0)) {
3138                 u32 page_start, page_end, data_start, data_end;
3139                 u32 addr, cmd_flags;
3140                 int i;
3141
3142                 /* Find the page_start addr */
3143                 page_start = offset32 + written;
3144                 page_start -= (page_start % bp->flash_info->page_size);
3145                 /* Find the page_end addr */
3146                 page_end = page_start + bp->flash_info->page_size;
3147                 /* Find the data_start addr */
3148                 data_start = (written == 0) ? offset32 : page_start;
3149                 /* Find the data_end addr */
3150                 data_end = (page_end > offset32 + len32) ?
3151                         (offset32 + len32) : page_end;
3152
3153                 /* Request access to the flash interface. */
3154                 if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
3155                         goto nvram_write_end;
3156
3157                 /* Enable access to flash interface */
3158                 bnx2_enable_nvram_access(bp);
3159
3160                 cmd_flags = BNX2_NVM_COMMAND_FIRST;
3161                 if (bp->flash_info->buffered == 0) {
3162                         int j;
3163
3164                         /* Read the whole page into the buffer
3165                          * (non-buffer flash only) */
3166                         for (j = 0; j < bp->flash_info->page_size; j += 4) {
3167                                 if (j == (bp->flash_info->page_size - 4)) {
3168                                         cmd_flags |= BNX2_NVM_COMMAND_LAST;
3169                                 }
3170                                 rc = bnx2_nvram_read_dword(bp,
3171                                         page_start + j,
3172                                         &flash_buffer[j],
3173                                         cmd_flags);
3174
3175                                 if (rc)
3176                                         goto nvram_write_end;
3177
3178                                 cmd_flags = 0;
3179                         }
3180                 }
3181
3182                 /* Enable writes to flash interface (unlock write-protect) */
3183                 if ((rc = bnx2_enable_nvram_write(bp)) != 0)
3184                         goto nvram_write_end;
3185
3186                 /* Loop to write back the buffer data from page_start to
3187                  * data_start */
3188                 i = 0;
3189                 if (bp->flash_info->buffered == 0) {
3190                         /* Erase the page */
3191                         if ((rc = bnx2_nvram_erase_page(bp, page_start)) != 0)
3192                                 goto nvram_write_end;
3193
3194                         /* Re-enable the write again for the actual write */
3195                         bnx2_enable_nvram_write(bp);
3196
3197                         for (addr = page_start; addr < data_start;
3198                                 addr += 4, i += 4) {
3199
3200                                 rc = bnx2_nvram_write_dword(bp, addr,
3201                                         &flash_buffer[i], cmd_flags);
3202
3203                                 if (rc != 0)
3204                                         goto nvram_write_end;
3205
3206                                 cmd_flags = 0;
3207                         }
3208                 }
3209
3210                 /* Loop to write the new data from data_start to data_end */
3211                 for (addr = data_start; addr < data_end; addr += 4, i += 4) {
3212                         if ((addr == page_end - 4) ||
3213                                 ((bp->flash_info->buffered) &&
3214                                  (addr == data_end - 4))) {
3215
3216                                 cmd_flags |= BNX2_NVM_COMMAND_LAST;
3217                         }
3218                         rc = bnx2_nvram_write_dword(bp, addr, buf,
3219                                 cmd_flags);
3220
3221                         if (rc != 0)
3222                                 goto nvram_write_end;
3223
3224                         cmd_flags = 0;
3225                         buf += 4;
3226                 }
3227
3228                 /* Loop to write back the buffer data from data_end
3229                  * to page_end */
3230                 if (bp->flash_info->buffered == 0) {
3231                         for (addr = data_end; addr < page_end;
3232                                 addr += 4, i += 4) {
3233
3234                                 if (addr == page_end-4) {
3235                                         cmd_flags = BNX2_NVM_COMMAND_LAST;
3236                                 }
3237                                 rc = bnx2_nvram_write_dword(bp, addr,
3238                                         &flash_buffer[i], cmd_flags);
3239
3240                                 if (rc != 0)
3241                                         goto nvram_write_end;
3242
3243                                 cmd_flags = 0;
3244                         }
3245                 }
3246
3247                 /* Disable writes to flash interface (lock write-protect) */
3248                 bnx2_disable_nvram_write(bp);
3249
3250                 /* Disable access to flash interface */
3251                 bnx2_disable_nvram_access(bp);
3252                 bnx2_release_nvram_lock(bp);
3253
3254                 /* Increment written */
3255                 written += data_end - data_start;
3256         }
3257
3258 nvram_write_end:
3259         kfree(flash_buffer);
3260         kfree(align_buf);
3261         return rc;
3262 }
3263
3264 static int
3265 bnx2_reset_chip(struct bnx2 *bp, u32 reset_code)
3266 {
3267         u32 val;
3268         int i, rc = 0;
3269
3270         /* Wait for the current PCI transaction to complete before
3271          * issuing a reset. */
3272         REG_WR(bp, BNX2_MISC_ENABLE_CLR_BITS,
3273                BNX2_MISC_ENABLE_CLR_BITS_TX_DMA_ENABLE |
3274                BNX2_MISC_ENABLE_CLR_BITS_DMA_ENGINE_ENABLE |
3275                BNX2_MISC_ENABLE_CLR_BITS_RX_DMA_ENABLE |
3276                BNX2_MISC_ENABLE_CLR_BITS_HOST_COALESCE_ENABLE);
3277         val = REG_RD(bp, BNX2_MISC_ENABLE_CLR_BITS);
3278         udelay(5);
3279
3280         /* Wait for the firmware to tell us it is ok to issue a reset. */
3281         bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT0 | reset_code, 1);
3282
3283         /* Deposit a driver reset signature so the firmware knows that
3284          * this is a soft reset. */
3285         REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_RESET_SIGNATURE,
3286                    BNX2_DRV_RESET_SIGNATURE_MAGIC);
3287
3288         /* Do a dummy read to force the chip to complete all current transaction
3289          * before we issue a reset. */
3290         val = REG_RD(bp, BNX2_MISC_ID);
3291
3292         if (CHIP_NUM(bp) == CHIP_NUM_5709) {
3293                 REG_WR(bp, BNX2_MISC_COMMAND, BNX2_MISC_COMMAND_SW_RESET);
3294                 REG_RD(bp, BNX2_MISC_COMMAND);
3295                 udelay(5);
3296
3297                 val = BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
3298                       BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;
3299
3300                 pci_write_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG, val);
3301
3302         } else {
3303                 val = BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
3304                       BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
3305                       BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;
3306
3307                 /* Chip reset. */
3308                 REG_WR(bp, BNX2_PCICFG_MISC_CONFIG, val);
3309
3310                 if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
3311                     (CHIP_ID(bp) == CHIP_ID_5706_A1)) {
3312                         current->state = TASK_UNINTERRUPTIBLE;
3313                         schedule_timeout(HZ / 50);
3314                 }
3315
3316                 /* Reset takes approximate 30 usec */
3317                 for (i = 0; i < 10; i++) {
3318                         val = REG_RD(bp, BNX2_PCICFG_MISC_CONFIG);
3319                         if ((val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
3320                                     BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) == 0)
3321                                 break;
3322                         udelay(10);
3323                 }
3324
3325                 if (val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
3326                            BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) {
3327                         printk(KERN_ERR PFX "Chip reset did not complete\n");
3328                         return -EBUSY;
3329                 }
3330         }
3331
3332         /* Make sure byte swapping is properly configured. */
3333         val = REG_RD(bp, BNX2_PCI_SWAP_DIAG0);
3334         if (val != 0x01020304) {
3335                 printk(KERN_ERR PFX "Chip not in correct endian mode\n");
3336                 return -ENODEV;
3337         }
3338
3339         /* Wait for the firmware to finish its initialization. */
3340         rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT1 | reset_code, 0);
3341         if (rc)
3342                 return rc;
3343
3344         if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
3345                 /* Adjust the voltage regular to two steps lower.  The default
3346                  * of this register is 0x0000000e. */
3347                 REG_WR(bp, BNX2_MISC_VREG_CONTROL, 0x000000fa);
3348
3349                 /* Remove bad rbuf memory from the free pool. */
3350                 rc = bnx2_alloc_bad_rbuf(bp);
3351         }
3352
3353         return rc;
3354 }
3355
/* One-time chip initialization after reset: configures the DMA engine,
 * context memory and on-chip CPUs, programs MAC/MTU and host-coalescing
 * registers, then reports RESET completion to the bootcode.
 * Returns 0 on success or a negative errno (from CPU init or fw sync).
 */
static int
bnx2_init_chip(struct bnx2 *bp)
{
	u32 val;
	int rc;

	/* Make sure the interrupt is not active. */
	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, BNX2_PCICFG_INT_ACK_CMD_MASK_INT);

	/* DMA configuration: byte/word swapping plus read/write channel
	 * counts in bits 12-15 / 16-19. */
	val = BNX2_DMA_CONFIG_DATA_BYTE_SWAP |
	      BNX2_DMA_CONFIG_DATA_WORD_SWAP |
#ifdef __BIG_ENDIAN
	      BNX2_DMA_CONFIG_CNTL_BYTE_SWAP |
#endif
	      BNX2_DMA_CONFIG_CNTL_WORD_SWAP |
	      DMA_READ_CHANS << 12 |
	      DMA_WRITE_CHANS << 16;

	val |= (0x2 << 20) | (1 << 11);

	/* NOTE(review): bit 23 looks like a 133 MHz PCI-X tuning bit;
	 * exact hardware meaning is not visible from this file. */
	if ((bp->flags & PCIX_FLAG) && (bp->bus_speed_mhz == 133))
		val |= (1 << 23);

	if ((CHIP_NUM(bp) == CHIP_NUM_5706) &&
	    (CHIP_ID(bp) != CHIP_ID_5706_A0) && !(bp->flags & PCIX_FLAG))
		val |= BNX2_DMA_CONFIG_CNTL_PING_PONG_DMA;

	REG_WR(bp, BNX2_DMA_CONFIG, val);

	/* 5706 A0 workaround: force the single-DMA mode bit. */
	if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
		val = REG_RD(bp, BNX2_TDMA_CONFIG);
		val |= BNX2_TDMA_CONFIG_ONE_DMA;
		REG_WR(bp, BNX2_TDMA_CONFIG, val);
	}

	/* On PCI-X, clear the Enable Relaxed Ordering bit in the command
	 * register. */
	if (bp->flags & PCIX_FLAG) {
		u16 val16;

		pci_read_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
				     &val16);
		pci_write_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
				      val16 & ~PCI_X_CMD_ERO);
	}

	/* Enable the blocks needed before context/CPU initialization. */
	REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
	       BNX2_MISC_ENABLE_SET_BITS_HOST_COALESCE_ENABLE |
	       BNX2_MISC_ENABLE_STATUS_BITS_RX_V2P_ENABLE |
	       BNX2_MISC_ENABLE_STATUS_BITS_CONTEXT_ENABLE);

	/* Initialize context mapping and zero out the quick contexts.  The
	 * context block must have already been enabled. */
	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		bnx2_init_5709_context(bp);
	else
		bnx2_init_context(bp);

	/* Load firmware into the on-chip RX/TX processors. */
	if ((rc = bnx2_init_cpus(bp)) != 0)
		return rc;

	bnx2_init_nvram(bp);

	bnx2_set_mac_addr(bp);

	/* Mailbox queue: set kernel bypass block size to 256 bytes; on
	 * 5709 A0/A1 also set the halt-disable workaround bit. */
	val = REG_RD(bp, BNX2_MQ_CONFIG);
	val &= ~BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE;
	val |= BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE_256;
	if (CHIP_ID(bp) == CHIP_ID_5709_A0 || CHIP_ID(bp) == CHIP_ID_5709_A1)
		val |= BNX2_MQ_CONFIG_HALT_DIS;

	REG_WR(bp, BNX2_MQ_CONFIG, val);

	/* Bypass window begins right after the kernel context mailboxes. */
	val = 0x10000 + (MAX_CID_CNT * MB_KERNEL_CTX_SIZE);
	REG_WR(bp, BNX2_MQ_KNL_BYP_WIND_START, val);
	REG_WR(bp, BNX2_MQ_KNL_WIND_END, val);

	/* Tell the RV2P engine the host page size (encoded as log2 - 8). */
	val = (BCM_PAGE_BITS - 8) << 24;
	REG_WR(bp, BNX2_RV2P_CONFIG, val);

	/* Configure page size. */
	val = REG_RD(bp, BNX2_TBDR_CONFIG);
	val &= ~BNX2_TBDR_CONFIG_PAGE_SIZE;
	val |= (BCM_PAGE_BITS - 8) << 24 | 0x40;
	REG_WR(bp, BNX2_TBDR_CONFIG, val);

	/* Seed the transmit backoff generator from the station address. */
	val = bp->mac_addr[0] +
	      (bp->mac_addr[1] << 8) +
	      (bp->mac_addr[2] << 16) +
	      bp->mac_addr[3] +
	      (bp->mac_addr[4] << 8) +
	      (bp->mac_addr[5] << 16);
	REG_WR(bp, BNX2_EMAC_BACKOFF_SEED, val);

	/* Program the MTU.  Also include 4 bytes for CRC32. */
	val = bp->dev->mtu + ETH_HLEN + 4;
	if (val > (MAX_ETHERNET_PACKET_SIZE + 4))
		val |= BNX2_EMAC_RX_MTU_SIZE_JUMBO_ENA;
	REG_WR(bp, BNX2_EMAC_RX_MTU_SIZE, val);

	bp->last_status_idx = 0;
	bp->rx_mode = BNX2_EMAC_RX_MODE_SORT_MODE;

	/* Set up how to generate a link change interrupt. */
	REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);

	/* DMA addresses of the host status block and statistics block. */
	REG_WR(bp, BNX2_HC_STATUS_ADDR_L,
	       (u64) bp->status_blk_mapping & 0xffffffff);
	REG_WR(bp, BNX2_HC_STATUS_ADDR_H, (u64) bp->status_blk_mapping >> 32);

	REG_WR(bp, BNX2_HC_STATISTICS_ADDR_L,
	       (u64) bp->stats_blk_mapping & 0xffffffff);
	REG_WR(bp, BNX2_HC_STATISTICS_ADDR_H,
	       (u64) bp->stats_blk_mapping >> 32);

	/* Interrupt coalescing: each register packs the interrupt-mode
	 * value in the high half and the normal value in the low half. */
	REG_WR(bp, BNX2_HC_TX_QUICK_CONS_TRIP,
	       (bp->tx_quick_cons_trip_int << 16) | bp->tx_quick_cons_trip);

	REG_WR(bp, BNX2_HC_RX_QUICK_CONS_TRIP,
	       (bp->rx_quick_cons_trip_int << 16) | bp->rx_quick_cons_trip);

	REG_WR(bp, BNX2_HC_COMP_PROD_TRIP,
	       (bp->comp_prod_trip_int << 16) | bp->comp_prod_trip);

	REG_WR(bp, BNX2_HC_TX_TICKS, (bp->tx_ticks_int << 16) | bp->tx_ticks);

	REG_WR(bp, BNX2_HC_RX_TICKS, (bp->rx_ticks_int << 16) | bp->rx_ticks);

	REG_WR(bp, BNX2_HC_COM_TICKS,
	       (bp->com_ticks_int << 16) | bp->com_ticks);

	REG_WR(bp, BNX2_HC_CMD_TICKS,
	       (bp->cmd_ticks_int << 16) | bp->cmd_ticks);

	REG_WR(bp, BNX2_HC_STATS_TICKS, bp->stats_ticks & 0xffff00);
	REG_WR(bp, BNX2_HC_STAT_COLLECT_TICKS, 0xbb8);	/* 3ms */

	if (CHIP_ID(bp) == CHIP_ID_5706_A1)
		REG_WR(bp, BNX2_HC_CONFIG, BNX2_HC_CONFIG_COLLECT_STATS);
	else {
		REG_WR(bp, BNX2_HC_CONFIG, BNX2_HC_CONFIG_RX_TMR_MODE |
		       BNX2_HC_CONFIG_TX_TMR_MODE |
		       BNX2_HC_CONFIG_COLLECT_STATS);
	}

	/* Clear internal stats counters. */
	REG_WR(bp, BNX2_HC_COMMAND, BNX2_HC_COMMAND_CLR_STAT_NOW);

	REG_WR(bp, BNX2_HC_ATTN_BITS_ENABLE, STATUS_ATTN_BITS_LINK_STATE);

	/* Mirror the bootcode's ASF setting into the driver flags. */
	if (REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_FEATURE) &
	    BNX2_PORT_FEATURE_ASF_ENABLED)
		bp->flags |= ASF_ENABLE_FLAG;

	/* Initialize the receive filter. */
	bnx2_set_rx_mode(bp->dev);

	/* Let the bootcode know the reset sequence has completed. */
	rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT2 | BNX2_DRV_MSG_CODE_RESET,
			  0);

	/* Enable the remaining blocks; the read flushes the posted write. */
	REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS, 0x5ffffff);
	REG_RD(bp, BNX2_MISC_ENABLE_SET_BITS);

	udelay(20);

	/* Cache the host-coalescing command register for later COAL_NOW use. */
	bp->hc_cmd = REG_RD(bp, BNX2_HC_COMMAND);

	return rc;
}
3523
3524 static void
3525 bnx2_init_tx_context(struct bnx2 *bp, u32 cid)
3526 {
3527         u32 val, offset0, offset1, offset2, offset3;
3528
3529         if (CHIP_NUM(bp) == CHIP_NUM_5709) {
3530                 offset0 = BNX2_L2CTX_TYPE_XI;
3531                 offset1 = BNX2_L2CTX_CMD_TYPE_XI;
3532                 offset2 = BNX2_L2CTX_TBDR_BHADDR_HI_XI;
3533                 offset3 = BNX2_L2CTX_TBDR_BHADDR_LO_XI;
3534         } else {
3535                 offset0 = BNX2_L2CTX_TYPE;
3536                 offset1 = BNX2_L2CTX_CMD_TYPE;
3537                 offset2 = BNX2_L2CTX_TBDR_BHADDR_HI;
3538                 offset3 = BNX2_L2CTX_TBDR_BHADDR_LO;
3539         }
3540         val = BNX2_L2CTX_TYPE_TYPE_L2 | BNX2_L2CTX_TYPE_SIZE_L2;
3541         CTX_WR(bp, GET_CID_ADDR(cid), offset0, val);
3542
3543         val = BNX2_L2CTX_CMD_TYPE_TYPE_L2 | (8 << 16);
3544         CTX_WR(bp, GET_CID_ADDR(cid), offset1, val);
3545
3546         val = (u64) bp->tx_desc_mapping >> 32;
3547         CTX_WR(bp, GET_CID_ADDR(cid), offset2, val);
3548
3549         val = (u64) bp->tx_desc_mapping & 0xffffffff;
3550         CTX_WR(bp, GET_CID_ADDR(cid), offset3, val);
3551 }
3552
3553 static void
3554 bnx2_init_tx_ring(struct bnx2 *bp)
3555 {
3556         struct tx_bd *txbd;
3557         u32 cid;
3558
3559         bp->tx_wake_thresh = bp->tx_ring_size / 2;
3560
3561         txbd = &bp->tx_desc_ring[MAX_TX_DESC_CNT];
3562
3563         txbd->tx_bd_haddr_hi = (u64) bp->tx_desc_mapping >> 32;
3564         txbd->tx_bd_haddr_lo = (u64) bp->tx_desc_mapping & 0xffffffff;
3565
3566         bp->tx_prod = 0;
3567         bp->tx_cons = 0;
3568         bp->hw_tx_cons = 0;
3569         bp->tx_prod_bseq = 0;
3570
3571         cid = TX_CID;
3572         bp->tx_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BIDX;
3573         bp->tx_bseq_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BSEQ;
3574
3575         bnx2_init_tx_context(bp, cid);
3576 }
3577
/* Reset RX ring state, chain the RX BD pages into a circular ring,
 * program the RX context, and pre-fill the ring with receive skbs.
 */
static void
bnx2_init_rx_ring(struct bnx2 *bp)
{
	struct rx_bd *rxbd;
	int i;
	u16 prod, ring_prod;
	u32 val;

	/* 8 for CRC and VLAN */
	bp->rx_buf_use_size = bp->dev->mtu + ETH_HLEN + bp->rx_offset + 8;
	/* hw alignment */
	bp->rx_buf_size = bp->rx_buf_use_size + BNX2_RX_ALIGN;

	ring_prod = prod = bp->rx_prod = 0;
	bp->rx_cons = 0;
	bp->hw_rx_cons = 0;
	bp->rx_prod_bseq = 0;

	for (i = 0; i < bp->rx_max_ring; i++) {
		int j;

		/* Every usable BD on this page gets the buffer length and
		 * start/end flags. */
		rxbd = &bp->rx_desc_ring[i][0];
		for (j = 0; j < MAX_RX_DESC_CNT; j++, rxbd++) {
			rxbd->rx_bd_len = bp->rx_buf_use_size;
			rxbd->rx_bd_flags = RX_BD_FLAGS_START | RX_BD_FLAGS_END;
		}
		/* The last BD of each page points at the next page's bus
		 * address; the final page points back to page 0, closing
		 * the ring. */
		if (i == (bp->rx_max_ring - 1))
			j = 0;
		else
			j = i + 1;
		rxbd->rx_bd_haddr_hi = (u64) bp->rx_desc_mapping[j] >> 32;
		rxbd->rx_bd_haddr_lo = (u64) bp->rx_desc_mapping[j] &
				       0xffffffff;
	}

	/* Program the RX context type for this L2 connection. */
	val = BNX2_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE;
	val |= BNX2_L2CTX_CTX_TYPE_SIZE_L2;
	val |= 0x02 << 8;
	CTX_WR(bp, GET_CID_ADDR(RX_CID), BNX2_L2CTX_CTX_TYPE, val);

	/* Hand the hardware the 64-bit address of the first BD page. */
	val = (u64) bp->rx_desc_mapping[0] >> 32;
	CTX_WR(bp, GET_CID_ADDR(RX_CID), BNX2_L2CTX_NX_BDHADDR_HI, val);

	val = (u64) bp->rx_desc_mapping[0] & 0xffffffff;
	CTX_WR(bp, GET_CID_ADDR(RX_CID), BNX2_L2CTX_NX_BDHADDR_LO, val);

	/* Pre-fill the ring with skbs; stop early if allocation fails. */
	for (i = 0; i < bp->rx_ring_size; i++) {
		if (bnx2_alloc_rx_skb(bp, ring_prod) < 0) {
			break;
		}
		prod = NEXT_RX_BD(prod);
		ring_prod = RX_RING_IDX(prod);
	}
	bp->rx_prod = prod;

	/* Publish producer index and byte sequence via the mailbox. */
	REG_WR16(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BDIDX, prod);

	REG_WR(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BSEQ, bp->rx_prod_bseq);
}
3637
3638 static void
3639 bnx2_set_rx_ring_size(struct bnx2 *bp, u32 size)
3640 {
3641         u32 num_rings, max;
3642
3643         bp->rx_ring_size = size;
3644         num_rings = 1;
3645         while (size > MAX_RX_DESC_CNT) {
3646                 size -= MAX_RX_DESC_CNT;
3647                 num_rings++;
3648         }
3649         /* round to next power of 2 */
3650         max = MAX_RX_RINGS;
3651         while ((max & num_rings) == 0)
3652                 max >>= 1;
3653
3654         if (num_rings != max)
3655                 max <<= 1;
3656
3657         bp->rx_max_ring = max;
3658         bp->rx_max_ring_idx = (bp->rx_max_ring * RX_DESC_CNT) - 1;
3659 }
3660
3661 static void
3662 bnx2_free_tx_skbs(struct bnx2 *bp)
3663 {
3664         int i;
3665
3666         if (bp->tx_buf_ring == NULL)
3667                 return;
3668
3669         for (i = 0; i < TX_DESC_CNT; ) {
3670                 struct sw_bd *tx_buf = &bp->tx_buf_ring[i];
3671                 struct sk_buff *skb = tx_buf->skb;
3672                 int j, last;
3673
3674                 if (skb == NULL) {
3675                         i++;
3676                         continue;
3677                 }
3678
3679                 pci_unmap_single(bp->pdev, pci_unmap_addr(tx_buf, mapping),
3680                         skb_headlen(skb), PCI_DMA_TODEVICE);
3681
3682                 tx_buf->skb = NULL;
3683
3684                 last = skb_shinfo(skb)->nr_frags;
3685                 for (j = 0; j < last; j++) {
3686                         tx_buf = &bp->tx_buf_ring[i + j + 1];
3687                         pci_unmap_page(bp->pdev,
3688                                 pci_unmap_addr(tx_buf, mapping),
3689                                 skb_shinfo(skb)->frags[j].size,
3690                                 PCI_DMA_TODEVICE);
3691                 }
3692                 dev_kfree_skb(skb);
3693                 i += j + 1;
3694         }
3695
3696 }
3697
3698 static void
3699 bnx2_free_rx_skbs(struct bnx2 *bp)
3700 {
3701         int i;
3702
3703         if (bp->rx_buf_ring == NULL)
3704                 return;
3705
3706         for (i = 0; i < bp->rx_max_ring_idx; i++) {
3707                 struct sw_bd *rx_buf = &bp->rx_buf_ring[i];
3708                 struct sk_buff *skb = rx_buf->skb;
3709
3710                 if (skb == NULL)
3711                         continue;
3712
3713                 pci_unmap_single(bp->pdev, pci_unmap_addr(rx_buf, mapping),
3714                         bp->rx_buf_use_size, PCI_DMA_FROMDEVICE);
3715
3716                 rx_buf->skb = NULL;
3717
3718                 dev_kfree_skb(skb);
3719         }
3720 }
3721
/* Release all socket buffers still attached to the TX and RX rings. */
static void
bnx2_free_skbs(struct bnx2 *bp)
{
	bnx2_free_tx_skbs(bp);
	bnx2_free_rx_skbs(bp);
}
3728
3729 static int
3730 bnx2_reset_nic(struct bnx2 *bp, u32 reset_code)
3731 {
3732         int rc;
3733
3734         rc = bnx2_reset_chip(bp, reset_code);
3735         bnx2_free_skbs(bp);
3736         if (rc)
3737                 return rc;
3738
3739         if ((rc = bnx2_init_chip(bp)) != 0)
3740                 return rc;
3741
3742         bnx2_init_tx_ring(bp);
3743         bnx2_init_rx_ring(bp);
3744         return 0;
3745 }
3746
3747 static int
3748 bnx2_init_nic(struct bnx2 *bp)
3749 {
3750         int rc;
3751
3752         if ((rc = bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET)) != 0)
3753                 return rc;
3754
3755         spin_lock_bh(&bp->phy_lock);
3756         bnx2_init_phy(bp);
3757         spin_unlock_bh(&bp->phy_lock);
3758         bnx2_set_link(bp);
3759         return 0;
3760 }
3761
/* Self-test of chip registers.  For each table entry the test writes 0
 * and then all-ones, checking that read/write bits (rw_mask) accept the
 * written value and read-only bits (ro_mask) keep their saved value.
 * The original register value is restored in all cases.
 * Returns 0 on success, -ENODEV on the first failing register.
 */
static int
bnx2_test_registers(struct bnx2 *bp)
{
	int ret;
	int i;
	/* Table of testable registers: offset, flags (unused here),
	 * writable-bit mask, read-only-bit mask.  Terminated by 0xffff. */
	static const struct {
		u16   offset;
		u16   flags;
		u32   rw_mask;
		u32   ro_mask;
	} reg_tbl[] = {
		{ 0x006c, 0, 0x00000000, 0x0000003f },
		{ 0x0090, 0, 0xffffffff, 0x00000000 },
		{ 0x0094, 0, 0x00000000, 0x00000000 },

		{ 0x0404, 0, 0x00003f00, 0x00000000 },
		{ 0x0418, 0, 0x00000000, 0xffffffff },
		{ 0x041c, 0, 0x00000000, 0xffffffff },
		{ 0x0420, 0, 0x00000000, 0x80ffffff },
		{ 0x0424, 0, 0x00000000, 0x00000000 },
		{ 0x0428, 0, 0x00000000, 0x00000001 },
		{ 0x0450, 0, 0x00000000, 0x0000ffff },
		{ 0x0454, 0, 0x00000000, 0xffffffff },
		{ 0x0458, 0, 0x00000000, 0xffffffff },

		{ 0x0808, 0, 0x00000000, 0xffffffff },
		{ 0x0854, 0, 0x00000000, 0xffffffff },
		{ 0x0868, 0, 0x00000000, 0x77777777 },
		{ 0x086c, 0, 0x00000000, 0x77777777 },
		{ 0x0870, 0, 0x00000000, 0x77777777 },
		{ 0x0874, 0, 0x00000000, 0x77777777 },

		{ 0x0c00, 0, 0x00000000, 0x00000001 },
		{ 0x0c04, 0, 0x00000000, 0x03ff0001 },
		{ 0x0c08, 0, 0x0f0ff073, 0x00000000 },

		{ 0x1000, 0, 0x00000000, 0x00000001 },
		{ 0x1004, 0, 0x00000000, 0x000f0001 },

		{ 0x1408, 0, 0x01c00800, 0x00000000 },
		{ 0x149c, 0, 0x8000ffff, 0x00000000 },
		{ 0x14a8, 0, 0x00000000, 0x000001ff },
		{ 0x14ac, 0, 0x0fffffff, 0x10000000 },
		{ 0x14b0, 0, 0x00000002, 0x00000001 },
		{ 0x14b8, 0, 0x00000000, 0x00000000 },
		{ 0x14c0, 0, 0x00000000, 0x00000009 },
		{ 0x14c4, 0, 0x00003fff, 0x00000000 },
		{ 0x14cc, 0, 0x00000000, 0x00000001 },
		{ 0x14d0, 0, 0xffffffff, 0x00000000 },

		{ 0x1800, 0, 0x00000000, 0x00000001 },
		{ 0x1804, 0, 0x00000000, 0x00000003 },

		{ 0x2800, 0, 0x00000000, 0x00000001 },
		{ 0x2804, 0, 0x00000000, 0x00003f01 },
		{ 0x2808, 0, 0x0f3f3f03, 0x00000000 },
		{ 0x2810, 0, 0xffff0000, 0x00000000 },
		{ 0x2814, 0, 0xffff0000, 0x00000000 },
		{ 0x2818, 0, 0xffff0000, 0x00000000 },
		{ 0x281c, 0, 0xffff0000, 0x00000000 },
		{ 0x2834, 0, 0xffffffff, 0x00000000 },
		{ 0x2840, 0, 0x00000000, 0xffffffff },
		{ 0x2844, 0, 0x00000000, 0xffffffff },
		{ 0x2848, 0, 0xffffffff, 0x00000000 },
		{ 0x284c, 0, 0xf800f800, 0x07ff07ff },

		{ 0x2c00, 0, 0x00000000, 0x00000011 },
		{ 0x2c04, 0, 0x00000000, 0x00030007 },

		{ 0x3c00, 0, 0x00000000, 0x00000001 },
		{ 0x3c04, 0, 0x00000000, 0x00070000 },
		{ 0x3c08, 0, 0x00007f71, 0x07f00000 },
		{ 0x3c0c, 0, 0x1f3ffffc, 0x00000000 },
		{ 0x3c10, 0, 0xffffffff, 0x00000000 },
		{ 0x3c14, 0, 0x00000000, 0xffffffff },
		{ 0x3c18, 0, 0x00000000, 0xffffffff },
		{ 0x3c1c, 0, 0xfffff000, 0x00000000 },
		{ 0x3c20, 0, 0xffffff00, 0x00000000 },

		{ 0x5004, 0, 0x00000000, 0x0000007f },
		{ 0x5008, 0, 0x0f0007ff, 0x00000000 },
		{ 0x500c, 0, 0xf800f800, 0x07ff07ff },

		{ 0x5c00, 0, 0x00000000, 0x00000001 },
		{ 0x5c04, 0, 0x00000000, 0x0003000f },
		{ 0x5c08, 0, 0x00000003, 0x00000000 },
		{ 0x5c0c, 0, 0x0000fff8, 0x00000000 },
		{ 0x5c10, 0, 0x00000000, 0xffffffff },
		{ 0x5c80, 0, 0x00000000, 0x0f7113f1 },
		{ 0x5c84, 0, 0x00000000, 0x0000f333 },
		{ 0x5c88, 0, 0x00000000, 0x00077373 },
		{ 0x5c8c, 0, 0x00000000, 0x0007f737 },

		{ 0x6808, 0, 0x0000ff7f, 0x00000000 },
		{ 0x680c, 0, 0xffffffff, 0x00000000 },
		{ 0x6810, 0, 0xffffffff, 0x00000000 },
		{ 0x6814, 0, 0xffffffff, 0x00000000 },
		{ 0x6818, 0, 0xffffffff, 0x00000000 },
		{ 0x681c, 0, 0xffffffff, 0x00000000 },
		{ 0x6820, 0, 0x00ff00ff, 0x00000000 },
		{ 0x6824, 0, 0x00ff00ff, 0x00000000 },
		{ 0x6828, 0, 0x00ff00ff, 0x00000000 },
		{ 0x682c, 0, 0x03ff03ff, 0x00000000 },
		{ 0x6830, 0, 0x03ff03ff, 0x00000000 },
		{ 0x6834, 0, 0x03ff03ff, 0x00000000 },
		{ 0x6838, 0, 0x03ff03ff, 0x00000000 },
		{ 0x683c, 0, 0x0000ffff, 0x00000000 },
		{ 0x6840, 0, 0x00000ff0, 0x00000000 },
		{ 0x6844, 0, 0x00ffff00, 0x00000000 },
		{ 0x684c, 0, 0xffffffff, 0x00000000 },
		{ 0x6850, 0, 0x7f7f7f7f, 0x00000000 },
		{ 0x6854, 0, 0x7f7f7f7f, 0x00000000 },
		{ 0x6858, 0, 0x7f7f7f7f, 0x00000000 },
		{ 0x685c, 0, 0x7f7f7f7f, 0x00000000 },
		{ 0x6908, 0, 0x00000000, 0x0001ff0f },
		{ 0x690c, 0, 0x00000000, 0x0ffe00f0 },

		{ 0xffff, 0, 0x00000000, 0x00000000 },
	};

	ret = 0;
	for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
		u32 offset, rw_mask, ro_mask, save_val, val;

		offset = (u32) reg_tbl[i].offset;
		rw_mask = reg_tbl[i].rw_mask;
		ro_mask = reg_tbl[i].ro_mask;

		save_val = readl(bp->regview + offset);

		/* Write zero: writable bits must read back 0, read-only
		 * bits must be unchanged. */
		writel(0, bp->regview + offset);

		val = readl(bp->regview + offset);
		if ((val & rw_mask) != 0) {
			goto reg_test_err;
		}

		if ((val & ro_mask) != (save_val & ro_mask)) {
			goto reg_test_err;
		}

		/* Write all-ones: writable bits must read back 1, read-only
		 * bits must still be unchanged. */
		writel(0xffffffff, bp->regview + offset);

		val = readl(bp->regview + offset);
		if ((val & rw_mask) != rw_mask) {
			goto reg_test_err;
		}

		if ((val & ro_mask) != (save_val & ro_mask)) {
			goto reg_test_err;
		}

		writel(save_val, bp->regview + offset);
		continue;

reg_test_err:
		/* Restore the original value before reporting failure. */
		writel(save_val, bp->regview + offset);
		ret = -ENODEV;
		break;
	}
	return ret;
}
3924
3925 static int
3926 bnx2_do_mem_test(struct bnx2 *bp, u32 start, u32 size)
3927 {
3928         static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0x55555555,
3929                 0xaaaaaaaa , 0xaa55aa55, 0x55aa55aa };
3930         int i;
3931
3932         for (i = 0; i < sizeof(test_pattern) / 4; i++) {
3933                 u32 offset;
3934
3935                 for (offset = 0; offset < size; offset += 4) {
3936
3937                         REG_WR_IND(bp, start + offset, test_pattern[i]);
3938
3939                         if (REG_RD_IND(bp, start + offset) !=
3940                                 test_pattern[i]) {
3941                                 return -ENODEV;
3942                         }
3943                 }
3944         }
3945         return 0;
3946 }
3947
3948 static int
3949 bnx2_test_memory(struct bnx2 *bp)
3950 {
3951         int ret = 0;
3952         int i;
3953         static const struct {
3954                 u32   offset;
3955                 u32   len;
3956         } mem_tbl[] = {
3957                 { 0x60000,  0x4000 },
3958                 { 0xa0000,  0x3000 },
3959                 { 0xe0000,  0x4000 },
3960                 { 0x120000, 0x4000 },
3961                 { 0x1a0000, 0x4000 },
3962                 { 0x160000, 0x4000 },
3963                 { 0xffffffff, 0    },
3964         };
3965
3966         for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
3967                 if ((ret = bnx2_do_mem_test(bp, mem_tbl[i].offset,
3968                         mem_tbl[i].len)) != 0) {
3969                         return ret;
3970                 }
3971         }
3972
3973         return ret;
3974 }
3975
3976 #define BNX2_MAC_LOOPBACK       0
3977 #define BNX2_PHY_LOOPBACK       1
3978
/* Send one self-addressed 1514-byte frame through the selected loopback
 * path (MAC or PHY) and verify that exactly one frame arrives back with
 * no error flags and an intact payload.
 * Returns 0 on success, -EINVAL for an unknown mode, -ENOMEM if the
 * test skb cannot be allocated, -ENODEV on any verification failure.
 */
static int
bnx2_run_loopback(struct bnx2 *bp, int loopback_mode)
{
	unsigned int pkt_size, num_pkts, i;
	struct sk_buff *skb, *rx_skb;
	unsigned char *packet;
	u16 rx_start_idx, rx_idx;
	dma_addr_t map;
	struct tx_bd *txbd;
	struct sw_bd *rx_buf;
	struct l2_fhdr *rx_hdr;
	int ret = -ENODEV;

	if (loopback_mode == BNX2_MAC_LOOPBACK) {
		bp->loopback = MAC_LOOPBACK;
		bnx2_set_mac_loopback(bp);
	}
	else if (loopback_mode == BNX2_PHY_LOOPBACK) {
		bp->loopback = PHY_LOOPBACK;
		bnx2_set_phy_loopback(bp);
	}
	else
		return -EINVAL;

	/* Build the test frame: our own MAC as destination, zeroed
	 * source/type bytes, then an incrementing byte pattern. */
	pkt_size = 1514;
	skb = netdev_alloc_skb(bp->dev, pkt_size);
	if (!skb)
		return -ENOMEM;
	packet = skb_put(skb, pkt_size);
	memcpy(packet, bp->dev->dev_addr, 6);
	memset(packet + 6, 0x0, 8);
	for (i = 14; i < pkt_size; i++)
		packet[i] = (unsigned char) (i & 0xff);

	map = pci_map_single(bp->pdev, skb->data, pkt_size,
		PCI_DMA_TODEVICE);

	/* Force a status-block update so we snapshot the current RX
	 * consumer index before transmitting. */
	REG_WR(bp, BNX2_HC_COMMAND,
	       bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);

	REG_RD(bp, BNX2_HC_COMMAND);

	udelay(5);
	rx_start_idx = bp->status_blk->status_rx_quick_consumer_index0;

	num_pkts = 0;

	/* Queue the frame on the TX ring and ring the doorbell. */
	txbd = &bp->tx_desc_ring[TX_RING_IDX(bp->tx_prod)];

	txbd->tx_bd_haddr_hi = (u64) map >> 32;
	txbd->tx_bd_haddr_lo = (u64) map & 0xffffffff;
	txbd->tx_bd_mss_nbytes = pkt_size;
	txbd->tx_bd_vlan_tag_flags = TX_BD_FLAGS_START | TX_BD_FLAGS_END;

	num_pkts++;
	bp->tx_prod = NEXT_TX_BD(bp->tx_prod);
	bp->tx_prod_bseq += pkt_size;

	REG_WR16(bp, bp->tx_bidx_addr, bp->tx_prod);
	REG_WR(bp, bp->tx_bseq_addr, bp->tx_prod_bseq);

	udelay(100);

	/* Force another status-block update to pick up the TX completion
	 * and the looped-back RX frame. */
	REG_WR(bp, BNX2_HC_COMMAND,
	       bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);

	REG_RD(bp, BNX2_HC_COMMAND);

	udelay(5);

	pci_unmap_single(bp->pdev, map, pkt_size, PCI_DMA_TODEVICE);
	dev_kfree_skb(skb);

	/* The frame must have been fully transmitted... */
	if (bp->status_blk->status_tx_quick_consumer_index0 != bp->tx_prod) {
		goto loopback_test_done;
	}

	/* ...and exactly num_pkts frames received. */
	rx_idx = bp->status_blk->status_rx_quick_consumer_index0;
	if (rx_idx != rx_start_idx + num_pkts) {
		goto loopback_test_done;
	}

	rx_buf = &bp->rx_buf_ring[rx_start_idx];
	rx_skb = rx_buf->skb;

	/* The chip writes an l2_fhdr ahead of the received frame data. */
	rx_hdr = (struct l2_fhdr *) rx_skb->data;
	skb_reserve(rx_skb, bp->rx_offset);

	pci_dma_sync_single_for_cpu(bp->pdev,
		pci_unmap_addr(rx_buf, mapping),
		bp->rx_buf_size, PCI_DMA_FROMDEVICE);

	/* Reject frames flagged with any receive error. */
	if (rx_hdr->l2_fhdr_status &
		(L2_FHDR_ERRORS_BAD_CRC |
		L2_FHDR_ERRORS_PHY_DECODE |
		L2_FHDR_ERRORS_ALIGNMENT |
		L2_FHDR_ERRORS_TOO_SHORT |
		L2_FHDR_ERRORS_GIANT_FRAME)) {

		goto loopback_test_done;
	}

	/* Length check excludes the 4-byte CRC appended by the MAC. */
	if ((rx_hdr->l2_fhdr_pkt_len - 4) != pkt_size) {
		goto loopback_test_done;
	}

	/* Verify the payload pattern survived the round trip. */
	for (i = 14; i < pkt_size; i++) {
		if (*(rx_skb->data + i) != (unsigned char) (i & 0xff)) {
			goto loopback_test_done;
		}
	}

	ret = 0;

loopback_test_done:
	bp->loopback = 0;
	return ret;
}
4097
4098 #define BNX2_MAC_LOOPBACK_FAILED        1
4099 #define BNX2_PHY_LOOPBACK_FAILED        2
4100 #define BNX2_LOOPBACK_FAILED            (BNX2_MAC_LOOPBACK_FAILED |     \
4101                                          BNX2_PHY_LOOPBACK_FAILED)
4102
4103 static int
4104 bnx2_test_loopback(struct bnx2 *bp)
4105 {
4106         int rc = 0;
4107
4108         if (!netif_running(bp->dev))
4109                 return BNX2_LOOPBACK_FAILED;
4110
4111         bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET);
4112         spin_lock_bh(&bp->phy_lock);
4113         bnx2_init_phy(bp);
4114         spin_unlock_bh(&bp->phy_lock);
4115         if (bnx2_run_loopback(bp, BNX2_MAC_LOOPBACK))
4116                 rc |= BNX2_MAC_LOOPBACK_FAILED;
4117         if (bnx2_run_loopback(bp, BNX2_PHY_LOOPBACK))
4118                 rc |= BNX2_PHY_LOOPBACK_FAILED;
4119         return rc;
4120 }
4121
4122 #define NVRAM_SIZE 0x200
4123 #define CRC32_RESIDUAL 0xdebb20e3
4124
4125 static int
4126 bnx2_test_nvram(struct bnx2 *bp)
4127 {
4128         u32 buf[NVRAM_SIZE / 4];
4129         u8 *data = (u8 *) buf;
4130         int rc = 0;
4131         u32 magic, csum;
4132
4133         if ((rc = bnx2_nvram_read(bp, 0, data, 4)) != 0)
4134                 goto test_nvram_done;
4135
4136         magic = be32_to_cpu(buf[0]);
4137         if (magic != 0x669955aa) {
4138                 rc = -ENODEV;
4139                 goto test_nvram_done;
4140         }
4141
4142         if ((rc = bnx2_nvram_read(bp, 0x100, data, NVRAM_SIZE)) != 0)
4143                 goto test_nvram_done;
4144
4145         csum = ether_crc_le(0x100, data);
4146         if (csum != CRC32_RESIDUAL) {
4147                 rc = -ENODEV;
4148                 goto test_nvram_done;
4149         }
4150
4151         csum = ether_crc_le(0x100, data + 0x100);
4152         if (csum != CRC32_RESIDUAL) {
4153                 rc = -ENODEV;
4154         }
4155
4156 test_nvram_done:
4157         return rc;
4158 }
4159
4160 static int
4161 bnx2_test_link(struct bnx2 *bp)
4162 {
4163         u32 bmsr;
4164
4165         spin_lock_bh(&bp->phy_lock);
4166         bnx2_read_phy(bp, MII_BMSR, &bmsr);
4167         bnx2_read_phy(bp, MII_BMSR, &bmsr);
4168         spin_unlock_bh(&bp->phy_lock);
4169
4170         if (bmsr & BMSR_LSTATUS) {
4171                 return 0;
4172         }
4173         return -ENODEV;
4174 }
4175
4176 static int
4177 bnx2_test_intr(struct bnx2 *bp)
4178 {
4179         int i;
4180         u16 status_idx;
4181
4182         if (!netif_running(bp->dev))
4183                 return -ENODEV;
4184
4185         status_idx = REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff;
4186
4187         /* This register is not touched during run-time. */
4188         REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
4189         REG_RD(bp, BNX2_HC_COMMAND);
4190
4191         for (i = 0; i < 10; i++) {
4192                 if ((REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff) !=
4193                         status_idx) {
4194
4195                         break;
4196                 }
4197
4198                 msleep_interruptible(10);
4199         }
4200         if (i < 10)
4201                 return 0;
4202
4203         return -ENODEV;
4204 }
4205
/* Periodic SerDes link state machine for the 5706, run from bnx2_timer.
 * While autoneg has not produced a link, falls back to forced
 * 1000/full when a signal is detected but no config word is being
 * received (parallel detection); once linked via parallel detect and
 * the partner starts sending config, re-enables autonegotiation.
 */
static void
bnx2_5706_serdes_timer(struct bnx2 *bp)
{
	spin_lock(&bp->phy_lock);
	if (bp->serdes_an_pending)
		/* Hold off while a recent autoneg restart settles. */
		bp->serdes_an_pending--;
	else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
		u32 bmcr;

		bp->current_interval = bp->timer_interval;

		bnx2_read_phy(bp, MII_BMCR, &bmcr);

		if (bmcr & BMCR_ANENABLE) {
			u32 phy1, phy2;

			/* NOTE(review): 0x1c/0x17/0x15 are vendor shadow
			 * registers; bit meanings come from the inline
			 * comments below, not from visible definitions. */
			bnx2_write_phy(bp, 0x1c, 0x7c00);
			bnx2_read_phy(bp, 0x1c, &phy1);

			bnx2_write_phy(bp, 0x17, 0x0f01);
			bnx2_read_phy(bp, 0x15, &phy2);
			bnx2_write_phy(bp, 0x17, 0x0f01);
			bnx2_read_phy(bp, 0x15, &phy2);

			if ((phy1 & 0x10) &&	/* SIGNAL DETECT */
				!(phy2 & 0x20)) {	/* no CONFIG */

				/* Signal present but no config word from
				 * the partner: force 1000/full. */
				bmcr &= ~BMCR_ANENABLE;
				bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
				bnx2_write_phy(bp, MII_BMCR, bmcr);
				bp->phy_flags |= PHY_PARALLEL_DETECT_FLAG;
			}
		}
	}
	else if ((bp->link_up) && (bp->autoneg & AUTONEG_SPEED) &&
		 (bp->phy_flags & PHY_PARALLEL_DETECT_FLAG)) {
		u32 phy2;

		/* Linked via parallel detect; if the partner now sends
		 * CONFIG, go back to autonegotiation. */
		bnx2_write_phy(bp, 0x17, 0x0f01);
		bnx2_read_phy(bp, 0x15, &phy2);
		if (phy2 & 0x20) {
			u32 bmcr;

			bnx2_read_phy(bp, MII_BMCR, &bmcr);
			bmcr |= BMCR_ANENABLE;
			bnx2_write_phy(bp, MII_BMCR, bmcr);

			bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG;
		}
	} else
		bp->current_interval = bp->timer_interval;

	spin_unlock(&bp->phy_lock);
}
4260
/* Periodic SerDes link maintenance for 5708 parts, run from bnx2_timer().
 *
 * Only relevant for 2.5G-capable PHYs: while autoneg has not produced a
 * link, alternate between forcing 2.5G full duplex and re-enabling autoneg,
 * giving each mode a chance to establish link.  PHY access happens under
 * bp->phy_lock.
 */
static void
bnx2_5708_serdes_timer(struct bnx2 *bp)
{
        if ((bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) == 0) {
                /* Plain 1G SerDes: nothing to toggle, just clear any
                 * pending-autoneg countdown. */
                bp->serdes_an_pending = 0;
                return;
        }

        spin_lock(&bp->phy_lock);
        if (bp->serdes_an_pending)
                /* Still waiting out a previous autoneg attempt. */
                bp->serdes_an_pending--;
        else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
                u32 bmcr;

                bnx2_read_phy(bp, MII_BMCR, &bmcr);

                if (bmcr & BMCR_ANENABLE) {
                        /* Autoneg failed so far: try forcing 2.5G full
                         * duplex, and poll again sooner. */
                        bmcr &= ~BMCR_ANENABLE;
                        bmcr |= BMCR_FULLDPLX | BCM5708S_BMCR_FORCE_2500;
                        bnx2_write_phy(bp, MII_BMCR, bmcr);
                        bp->current_interval = SERDES_FORCED_TIMEOUT;
                } else {
                        /* Forced 2.5G did not link either: go back to
                         * autoneg and give it two timer ticks. */
                        bmcr &= ~(BMCR_FULLDPLX | BCM5708S_BMCR_FORCE_2500);
                        bmcr |= BMCR_ANENABLE;
                        bnx2_write_phy(bp, MII_BMCR, bmcr);
                        bp->serdes_an_pending = 2;
                        bp->current_interval = bp->timer_interval;
                }

        } else
                bp->current_interval = bp->timer_interval;

        spin_unlock(&bp->phy_lock);
}
4295
4296 static void
4297 bnx2_timer(unsigned long data)
4298 {
4299         struct bnx2 *bp = (struct bnx2 *) data;
4300         u32 msg;
4301
4302         if (!netif_running(bp->dev))
4303                 return;
4304
4305         if (atomic_read(&bp->intr_sem) != 0)
4306                 goto bnx2_restart_timer;
4307
4308         msg = (u32) ++bp->fw_drv_pulse_wr_seq;
4309         REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_PULSE_MB, msg);
4310
4311         bp->stats_blk->stat_FwRxDrop = REG_RD_IND(bp, BNX2_FW_RX_DROP_COUNT);
4312
4313         if (bp->phy_flags & PHY_SERDES_FLAG) {
4314                 if (CHIP_NUM(bp) == CHIP_NUM_5706)
4315                         bnx2_5706_serdes_timer(bp);
4316                 else if (CHIP_NUM(bp) == CHIP_NUM_5708)
4317                         bnx2_5708_serdes_timer(bp);
4318         }
4319
4320 bnx2_restart_timer:
4321         mod_timer(&bp->timer, jiffies + bp->current_interval);
4322 }
4323
/* Called with rtnl_lock */
/* ndo open: bring the interface up.
 *
 * Sequence: power up, mask interrupts, allocate rings, hook up the IRQ
 * (preferring MSI on chips that support it), initialize the NIC, then
 * start the heartbeat timer and verify MSI actually delivers an interrupt,
 * falling back to legacy INTx if it does not.  Each failure path unwinds
 * exactly what was set up before it.
 */
static int
bnx2_open(struct net_device *dev)
{
        struct bnx2 *bp = netdev_priv(dev);
        int rc;

        bnx2_set_power_state(bp, PCI_D0);
        bnx2_disable_int(bp);

        rc = bnx2_alloc_mem(bp);
        if (rc)
                return rc;

        /* 5706 A0/A1 do not get MSI -- presumably a chip erratum; the
         * module parameter can also force INTx. */
        if ((CHIP_ID(bp) != CHIP_ID_5706_A0) &&
                (CHIP_ID(bp) != CHIP_ID_5706_A1) &&
                !disable_msi) {

                if (pci_enable_msi(bp->pdev) == 0) {
                        bp->flags |= USING_MSI_FLAG;
                        rc = request_irq(bp->pdev->irq, bnx2_msi, 0, dev->name,
                                        dev);
                }
                else {
                        /* MSI unavailable: shared legacy interrupt. */
                        rc = request_irq(bp->pdev->irq, bnx2_interrupt,
                                        IRQF_SHARED, dev->name, dev);
                }
        }
        else {
                rc = request_irq(bp->pdev->irq, bnx2_interrupt, IRQF_SHARED,
                                dev->name, dev);
        }
        if (rc) {
                /* IRQ hookup failed: only the rings were allocated so far. */
                bnx2_free_mem(bp);
                return rc;
        }

        rc = bnx2_init_nic(bp);

        if (rc) {
                /* Undo IRQ, MSI, and ring/skb allocations in reverse order. */
                free_irq(bp->pdev->irq, dev);
                if (bp->flags & USING_MSI_FLAG) {
                        pci_disable_msi(bp->pdev);
                        bp->flags &= ~USING_MSI_FLAG;
                }
                bnx2_free_skbs(bp);
                bnx2_free_mem(bp);
                return rc;
        }

        mod_timer(&bp->timer, jiffies + bp->current_interval);

        atomic_set(&bp->intr_sem, 0);

        bnx2_enable_int(bp);

        if (bp->flags & USING_MSI_FLAG) {
                /* Test MSI to make sure it is working
                 * If MSI test fails, go back to INTx mode
                 */
                if (bnx2_test_intr(bp) != 0) {
                        printk(KERN_WARNING PFX "%s: No interrupt was generated"
                               " using MSI, switching to INTx mode. Please"
                               " report this failure to the PCI maintainer"
                               " and include system chipset information.\n",
                               bp->dev->name);

                        /* Tear down MSI and reinitialize with a shared
                         * INTx interrupt instead. */
                        bnx2_disable_int(bp);
                        free_irq(bp->pdev->irq, dev);
                        pci_disable_msi(bp->pdev);
                        bp->flags &= ~USING_MSI_FLAG;

                        rc = bnx2_init_nic(bp);

                        if (!rc) {
                                rc = request_irq(bp->pdev->irq, bnx2_interrupt,
                                        IRQF_SHARED, dev->name, dev);
                        }
                        if (rc) {
                                /* Timer was already armed above, so it must
                                 * be stopped on this failure path. */
                                bnx2_free_skbs(bp);
                                bnx2_free_mem(bp);
                                del_timer_sync(&bp->timer);
                                return rc;
                        }
                        bnx2_enable_int(bp);
                }
        }
        if (bp->flags & USING_MSI_FLAG) {
                printk(KERN_INFO PFX "%s: using MSI\n", dev->name);
        }

        netif_start_queue(dev);

        return 0;
}
4419
/* Deferred chip reset, scheduled from bnx2_tx_timeout().
 *
 * Stops the netif, re-initializes the NIC, and restarts.  in_reset_task
 * brackets the work so bnx2_close() can poll it and avoid tearing the
 * device down mid-reset (see the comment there about flush_scheduled_work
 * deadlocking under rtnl_lock).
 */
static void
bnx2_reset_task(struct work_struct *work)
{
        struct bnx2 *bp = container_of(work, struct bnx2, reset_task);

        if (!netif_running(bp->dev))
                return;

        bp->in_reset_task = 1;
        bnx2_netif_stop(bp);

        bnx2_init_nic(bp);

        /* intr_sem = 1 keeps the ISR quiet until bnx2_netif_start()
         * re-enables interrupt handling. */
        atomic_set(&bp->intr_sem, 1);
        bnx2_netif_start(bp);
        bp->in_reset_task = 0;
}
4437
/* ndo tx_timeout: the stack detected a hung transmitter.  Defer the
 * actual reset to process context via the reset_task workqueue item.
 */
static void
bnx2_tx_timeout(struct net_device *dev)
{
        struct bnx2 *bp = netdev_priv(dev);

        /* This allows the netif to be shutdown gracefully before resetting */
        schedule_work(&bp->reset_task);
}
4446
4447 #ifdef BCM_VLAN
/* Called with rtnl_lock */
/* Install (or clear, with vlgrp == NULL) the VLAN group.  The netif is
 * quiesced around the change so the RX path never sees a half-updated
 * group pointer, and the chip RX mode is reprogrammed to match.
 */
static void
bnx2_vlan_rx_register(struct net_device *dev, struct vlan_group *vlgrp)
{
        struct bnx2 *bp = netdev_priv(dev);

        bnx2_netif_stop(bp);

        bp->vlgrp = vlgrp;
        bnx2_set_rx_mode(dev);

        bnx2_netif_start(bp);
}
4461
/* Called with rtnl_lock */
/* Remove one VLAN id from the group.  As with registration, the netif is
 * stopped around the update and the RX mode reprogrammed afterwards.
 */
static void
bnx2_vlan_rx_kill_vid(struct net_device *dev, uint16_t vid)
{
        struct bnx2 *bp = netdev_priv(dev);

        bnx2_netif_stop(bp);
        vlan_group_set_device(bp->vlgrp, vid, NULL);
        bnx2_set_rx_mode(dev);

        bnx2_netif_start(bp);
}
4474 #endif
4475
/* Called with netif_tx_lock.
 * bnx2_tx_int() runs without netif_tx_lock unless it needs to call
 * netif_wake_queue().
 */
/* ndo hard_start_xmit: map the skb for DMA, build one TX buffer
 * descriptor per fragment (checksum offload, VLAN tag, and TSO fields go
 * in the per-BD flags), then ring the doorbell registers.  Returns
 * NETDEV_TX_OK, or NETDEV_TX_BUSY if the ring unexpectedly has no room.
 */
static int
bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
        struct bnx2 *bp = netdev_priv(dev);
        dma_addr_t mapping;
        struct tx_bd *txbd;
        struct sw_bd *tx_buf;
        u32 len, vlan_tag_flags, last_frag, mss;
        u16 prod, ring_prod;
        int i;

        /* The queue is stopped before the ring gets this full (see the
         * wake threshold at the bottom), so hitting this is a bug. */
        if (unlikely(bnx2_tx_avail(bp) < (skb_shinfo(skb)->nr_frags + 1))) {
                netif_stop_queue(dev);
                printk(KERN_ERR PFX "%s: BUG! Tx ring full when queue awake!\n",
                        dev->name);

                return NETDEV_TX_BUSY;
        }
        len = skb_headlen(skb);
        prod = bp->tx_prod;
        ring_prod = TX_RING_IDX(prod);

        vlan_tag_flags = 0;
        if (skb->ip_summed == CHECKSUM_PARTIAL) {
                /* Stack asked for checksum offload on this packet. */
                vlan_tag_flags |= TX_BD_FLAGS_TCP_UDP_CKSUM;
        }

        if (bp->vlgrp != 0 && vlan_tx_tag_present(skb)) {
                /* VLAN tag rides in the upper 16 bits of the flags word. */
                vlan_tag_flags |=
                        (TX_BD_FLAGS_VLAN_TAG | (vlan_tx_tag_get(skb) << 16));
        }
        if ((mss = skb_shinfo(skb)->gso_size) &&
                (skb->len > (bp->dev->mtu + ETH_HLEN))) {
                u32 tcp_opt_len, ip_tcp_len;
                struct iphdr *iph;

                /* TSO: headers will be rewritten below, so a cloned
                 * header area must be made private first. */
                if (skb_header_cloned(skb) &&
                    pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
                        dev_kfree_skb(skb);
                        return NETDEV_TX_OK;
                }

                vlan_tag_flags |= TX_BD_FLAGS_SW_LSO;

                tcp_opt_len = 0;
                if (tcp_hdr(skb)->doff > 5)
                        tcp_opt_len = tcp_optlen(skb);

                ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);

                /* Prime the IP/TCP headers the way the chip expects for
                 * segmentation: per-segment tot_len and a pseudo-header
                 * checksum with zero length. */
                iph = ip_hdr(skb);
                iph->check = 0;
                iph->tot_len = htons(mss + ip_tcp_len + tcp_opt_len);
                tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
                                                         iph->daddr, 0,
                                                         IPPROTO_TCP, 0);
                if (tcp_opt_len || (iph->ihl > 5)) {
                        /* Encode extra IP/TCP option words (beyond the
                         * 5-word base headers) in bits 8+ of the flags. */
                        vlan_tag_flags |= ((iph->ihl - 5) +
                                           (tcp_opt_len >> 2)) << 8;
                }
        }
        else
        {
                mss = 0;
        }

        /* NOTE(review): pci_map_single/pci_map_page returns are not
         * checked for mapping errors here. */
        mapping = pci_map_single(bp->pdev, skb->data, len, PCI_DMA_TODEVICE);

        tx_buf = &bp->tx_buf_ring[ring_prod];
        tx_buf->skb = skb;
        pci_unmap_addr_set(tx_buf, mapping, mapping);

        /* First BD covers the linear head and carries the START flag. */
        txbd = &bp->tx_desc_ring[ring_prod];

        txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
        txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
        txbd->tx_bd_mss_nbytes = len | (mss << 16);
        txbd->tx_bd_vlan_tag_flags = vlan_tag_flags | TX_BD_FLAGS_START;

        last_frag = skb_shinfo(skb)->nr_frags;

        /* One BD per page fragment. */
        for (i = 0; i < last_frag; i++) {
                skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

                prod = NEXT_TX_BD(prod);
                ring_prod = TX_RING_IDX(prod);
                txbd = &bp->tx_desc_ring[ring_prod];

                len = frag->size;
                mapping = pci_map_page(bp->pdev, frag->page, frag->page_offset,
                        len, PCI_DMA_TODEVICE);
                pci_unmap_addr_set(&bp->tx_buf_ring[ring_prod],
                                mapping, mapping);

                txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
                txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
                txbd->tx_bd_mss_nbytes = len | (mss << 16);
                txbd->tx_bd_vlan_tag_flags = vlan_tag_flags;

        }
        /* Last BD written (head or final fragment) gets the END flag. */
        txbd->tx_bd_vlan_tag_flags |= TX_BD_FLAGS_END;

        prod = NEXT_TX_BD(prod);
        bp->tx_prod_bseq += skb->len;

        /* Doorbell: publish the new producer index and byte sequence. */
        REG_WR16(bp, bp->tx_bidx_addr, prod);
        REG_WR(bp, bp->tx_bseq_addr, bp->tx_prod_bseq);

        mmiowb();

        bp->tx_prod = prod;
        dev->trans_start = jiffies;

        if (unlikely(bnx2_tx_avail(bp) <= MAX_SKB_FRAGS)) {
                /* Stop first, then re-check: bnx2_tx_int() may have freed
                 * space concurrently, in which case wake immediately. */
                netif_stop_queue(dev);
                if (bnx2_tx_avail(bp) > bp->tx_wake_thresh)
                        netif_wake_queue(dev);
        }

        return NETDEV_TX_OK;
}
4601
/* Called with rtnl_lock */
/* ndo stop: bring the interface down.
 *
 * Waits for any in-flight reset_task, quiesces the netif and timer,
 * tells the firmware why we are going away (affects wake-on-LAN), frees
 * the IRQ/MSI and all ring memory, then drops to D3hot.
 */
static int
bnx2_close(struct net_device *dev)
{
        struct bnx2 *bp = netdev_priv(dev);
        u32 reset_code;

        /* Calling flush_scheduled_work() may deadlock because
         * linkwatch_event() may be on the workqueue and it will try to get
         * the rtnl_lock which we are holding.
         */
        while (bp->in_reset_task)
                msleep(1);

        bnx2_netif_stop(bp);
        del_timer_sync(&bp->timer);
        /* Pick the firmware unload reason: link-down when WOL is
         * impossible, otherwise suspend with or without wake enabled. */
        if (bp->flags & NO_WOL_FLAG)
                reset_code = BNX2_DRV_MSG_CODE_UNLOAD_LNK_DN;
        else if (bp->wol)
                reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
        else
                reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
        bnx2_reset_chip(bp, reset_code);
        free_irq(bp->pdev->irq, dev);
        if (bp->flags & USING_MSI_FLAG) {
                pci_disable_msi(bp->pdev);
                bp->flags &= ~USING_MSI_FLAG;
        }
        bnx2_free_skbs(bp);
        bnx2_free_mem(bp);
        bp->link_up = 0;
        netif_carrier_off(bp->dev);
        bnx2_set_power_state(bp, PCI_D3hot);
        return 0;
}
4637
/* Assemble a hardware counter value from its _hi/_lo register halves.
 * On 64-bit kernels the full 64-bit value is reconstructed; on 32-bit
 * kernels only the low 32 bits are reported (net_device_stats fields are
 * unsigned long either way).  The whole expansion is parenthesized so the
 * macro composes safely inside larger expressions (CERT PRE01-C). */
#define GET_NET_STATS64(ctr)                                    \
        (((unsigned long) (ctr##_hi) << 32) +                   \
         (unsigned long) (ctr##_lo))

#define GET_NET_STATS32(ctr)            \
        (ctr##_lo)

#if (BITS_PER_LONG == 64)
#define GET_NET_STATS   GET_NET_STATS64
#else
#define GET_NET_STATS   GET_NET_STATS32
#endif
4650
/* ndo get_stats: translate the chip's DMA'd statistics block into the
 * generic net_device_stats counters.  Returns cached (possibly all-zero)
 * stats if the stats block has not been allocated yet.
 */
static struct net_device_stats *
bnx2_get_stats(struct net_device *dev)
{
        struct bnx2 *bp = netdev_priv(dev);
        struct statistics_block *stats_blk = bp->stats_blk;
        struct net_device_stats *net_stats = &bp->net_stats;

        if (bp->stats_blk == NULL) {
                return net_stats;
        }
        /* RX/TX packet totals are the sum of unicast, multicast and
         * broadcast 64-bit hardware counters. */
        net_stats->rx_packets =
                GET_NET_STATS(stats_blk->stat_IfHCInUcastPkts) +
                GET_NET_STATS(stats_blk->stat_IfHCInMulticastPkts) +
                GET_NET_STATS(stats_blk->stat_IfHCInBroadcastPkts);

        net_stats->tx_packets =
                GET_NET_STATS(stats_blk->stat_IfHCOutUcastPkts) +
                GET_NET_STATS(stats_blk->stat_IfHCOutMulticastPkts) +
                GET_NET_STATS(stats_blk->stat_IfHCOutBroadcastPkts);

        net_stats->rx_bytes =
                GET_NET_STATS(stats_blk->stat_IfHCInOctets);

        net_stats->tx_bytes =
                GET_NET_STATS(stats_blk->stat_IfHCOutOctets);

        net_stats->multicast =
                GET_NET_STATS(stats_blk->stat_IfHCOutMulticastPkts);

        net_stats->collisions =
                (unsigned long) stats_blk->stat_EtherStatsCollisions;

        /* Both runts and giants count as length errors. */
        net_stats->rx_length_errors =
                (unsigned long) (stats_blk->stat_EtherStatsUndersizePkts +
                stats_blk->stat_EtherStatsOverrsizePkts);

        net_stats->rx_over_errors =
                (unsigned long) stats_blk->stat_IfInMBUFDiscards;

        net_stats->rx_frame_errors =
                (unsigned long) stats_blk->stat_Dot3StatsAlignmentErrors;

        net_stats->rx_crc_errors =
                (unsigned long) stats_blk->stat_Dot3StatsFCSErrors;

        /* rx_errors is derived from the individual error classes above. */
        net_stats->rx_errors = net_stats->rx_length_errors +
                net_stats->rx_over_errors + net_stats->rx_frame_errors +
                net_stats->rx_crc_errors;

        net_stats->tx_aborted_errors =
                (unsigned long) (stats_blk->stat_Dot3StatsExcessiveCollisions +
                stats_blk->stat_Dot3StatsLateCollisions);

        /* Carrier-sense errors are not reported on 5706 or 5708 A0 --
         * presumably the counter is unreliable there; confirm against
         * chip errata. */
        if ((CHIP_NUM(bp) == CHIP_NUM_5706) ||
            (CHIP_ID(bp) == CHIP_ID_5708_A0))
                net_stats->tx_carrier_errors = 0;
        else {
                net_stats->tx_carrier_errors =
                        (unsigned long)
                        stats_blk->stat_Dot3StatsCarrierSenseErrors;
        }

        net_stats->tx_errors =
                (unsigned long)
                stats_blk->stat_emac_tx_stat_dot3statsinternalmactransmiterrors
                +
                net_stats->tx_aborted_errors +
                net_stats->tx_carrier_errors;

        /* Include drops counted by firmware (polled in bnx2_timer()). */
        net_stats->rx_missed_errors =
                (unsigned long) (stats_blk->stat_IfInMBUFDiscards +
                stats_blk->stat_FwRxDrop);

        return net_stats;
}
4726
4727 /* All ethtool functions called with rtnl_lock */
4728
4729 static int
4730 bnx2_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
4731 {
4732         struct bnx2 *bp = netdev_priv(dev);
4733
4734         cmd->supported = SUPPORTED_Autoneg;
4735         if (bp->phy_flags & PHY_SERDES_FLAG) {
4736                 cmd->supported |= SUPPORTED_1000baseT_Full |
4737                         SUPPORTED_FIBRE;
4738
4739                 cmd->port = PORT_FIBRE;
4740         }
4741         else {
4742                 cmd->supported |= SUPPORTED_10baseT_Half |
4743                         SUPPORTED_10baseT_Full |
4744                         SUPPORTED_100baseT_Half |
4745                         SUPPORTED_100baseT_Full |
4746                         SUPPORTED_1000baseT_Full |
4747                         SUPPORTED_TP;
4748
4749                 cmd->port = PORT_TP;
4750         }
4751
4752         cmd->advertising = bp->advertising;
4753
4754         if (bp->autoneg & AUTONEG_SPEED) {
4755                 cmd->autoneg = AUTONEG_ENABLE;
4756         }
4757         else {
4758                 cmd->autoneg = AUTONEG_DISABLE;
4759         }
4760
4761         if (netif_carrier_ok(dev)) {
4762                 cmd->speed = bp->line_speed;
4763                 cmd->duplex = bp->duplex;
4764         }
4765         else {
4766                 cmd->speed = -1;
4767                 cmd->duplex = -1;
4768         }
4769
4770         cmd->transceiver = XCVR_INTERNAL;
4771         cmd->phy_address = bp->phy_addr;
4772
4773         return 0;
4774 }
4775
/* ethtool set_settings: validate and apply a new speed/duplex/autoneg
 * configuration.  All requested values are staged in locals so nothing
 * is committed to *bp until every check has passed; returns -EINVAL on
 * any combination the hardware cannot do.
 */
static int
bnx2_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
        struct bnx2 *bp = netdev_priv(dev);
        u8 autoneg = bp->autoneg;
        u8 req_duplex = bp->req_duplex;
        u16 req_line_speed = bp->req_line_speed;
        u32 advertising = bp->advertising;

        if (cmd->autoneg == AUTONEG_ENABLE) {
                autoneg |= AUTONEG_SPEED;

                cmd->advertising &= ETHTOOL_ALL_COPPER_SPEED;

                /* allow advertising 1 speed */
                if ((cmd->advertising == ADVERTISED_10baseT_Half) ||
                        (cmd->advertising == ADVERTISED_10baseT_Full) ||
                        (cmd->advertising == ADVERTISED_100baseT_Half) ||
                        (cmd->advertising == ADVERTISED_100baseT_Full)) {

                        /* 10/100 modes are copper-only. */
                        if (bp->phy_flags & PHY_SERDES_FLAG)
                                return -EINVAL;

                        advertising = cmd->advertising;

                }
                else if (cmd->advertising == ADVERTISED_1000baseT_Full) {
                        advertising = cmd->advertising;
                }
                else if (cmd->advertising == ADVERTISED_1000baseT_Half) {
                        /* 1000 half duplex is never supported. */
                        return -EINVAL;
                }
                else {
                        /* Anything else: advertise everything the port
                         * type supports. */
                        if (bp->phy_flags & PHY_SERDES_FLAG) {
                                advertising = ETHTOOL_ALL_FIBRE_SPEED;
                        }
                        else {
                                advertising = ETHTOOL_ALL_COPPER_SPEED;
                        }
                }
                advertising |= ADVERTISED_Autoneg;
        }
        else {
                /* Forced mode: fibre only does 1000 or 2500 full duplex
                 * (2500 only on 2.5G-capable PHYs); copper cannot force
                 * 1000. */
                if (bp->phy_flags & PHY_SERDES_FLAG) {
                        if ((cmd->speed != SPEED_1000 &&
                             cmd->speed != SPEED_2500) ||
                            (cmd->duplex != DUPLEX_FULL))
                                return -EINVAL;

                        if (cmd->speed == SPEED_2500 &&
                            !(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
                                return -EINVAL;
                }
                else if (cmd->speed == SPEED_1000) {
                        return -EINVAL;
                }
                autoneg &= ~AUTONEG_SPEED;
                req_line_speed = cmd->speed;
                req_duplex = cmd->duplex;
                advertising = 0;
        }

        /* All checks passed: commit and reprogram the PHY. */
        bp->autoneg = autoneg;
        bp->advertising = advertising;
        bp->req_line_speed = req_line_speed;
        bp->req_duplex = req_duplex;

        spin_lock_bh(&bp->phy_lock);

        bnx2_setup_phy(bp);

        spin_unlock_bh(&bp->phy_lock);

        return 0;
}
4851
4852 static void
4853 bnx2_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
4854 {
4855         struct bnx2 *bp = netdev_priv(dev);
4856
4857         strcpy(info->driver, DRV_MODULE_NAME);
4858         strcpy(info->version, DRV_MODULE_VERSION);
4859         strcpy(info->bus_info, pci_name(bp->pdev));
4860         info->fw_version[0] = ((bp->fw_ver & 0xff000000) >> 24) + '0';
4861         info->fw_version[2] = ((bp->fw_ver & 0xff0000) >> 16) + '0';
4862         info->fw_version[4] = ((bp->fw_ver & 0xff00) >> 8) + '0';
4863         info->fw_version[1] = info->fw_version[3] = '.';
4864         info->fw_version[5] = 0;
4865 }
4866
/* Size in bytes of the register dump produced by bnx2_get_regs(). */
#define BNX2_REGDUMP_LEN                (32 * 1024)

/* ethtool get_regs_len: fixed-size dump, independent of chip variant. */
static int
bnx2_get_regs_len(struct net_device *dev)
{
        return BNX2_REGDUMP_LEN;
}
4874
4875 static void
4876 bnx2_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *_p)
4877 {
4878         u32 *p = _p, i, offset;
4879         u8 *orig_p = _p;
4880         struct bnx2 *bp = netdev_priv(dev);
4881         u32 reg_boundaries[] = { 0x0000, 0x0098, 0x0400, 0x045c,
4882                                  0x0800, 0x0880, 0x0c00, 0x0c10,
4883                                  0x0c30, 0x0d08, 0x1000, 0x101c,
4884                                  0x1040, 0x1048, 0x1080, 0x10a4,
4885                                  0x1400, 0x1490, 0x1498, 0x14f0,
4886                                  0x1500, 0x155c, 0x1580, 0x15dc,
4887                                  0x1600, 0x1658, 0x1680, 0x16d8,
4888                                  0x1800, 0x1820, 0x1840, 0x1854,
4889                                  0x1880, 0x1894, 0x1900, 0x1984,
4890                                  0x1c00, 0x1c0c, 0x1c40, 0x1c54,
4891                                  0x1c80, 0x1c94, 0x1d00, 0x1d84,
4892                                  0x2000, 0x2030, 0x23c0, 0x2400,
4893                                  0x2800, 0x2820, 0x2830, 0x2850,
4894                                  0x2b40, 0x2c10, 0x2fc0, 0x3058,
4895                                  0x3c00, 0x3c94, 0x4000, 0x4010,
4896                                  0x4080, 0x4090, 0x43c0, 0x4458,
4897                                  0x4c00, 0x4c18, 0x4c40, 0x4c54,
4898                                  0x4fc0, 0x5010, 0x53c0, 0x5444,
4899                                  0x5c00, 0x5c18, 0x5c80, 0x5c90,
4900                                  0x5fc0, 0x6000, 0x6400, 0x6428,
4901                                  0x6800, 0x6848, 0x684c, 0x6860,
4902                                  0x6888, 0x6910, 0x8000 };
4903
4904         regs->version = 0;
4905
4906         memset(p, 0, BNX2_REGDUMP_LEN);
4907
4908         if (!netif_running(bp->dev))
4909                 return;
4910
4911         i = 0;
4912         offset = reg_boundaries[0];
4913         p += offset;
4914         while (offset < BNX2_REGDUMP_LEN) {
4915                 *p++ = REG_RD(bp, offset);
4916                 offset += 4;
4917                 if (offset == reg_boundaries[i + 1]) {
4918                         offset = reg_boundaries[i + 2];
4919                         p = (u32 *) (orig_p + offset);
4920                         i += 2;
4921                 }
4922         }
4923 }
4924
4925 static void
4926 bnx2_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
4927 {
4928         struct bnx2 *bp = netdev_priv(dev);
4929
4930         if (bp->flags & NO_WOL_FLAG) {
4931                 wol->supported = 0;
4932                 wol->wolopts = 0;
4933         }
4934         else {
4935                 wol->supported = WAKE_MAGIC;
4936                 if (bp->wol)
4937                         wol->wolopts = WAKE_MAGIC;
4938                 else
4939                         wol->wolopts = 0;
4940         }
4941         memset(&wol->sopass, 0, sizeof(wol->sopass));
4942 }
4943
4944 static int
4945 bnx2_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
4946 {
4947         struct bnx2 *bp = netdev_priv(dev);
4948
4949         if (wol->wolopts & ~WAKE_MAGIC)
4950                 return -EINVAL;
4951
4952         if (wol->wolopts & WAKE_MAGIC) {
4953                 if (bp->flags & NO_WOL_FLAG)
4954                         return -EINVAL;
4955
4956                 bp->wol = 1;
4957         }
4958         else {
4959                 bp->wol = 0;
4960         }
4961         return 0;
4962 }
4963
/* ethtool nway_reset: restart autonegotiation.  Only valid when autoneg
 * is enabled.  For SerDes ports the link is first forced down (loopback)
 * so the partner notices the renegotiation, and the serdes timer state
 * is re-armed.
 */
static int
bnx2_nway_reset(struct net_device *dev)
{
        struct bnx2 *bp = netdev_priv(dev);
        u32 bmcr;

        if (!(bp->autoneg & AUTONEG_SPEED)) {
                return -EINVAL;
        }

        spin_lock_bh(&bp->phy_lock);

        /* Force a link down visible on the other side */
        if (bp->phy_flags & PHY_SERDES_FLAG) {
                bnx2_write_phy(bp, MII_BMCR, BMCR_LOOPBACK);
                /* Drop the lock while sleeping; msleep() cannot be called
                 * in atomic context. */
                spin_unlock_bh(&bp->phy_lock);

                msleep(20);

                spin_lock_bh(&bp->phy_lock);

                /* Tell the serdes timer an autoneg attempt is in flight. */
                bp->current_interval = SERDES_AN_TIMEOUT;
                bp->serdes_an_pending = 1;
                mod_timer(&bp->timer, jiffies + bp->current_interval);
        }

        bnx2_read_phy(bp, MII_BMCR, &bmcr);
        bmcr &= ~BMCR_LOOPBACK;
        bnx2_write_phy(bp, MII_BMCR, bmcr | BMCR_ANRESTART | BMCR_ANENABLE);

        spin_unlock_bh(&bp->phy_lock);

        return 0;
}
4998
4999 static int
5000 bnx2_get_eeprom_len(struct net_device *dev)
5001 {
5002         struct bnx2 *bp = netdev_priv(dev);
5003
5004         if (bp->flash_info == NULL)
5005                 return 0;
5006
5007         return (int) bp->flash_size;
5008 }
5009
5010 static int
5011 bnx2_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
5012                 u8 *eebuf)
5013 {
5014         struct bnx2 *bp = netdev_priv(dev);
5015         int rc;
5016
5017         /* parameters already validated in ethtool_get_eeprom */
5018
5019         rc = bnx2_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
5020
5021         return rc;
5022 }
5023
5024 static int
5025 bnx2_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
5026                 u8 *eebuf)
5027 {
5028         struct bnx2 *bp = netdev_priv(dev);
5029         int rc;
5030
5031         /* parameters already validated in ethtool_set_eeprom */
5032
5033         rc = bnx2_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
5034
5035         return rc;
5036 }
5037
5038 static int
5039 bnx2_get_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
5040 {
5041         struct bnx2 *bp = netdev_priv(dev);
5042
5043         memset(coal, 0, sizeof(struct ethtool_coalesce));
5044
5045         coal->rx_coalesce_usecs = bp->rx_ticks;
5046         coal->rx_max_coalesced_frames = bp->rx_quick_cons_trip;
5047         coal->rx_coalesce_usecs_irq = bp->rx_ticks_int;
5048         coal->rx_max_coalesced_frames_irq = bp->rx_quick_cons_trip_int;
5049
5050         coal->tx_coalesce_usecs = bp->tx_ticks;
5051         coal->tx_max_coalesced_frames = bp->tx_quick_cons_trip;
5052         coal->tx_coalesce_usecs_irq = bp->tx_ticks_int;
5053         coal->tx_max_coalesced_frames_irq = bp->tx_quick_cons_trip_int;
5054
5055         coal->stats_block_coalesce_usecs = bp->stats_ticks;
5056
5057         return 0;
5058 }
5059
5060 static int
5061 bnx2_set_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
5062 {
5063         struct bnx2 *bp = netdev_priv(dev);
5064
5065         bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
5066         if (bp->rx_ticks > 0x3ff) bp->rx_ticks = 0x3ff;
5067
5068         bp->rx_quick_cons_trip = (u16) coal->rx_max_coalesced_frames;
5069         if (bp->rx_quick_cons_trip > 0xff) bp->rx_quick_cons_trip = 0xff;
5070
5071         bp->rx_ticks_int = (u16) coal->rx_coalesce_usecs_irq;
5072         if (bp->rx_ticks_int > 0x3ff) bp->rx_ticks_int = 0x3ff;
5073
5074         bp->rx_quick_cons_trip_int = (u16) coal->rx_max_coalesced_frames_irq;
5075         if (bp->rx_quick_cons_trip_int > 0xff)
5076                 bp->rx_quick_cons_trip_int = 0xff;
5077
5078         bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
5079         if (bp->tx_ticks > 0x3ff) bp->tx_ticks = 0x3ff;
5080
5081         bp->tx_quick_cons_trip = (u16) coal->tx_max_coalesced_frames;
5082         if (bp->tx_quick_cons_trip > 0xff) bp->tx_quick_cons_trip = 0xff;
5083
5084         bp->tx_ticks_int = (u16) coal->tx_coalesce_usecs_irq;
5085         if (bp->tx_ticks_int > 0x3ff) bp->tx_ticks_int = 0x3ff;
5086
5087         bp->tx_quick_cons_trip_int = (u16) coal->tx_max_coalesced_frames_irq;
5088         if (bp->tx_quick_cons_trip_int > 0xff) bp->tx_quick_cons_trip_int =
5089                 0xff;
5090
5091         bp->stats_ticks = coal->stats_block_coalesce_usecs;
5092         if (bp->stats_ticks > 0xffff00) bp->stats_ticks = 0xffff00;
5093         bp->stats_ticks &= 0xffff00;
5094
5095         if (netif_running(bp->dev)) {
5096                 bnx2_netif_stop(bp);
5097                 bnx2_init_nic(bp);
5098                 bnx2_netif_start(bp);
5099         }
5100
5101         return 0;
5102 }
5103
5104 static void
5105 bnx2_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
5106 {
5107         struct bnx2 *bp = netdev_priv(dev);
5108
5109         ering->rx_max_pending = MAX_TOTAL_RX_DESC_CNT;
5110         ering->rx_mini_max_pending = 0;
5111         ering->rx_jumbo_max_pending = 0;
5112
5113         ering->rx_pending = bp->rx_ring_size;
5114         ering->rx_mini_pending = 0;
5115         ering->rx_jumbo_pending = 0;
5116
5117         ering->tx_max_pending = MAX_TX_DESC_CNT;
5118         ering->tx_pending = bp->tx_ring_size;
5119 }
5120
5121 static int
5122 bnx2_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
5123 {
5124         struct bnx2 *bp = netdev_priv(dev);
5125
5126         if ((ering->rx_pending > MAX_TOTAL_RX_DESC_CNT) ||
5127                 (ering->tx_pending > MAX_TX_DESC_CNT) ||
5128                 (ering->tx_pending <= MAX_SKB_FRAGS)) {
5129
5130                 return -EINVAL;
5131         }
5132         if (netif_running(bp->dev)) {
5133                 bnx2_netif_stop(bp);
5134                 bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
5135                 bnx2_free_skbs(bp);
5136                 bnx2_free_mem(bp);
5137         }
5138
5139         bnx2_set_rx_ring_size(bp, ering->rx_pending);
5140         bp->tx_ring_size = ering->tx_pending;
5141
5142         if (netif_running(bp->dev)) {
5143                 int rc;
5144
5145                 rc = bnx2_alloc_mem(bp);
5146                 if (rc)
5147                         return rc;
5148                 bnx2_init_nic(bp);
5149                 bnx2_netif_start(bp);
5150         }
5151
5152         return 0;
5153 }
5154
5155 static void
5156 bnx2_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
5157 {
5158         struct bnx2 *bp = netdev_priv(dev);
5159
5160         epause->autoneg = ((bp->autoneg & AUTONEG_FLOW_CTRL) != 0);
5161         epause->rx_pause = ((bp->flow_ctrl & FLOW_CTRL_RX) != 0);
5162         epause->tx_pause = ((bp->flow_ctrl & FLOW_CTRL_TX) != 0);
5163 }
5164
5165 static int
5166 bnx2_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
5167 {
5168         struct bnx2 *bp = netdev_priv(dev);
5169
5170         bp->req_flow_ctrl = 0;
5171         if (epause->rx_pause)
5172                 bp->req_flow_ctrl |= FLOW_CTRL_RX;
5173         if (epause->tx_pause)
5174                 bp->req_flow_ctrl |= FLOW_CTRL_TX;
5175
5176         if (epause->autoneg) {
5177                 bp->autoneg |= AUTONEG_FLOW_CTRL;
5178         }
5179         else {
5180                 bp->autoneg &= ~AUTONEG_FLOW_CTRL;
5181         }
5182
5183         spin_lock_bh(&bp->phy_lock);
5184
5185         bnx2_setup_phy(bp);
5186
5187         spin_unlock_bh(&bp->phy_lock);
5188
5189         return 0;
5190 }
5191
5192 static u32
5193 bnx2_get_rx_csum(struct net_device *dev)
5194 {
5195         struct bnx2 *bp = netdev_priv(dev);
5196
5197         return bp->rx_csum;
5198 }
5199
5200 static int
5201 bnx2_set_rx_csum(struct net_device *dev, u32 data)
5202 {
5203         struct bnx2 *bp = netdev_priv(dev);
5204
5205         bp->rx_csum = data;
5206         return 0;
5207 }
5208
5209 static int
5210 bnx2_set_tso(struct net_device *dev, u32 data)
5211 {
5212         if (data)
5213                 dev->features |= NETIF_F_TSO | NETIF_F_TSO_ECN;
5214         else
5215                 dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO_ECN);
5216         return 0;
5217 }
5218
#define BNX2_NUM_STATS 46

/* ethtool statistics names, reported for ETH_SS_STATS by
 * bnx2_get_strings().  The order must match bnx2_stats_offset_arr[]
 * and the per-chip stats length arrays below.
 */
static struct {
	char string[ETH_GSTRING_LEN];
} bnx2_stats_str_arr[BNX2_NUM_STATS] = {
	{ "rx_bytes" },
	{ "rx_error_bytes" },
	{ "tx_bytes" },
	{ "tx_error_bytes" },
	{ "rx_ucast_packets" },
	{ "rx_mcast_packets" },
	{ "rx_bcast_packets" },
	{ "tx_ucast_packets" },
	{ "tx_mcast_packets" },
	{ "tx_bcast_packets" },
	{ "tx_mac_errors" },
	{ "tx_carrier_errors" },
	{ "rx_crc_errors" },
	{ "rx_align_errors" },
	{ "tx_single_collisions" },
	{ "tx_multi_collisions" },
	{ "tx_deferred" },
	{ "tx_excess_collisions" },
	{ "tx_late_collisions" },
	{ "tx_total_collisions" },
	{ "rx_fragments" },
	{ "rx_jabbers" },
	{ "rx_undersize_packets" },
	{ "rx_oversize_packets" },
	{ "rx_64_byte_packets" },
	{ "rx_65_to_127_byte_packets" },
	{ "rx_128_to_255_byte_packets" },
	{ "rx_256_to_511_byte_packets" },
	{ "rx_512_to_1023_byte_packets" },
	{ "rx_1024_to_1522_byte_packets" },
	{ "rx_1523_to_9022_byte_packets" },
	{ "tx_64_byte_packets" },
	{ "tx_65_to_127_byte_packets" },
	{ "tx_128_to_255_byte_packets" },
	{ "tx_256_to_511_byte_packets" },
	{ "tx_512_to_1023_byte_packets" },
	{ "tx_1024_to_1522_byte_packets" },
	{ "tx_1523_to_9022_byte_packets" },
	{ "rx_xon_frames" },
	{ "rx_xoff_frames" },
	{ "tx_xon_frames" },
	{ "tx_xoff_frames" },
	{ "rx_mac_ctrl_frames" },
	{ "rx_filtered_packets" },
	{ "rx_discards" },
	{ "rx_fw_discards" },
};
5271
/* Convert a statistics_block field offset (bytes) to a 32-bit word index. */
#define STATS_OFFSET32(offset_name) (offsetof(struct statistics_block, offset_name) / 4)

/* Word offset into the hardware stats block for each counter in
 * bnx2_stats_str_arr[]; 64-bit counters point at their high word.
 */
static const unsigned long bnx2_stats_offset_arr[BNX2_NUM_STATS] = {
    STATS_OFFSET32(stat_IfHCInOctets_hi),
    STATS_OFFSET32(stat_IfHCInBadOctets_hi),
    STATS_OFFSET32(stat_IfHCOutOctets_hi),
    STATS_OFFSET32(stat_IfHCOutBadOctets_hi),
    STATS_OFFSET32(stat_IfHCInUcastPkts_hi),
    STATS_OFFSET32(stat_IfHCInMulticastPkts_hi),
    STATS_OFFSET32(stat_IfHCInBroadcastPkts_hi),
    STATS_OFFSET32(stat_IfHCOutUcastPkts_hi),
    STATS_OFFSET32(stat_IfHCOutMulticastPkts_hi),
    STATS_OFFSET32(stat_IfHCOutBroadcastPkts_hi),
    STATS_OFFSET32(stat_emac_tx_stat_dot3statsinternalmactransmiterrors),
    STATS_OFFSET32(stat_Dot3StatsCarrierSenseErrors),
    STATS_OFFSET32(stat_Dot3StatsFCSErrors),
    STATS_OFFSET32(stat_Dot3StatsAlignmentErrors),
    STATS_OFFSET32(stat_Dot3StatsSingleCollisionFrames),
    STATS_OFFSET32(stat_Dot3StatsMultipleCollisionFrames),
    STATS_OFFSET32(stat_Dot3StatsDeferredTransmissions),
    STATS_OFFSET32(stat_Dot3StatsExcessiveCollisions),
    STATS_OFFSET32(stat_Dot3StatsLateCollisions),
    STATS_OFFSET32(stat_EtherStatsCollisions),
    STATS_OFFSET32(stat_EtherStatsFragments),
    STATS_OFFSET32(stat_EtherStatsJabbers),
    STATS_OFFSET32(stat_EtherStatsUndersizePkts),
    STATS_OFFSET32(stat_EtherStatsOverrsizePkts),
    STATS_OFFSET32(stat_EtherStatsPktsRx64Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx65Octetsto127Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx128Octetsto255Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx256Octetsto511Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx512Octetsto1023Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx1024Octetsto1522Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx1523Octetsto9022Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx64Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx65Octetsto127Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx128Octetsto255Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx256Octetsto511Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx512Octetsto1023Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx1024Octetsto1522Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx1523Octetsto9022Octets),
    STATS_OFFSET32(stat_XonPauseFramesReceived),
    STATS_OFFSET32(stat_XoffPauseFramesReceived),
    STATS_OFFSET32(stat_OutXonSent),
    STATS_OFFSET32(stat_OutXoffSent),
    STATS_OFFSET32(stat_MacControlFramesReceived),
    STATS_OFFSET32(stat_IfInFramesL2FilterDiscards),
    STATS_OFFSET32(stat_IfInMBUFDiscards),
    STATS_OFFSET32(stat_FwRxDrop),
};
5322
/* Per-counter width in bytes (8, 4, or 0 = counter not reported) for
 * 5706 A0/A1/A2 and 5708 A0 chips.  stat_IfHCInBadOctets and
 * stat_Dot3StatsCarrierSenseErrors are skipped because of errata.
 */
static u8 bnx2_5706_stats_len_arr[BNX2_NUM_STATS] = {
	8,0,8,8,8,8,8,8,8,8,
	4,0,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,
};

/* Per-counter width in bytes for later chips; only stat_IfHCInBadOctets
 * remains skipped (width 0).
 */
static u8 bnx2_5708_stats_len_arr[BNX2_NUM_STATS] = {
	8,0,8,8,8,8,8,8,8,8,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,
};
5341
#define BNX2_NUM_TESTS 6

/* Self-test names reported for ETH_SS_TEST; the order matches the
 * buf[] result indices filled in by bnx2_self_test().
 */
static struct {
	char string[ETH_GSTRING_LEN];
} bnx2_tests_str_arr[BNX2_NUM_TESTS] = {
	{ "register_test (offline)" },
	{ "memory_test (offline)" },
	{ "loopback_test (offline)" },
	{ "nvram_test (online)" },
	{ "interrupt_test (online)" },
	{ "link_test (online)" },
};
5354
static int
bnx2_self_test_count(struct net_device *dev)
{
	/* Number of self-test results reported by bnx2_self_test(). */
	return BNX2_NUM_TESTS;
}
5360
static void
bnx2_self_test(struct net_device *dev, struct ethtool_test *etest, u64 *buf)
{
	struct bnx2 *bp = netdev_priv(dev);

	/* Run the ethtool self tests.  buf[] entries are set non-zero on
	 * failure, in the order of bnx2_tests_str_arr[].
	 */
	memset(buf, 0, sizeof(u64) * BNX2_NUM_TESTS);
	if (etest->flags & ETH_TEST_FL_OFFLINE) {
		int i;

		/* Offline tests need exclusive access: stop traffic and
		 * put the chip into diagnostic mode.
		 */
		bnx2_netif_stop(bp);
		bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_DIAG);
		bnx2_free_skbs(bp);

		if (bnx2_test_registers(bp) != 0) {
			buf[0] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}
		if (bnx2_test_memory(bp) != 0) {
			buf[1] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}
		/* Loopback test returns a non-zero failure code directly. */
		if ((buf[2] = bnx2_test_loopback(bp)) != 0)
			etest->flags |= ETH_TEST_FL_FAILED;

		/* Restore normal operation after the diagnostic reset. */
		if (!netif_running(bp->dev)) {
			bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
		}
		else {
			bnx2_init_nic(bp);
			bnx2_netif_start(bp);
		}

		/* wait for link up */
		for (i = 0; i < 7; i++) {
			if (bp->link_up)
				break;
			msleep_interruptible(1000);
		}
	}

	if (bnx2_test_nvram(bp) != 0) {
		buf[3] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}
	if (bnx2_test_intr(bp) != 0) {
		buf[4] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}

	if (bnx2_test_link(bp) != 0) {
		buf[5] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;

	}
}
5416
5417 static void
5418 bnx2_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
5419 {
5420         switch (stringset) {
5421         case ETH_SS_STATS:
5422                 memcpy(buf, bnx2_stats_str_arr,
5423                         sizeof(bnx2_stats_str_arr));
5424                 break;
5425         case ETH_SS_TEST:
5426                 memcpy(buf, bnx2_tests_str_arr,
5427                         sizeof(bnx2_tests_str_arr));
5428                 break;
5429         }
5430 }
5431
static int
bnx2_get_stats_count(struct net_device *dev)
{
	/* Number of counters reported by bnx2_get_ethtool_stats(). */
	return BNX2_NUM_STATS;
}
5437
5438 static void
5439 bnx2_get_ethtool_stats(struct net_device *dev,
5440                 struct ethtool_stats *stats, u64 *buf)
5441 {
5442         struct bnx2 *bp = netdev_priv(dev);
5443         int i;
5444         u32 *hw_stats = (u32 *) bp->stats_blk;
5445         u8 *stats_len_arr = NULL;
5446
5447         if (hw_stats == NULL) {
5448                 memset(buf, 0, sizeof(u64) * BNX2_NUM_STATS);
5449                 return;
5450         }
5451
5452         if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
5453             (CHIP_ID(bp) == CHIP_ID_5706_A1) ||
5454             (CHIP_ID(bp) == CHIP_ID_5706_A2) ||
5455             (CHIP_ID(bp) == CHIP_ID_5708_A0))
5456                 stats_len_arr = bnx2_5706_stats_len_arr;
5457         else
5458                 stats_len_arr = bnx2_5708_stats_len_arr;
5459
5460         for (i = 0; i < BNX2_NUM_STATS; i++) {
5461                 if (stats_len_arr[i] == 0) {
5462                         /* skip this counter */
5463                         buf[i] = 0;
5464                         continue;
5465                 }
5466                 if (stats_len_arr[i] == 4) {
5467                         /* 4-byte counter */
5468                         buf[i] = (u64)
5469                                 *(hw_stats + bnx2_stats_offset_arr[i]);
5470                         continue;
5471                 }
5472                 /* 8-byte counter */
5473                 buf[i] = (((u64) *(hw_stats +
5474                                         bnx2_stats_offset_arr[i])) << 32) +
5475                                 *(hw_stats + bnx2_stats_offset_arr[i] + 1);
5476         }
5477 }
5478
5479 static int
5480 bnx2_phys_id(struct net_device *dev, u32 data)
5481 {
5482         struct bnx2 *bp = netdev_priv(dev);
5483         int i;
5484         u32 save;
5485
5486         if (data == 0)
5487                 data = 2;
5488
5489         save = REG_RD(bp, BNX2_MISC_CFG);
5490         REG_WR(bp, BNX2_MISC_CFG, BNX2_MISC_CFG_LEDMODE_MAC);
5491
5492         for (i = 0; i < (data * 2); i++) {
5493                 if ((i % 2) == 0) {
5494                         REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE);
5495                 }
5496                 else {
5497                         REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE |
5498                                 BNX2_EMAC_LED_1000MB_OVERRIDE |
5499                                 BNX2_EMAC_LED_100MB_OVERRIDE |
5500                                 BNX2_EMAC_LED_10MB_OVERRIDE |
5501                                 BNX2_EMAC_LED_TRAFFIC_OVERRIDE |
5502                                 BNX2_EMAC_LED_TRAFFIC);
5503                 }
5504                 msleep_interruptible(500);
5505                 if (signal_pending(current))
5506                         break;
5507         }
5508         REG_WR(bp, BNX2_EMAC_LED, 0);
5509         REG_WR(bp, BNX2_MISC_CFG, save);
5510         return 0;
5511 }
5512
/* ethtool operations table; generic helpers (ethtool_op_*) are used
 * where no device-specific handling is needed.
 */
static const struct ethtool_ops bnx2_ethtool_ops = {
	.get_settings		= bnx2_get_settings,
	.set_settings		= bnx2_set_settings,
	.get_drvinfo		= bnx2_get_drvinfo,
	.get_regs_len		= bnx2_get_regs_len,
	.get_regs		= bnx2_get_regs,
	.get_wol		= bnx2_get_wol,
	.set_wol		= bnx2_set_wol,
	.nway_reset		= bnx2_nway_reset,
	.get_link		= ethtool_op_get_link,
	.get_eeprom_len		= bnx2_get_eeprom_len,
	.get_eeprom		= bnx2_get_eeprom,
	.set_eeprom		= bnx2_set_eeprom,
	.get_coalesce		= bnx2_get_coalesce,
	.set_coalesce		= bnx2_set_coalesce,
	.get_ringparam		= bnx2_get_ringparam,
	.set_ringparam		= bnx2_set_ringparam,
	.get_pauseparam		= bnx2_get_pauseparam,
	.set_pauseparam		= bnx2_set_pauseparam,
	.get_rx_csum		= bnx2_get_rx_csum,
	.set_rx_csum		= bnx2_set_rx_csum,
	.get_tx_csum		= ethtool_op_get_tx_csum,
	.set_tx_csum		= ethtool_op_set_tx_csum,
	.get_sg			= ethtool_op_get_sg,
	.set_sg			= ethtool_op_set_sg,
	.get_tso		= ethtool_op_get_tso,
	.set_tso		= bnx2_set_tso,
	.self_test_count	= bnx2_self_test_count,
	.self_test		= bnx2_self_test,
	.get_strings		= bnx2_get_strings,
	.phys_id		= bnx2_phys_id,
	.get_stats_count	= bnx2_get_stats_count,
	.get_ethtool_stats	= bnx2_get_ethtool_stats,
	.get_perm_addr		= ethtool_op_get_perm_addr,
};
5548
/* Called with rtnl_lock */
static int
bnx2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct mii_ioctl_data *data = if_mii(ifr);
	struct bnx2 *bp = netdev_priv(dev);
	int err;

	/* MII ioctl handler: report the PHY address and read or write
	 * PHY registers under the phy lock.
	 * NOTE(review): PHY access is attempted regardless of whether the
	 * interface is up — confirm the chip is accessible when down.
	 */
	switch(cmd) {
	case SIOCGMIIPHY:
		data->phy_id = bp->phy_addr;

		/* fallthru */
	case SIOCGMIIREG: {
		u32 mii_regval;

		spin_lock_bh(&bp->phy_lock);
		err = bnx2_read_phy(bp, data->reg_num & 0x1f, &mii_regval);
		spin_unlock_bh(&bp->phy_lock);

		data->val_out = mii_regval;

		return err;
	}

	case SIOCSMIIREG:
		/* PHY register writes require admin privileges. */
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;

		spin_lock_bh(&bp->phy_lock);
		err = bnx2_write_phy(bp, data->reg_num & 0x1f, data->val_in);
		spin_unlock_bh(&bp->phy_lock);

		return err;

	default:
		/* do nothing */
		break;
	}
	return -EOPNOTSUPP;
}
5590
5591 /* Called with rtnl_lock */
5592 static int
5593 bnx2_change_mac_addr(struct net_device *dev, void *p)
5594 {
5595         struct sockaddr *addr = p;
5596         struct bnx2 *bp = netdev_priv(dev);
5597
5598         if (!is_valid_ether_addr(addr->sa_data))
5599                 return -EINVAL;
5600
5601         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
5602         if (netif_running(dev))
5603                 bnx2_set_mac_addr(bp);
5604
5605         return 0;
5606 }
5607
5608 /* Called with rtnl_lock */
5609 static int
5610 bnx2_change_mtu(struct net_device *dev, int new_mtu)
5611 {
5612         struct bnx2 *bp = netdev_priv(dev);
5613
5614         if (((new_mtu + ETH_HLEN) > MAX_ETHERNET_JUMBO_PACKET_SIZE) ||
5615                 ((new_mtu + ETH_HLEN) < MIN_ETHERNET_PACKET_SIZE))
5616                 return -EINVAL;
5617
5618         dev->mtu = new_mtu;
5619         if (netif_running(dev)) {
5620                 bnx2_netif_stop(bp);
5621
5622                 bnx2_init_nic(bp);
5623
5624                 bnx2_netif_start(bp);
5625         }
5626         return 0;
5627 }
5628
#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
/* Netpoll hook: invoke the interrupt handler with the IRQ line masked
 * so packets can be processed when normal interrupts are unavailable.
 */
static void
poll_bnx2(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);

	disable_irq(bp->pdev->irq);
	bnx2_interrupt(bp->pdev->irq, dev);
	enable_irq(bp->pdev->irq);
}
#endif
5640
5641 static void __devinit
5642 bnx2_get_5709_media(struct bnx2 *bp)
5643 {
5644         u32 val = REG_RD(bp, BNX2_MISC_DUAL_MEDIA_CTRL);
5645         u32 bond_id = val & BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID;
5646         u32 strap;
5647
5648         if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_C)
5649                 return;
5650         else if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_S) {
5651                 bp->phy_flags |= PHY_SERDES_FLAG;
5652                 return;
5653         }
5654
5655         if (val & BNX2_MISC_DUAL_MEDIA_CTRL_STRAP_OVERRIDE)
5656                 strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL) >> 21;
5657         else
5658                 strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL_STRAP) >> 8;
5659
5660         if (PCI_FUNC(bp->pdev->devfn) == 0) {
5661                 switch (strap) {
5662                 case 0x4:
5663                 case 0x5:
5664                 case 0x6:
5665                         bp->phy_flags |= PHY_SERDES_FLAG;
5666                         return;
5667                 }
5668         } else {
5669                 switch (strap) {
5670                 case 0x1:
5671                 case 0x2:
5672                 case 0x4:
5673                         bp->phy_flags |= PHY_SERDES_FLAG;
5674                         return;
5675                 }
5676         }
5677 }
5678
5679 static int __devinit
5680 bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
5681 {
5682         struct bnx2 *bp;
5683         unsigned long mem_len;
5684         int rc;
5685         u32 reg;
5686
5687         SET_MODULE_OWNER(dev);
5688         SET_NETDEV_DEV(dev, &pdev->dev);
5689         bp = netdev_priv(dev);
5690
5691         bp->flags = 0;
5692         bp->phy_flags = 0;
5693
5694         /* enable device (incl. PCI PM wakeup), and bus-mastering */
5695         rc = pci_enable_device(pdev);
5696         if (rc) {
5697                 dev_err(&pdev->dev, "Cannot enable PCI device, aborting.");
5698                 goto err_out;
5699         }
5700
5701         if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
5702                 dev_err(&pdev->dev,
5703                         "Cannot find PCI device base address, aborting.\n");
5704                 rc = -ENODEV;
5705                 goto err_out_disable;
5706         }
5707
5708         rc = pci_request_regions(pdev, DRV_MODULE_NAME);
5709         if (rc) {
5710                 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting.\n");
5711                 goto err_out_disable;
5712         }
5713
5714         pci_set_master(pdev);
5715
5716         bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
5717         if (bp->pm_cap == 0) {
5718                 dev_err(&pdev->dev,
5719                         "Cannot find power management capability, aborting.\n");
5720                 rc = -EIO;
5721                 goto err_out_release;
5722         }
5723
5724         if (pci_set_dma_mask(pdev, DMA_64BIT_MASK) == 0) {
5725                 bp->flags |= USING_DAC_FLAG;
5726                 if (pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK) != 0) {
5727                         dev_err(&pdev->dev,
5728                                 "pci_set_consistent_dma_mask failed, aborting.\n");
5729                         rc = -EIO;
5730                         goto err_out_release;
5731                 }
5732         }
5733         else if (pci_set_dma_mask(pdev, DMA_32BIT_MASK) != 0) {
5734                 dev_err(&pdev->dev, "System does not support DMA, aborting.\n");
5735                 rc = -EIO;
5736                 goto err_out_release;
5737         }
5738
5739         bp->dev = dev;
5740         bp->pdev = pdev;
5741
5742         spin_lock_init(&bp->phy_lock);
5743         INIT_WORK(&bp->reset_task, bnx2_reset_task);
5744
5745         dev->base_addr = dev->mem_start = pci_resource_start(pdev, 0);
5746         mem_len = MB_GET_CID_ADDR(TX_TSS_CID + 1);
5747         dev->mem_end = dev->mem_start + mem_len;
5748         dev->irq = pdev->irq;
5749
5750         bp->regview = ioremap_nocache(dev->base_addr, mem_len);
5751
5752         if (!bp->regview) {
5753                 dev_err(&pdev->dev, "Cannot map register space, aborting.\n");
5754                 rc = -ENOMEM;
5755                 goto err_out_release;
5756         }
5757
5758         /* Configure byte swap and enable write to the reg_window registers.
5759          * Rely on CPU to do target byte swapping on big endian systems
5760          * The chip's target access swapping will not swap all accesses
5761          */
5762         pci_write_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG,
5763                                BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
5764                                BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP);
5765
5766         bnx2_set_power_state(bp, PCI_D0);
5767
5768         bp->chip_id = REG_RD(bp, BNX2_MISC_ID);
5769
5770         if (CHIP_NUM(bp) != CHIP_NUM_5709) {
5771                 bp->pcix_cap = pci_find_capability(pdev, PCI_CAP_ID_PCIX);
5772                 if (bp->pcix_cap == 0) {
5773                         dev_err(&pdev->dev,
5774                                 "Cannot find PCIX capability, aborting.\n");
5775                         rc = -EIO;
5776                         goto err_out_unmap;
5777                 }
5778         }
5779
5780         /* Get bus information. */
5781         reg = REG_RD(bp, BNX2_PCICFG_MISC_STATUS);
5782         if (reg & BNX2_PCICFG_MISC_STATUS_PCIX_DET) {
5783                 u32 clkreg;
5784
5785                 bp->flags |= PCIX_FLAG;
5786
5787                 clkreg = REG_RD(bp, BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS);
5788
5789                 clkreg &= BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET;
5790                 switch (clkreg) {
5791                 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_133MHZ:
5792                         bp->bus_speed_mhz = 133;
5793                         break;
5794
5795                 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_95MHZ:
5796                         bp->bus_speed_mhz = 100;
5797                         break;
5798
5799                 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_66MHZ:
5800                 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_80MHZ:
5801                         bp->bus_speed_mhz = 66;
5802                         break;
5803
5804                 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_48MHZ:
5805                 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_55MHZ:
5806                         bp->bus_speed_mhz = 50;
5807                         break;
5808
5809                 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_LOW:
5810                 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_32MHZ:
5811                 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_38MHZ:
5812                         bp->bus_speed_mhz = 33;
5813                         break;
5814                 }
5815         }
5816         else {
5817                 if (reg & BNX2_PCICFG_MISC_STATUS_M66EN)
5818                         bp->bus_speed_mhz = 66;
5819                 else
5820                         bp->bus_speed_mhz = 33;
5821         }
5822
5823         if (reg & BNX2_PCICFG_MISC_STATUS_32BIT_DET)
5824                 bp->flags |= PCI_32BIT_FLAG;
5825
5826         /* 5706A0 may falsely detect SERR and PERR. */
5827         if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
5828                 reg = REG_RD(bp, PCI_COMMAND);
5829                 reg &= ~(PCI_COMMAND_SERR | PCI_COMMAND_PARITY);
5830                 REG_WR(bp, PCI_COMMAND, reg);
5831         }
5832         else if ((CHIP_ID(bp) == CHIP_ID_5706_A1) &&
5833                 !(bp->flags & PCIX_FLAG)) {
5834
5835                 dev_err(&pdev->dev,
5836                         "5706 A1 can only be used in a PCIX bus, aborting.\n");
5837                 goto err_out_unmap;
5838         }
5839
5840         bnx2_init_nvram(bp);
5841
5842         reg = REG_RD_IND(bp, BNX2_SHM_HDR_SIGNATURE);
5843
5844         if ((reg & BNX2_SHM_HDR_SIGNATURE_SIG_MASK) ==
5845             BNX2_SHM_HDR_SIGNATURE_SIG) {
5846                 u32 off = PCI_FUNC(pdev->devfn) << 2;
5847
5848                 bp->shmem_base = REG_RD_IND(bp, BNX2_SHM_HDR_ADDR_0 + off);
5849         } else
5850                 bp->shmem_base = HOST_VIEW_SHMEM_BASE;
5851
5852         /* Get the permanent MAC address.  First we need to make sure the
5853          * firmware is actually running.
5854          */
5855         reg = REG_RD_IND(bp, bp->shmem_base + BNX2_DEV_INFO_SIGNATURE);
5856
5857         if ((reg & BNX2_DEV_INFO_SIGNATURE_MAGIC_MASK) !=
5858             BNX2_DEV_INFO_SIGNATURE_MAGIC) {
5859                 dev_err(&pdev->dev, "Firmware not running, aborting.\n");
5860                 rc = -ENODEV;
5861                 goto err_out_unmap;
5862         }
5863
5864         bp->fw_ver = REG_RD_IND(bp, bp->shmem_base + BNX2_DEV_INFO_BC_REV);
5865
5866         reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_MAC_UPPER);
5867         bp->mac_addr[0] = (u8) (reg >> 8);
5868         bp->mac_addr[1] = (u8) reg;
5869
5870         reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_MAC_LOWER);
5871         bp->mac_addr[2] = (u8) (reg >> 24);
5872         bp->mac_addr[3] = (u8) (reg >> 16);
5873         bp->mac_addr[4] = (u8) (reg >> 8);
5874         bp->mac_addr[5] = (u8) reg;
5875
5876         bp->tx_ring_size = MAX_TX_DESC_CNT;
5877         bnx2_set_rx_ring_size(bp, 255);
5878
5879         bp->rx_csum = 1;
5880
5881         bp->rx_offset = sizeof(struct l2_fhdr) + 2;
5882
5883         bp->tx_quick_cons_trip_int = 20;
5884         bp->tx_quick_cons_trip = 20;
5885         bp->tx_ticks_int = 80;
5886         bp->tx_ticks = 80;
5887
5888         bp->rx_quick_cons_trip_int = 6;
5889         bp->rx_quick_cons_trip = 6;
5890         bp->rx_ticks_int = 18;
5891         bp->rx_ticks = 18;
5892
5893         bp->stats_ticks = 1000000 & 0xffff00;
5894
5895         bp->timer_interval =  HZ;
5896         bp->current_interval =  HZ;
5897
5898         bp->phy_addr = 1;
5899
5900         /* Disable WOL support if we are running on a SERDES chip. */
5901         if (CHIP_NUM(bp) == CHIP_NUM_5709)
5902                 bnx2_get_5709_media(bp);
5903         else if (CHIP_BOND_ID(bp) & CHIP_BOND_ID_SERDES_BIT)
5904                 bp->phy_flags |= PHY_SERDES_FLAG;
5905
5906         if (bp->phy_flags & PHY_SERDES_FLAG) {
5907                 bp->flags |= NO_WOL_FLAG;
5908                 if (CHIP_NUM(bp) != CHIP_NUM_5706) {
5909                         bp->phy_addr = 2;
5910                         reg = REG_RD_IND(bp, bp->shmem_base +
5911                                          BNX2_SHARED_HW_CFG_CONFIG);
5912                         if (reg & BNX2_SHARED_HW_CFG_PHY_2_5G)
5913                                 bp->phy_flags |= PHY_2_5G_CAPABLE_FLAG;
5914                 }
5915         } else if (CHIP_NUM(bp) == CHIP_NUM_5706 ||
5916                    CHIP_NUM(bp) == CHIP_NUM_5708)
5917                 bp->phy_flags |= PHY_CRC_FIX_FLAG;
5918         else if (CHIP_ID(bp) == CHIP_ID_5709_A0)
5919                 bp->phy_flags |= PHY_DIS_EARLY_DAC_FLAG;
5920
5921         if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
5922             (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
5923             (CHIP_ID(bp) == CHIP_ID_5708_B1))
5924                 bp->flags |= NO_WOL_FLAG;
5925
5926         if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
5927                 bp->tx_quick_cons_trip_int =
5928                         bp->tx_quick_cons_trip;
5929                 bp->tx_ticks_int = bp->tx_ticks;
5930                 bp->rx_quick_cons_trip_int =
5931                         bp->rx_quick_cons_trip;
5932                 bp->rx_ticks_int = bp->rx_ticks;
5933                 bp->comp_prod_trip_int = bp->comp_prod_trip;
5934                 bp->com_ticks_int = bp->com_ticks;
5935                 bp->cmd_ticks_int = bp->cmd_ticks;
5936         }
5937
5938         /* Disable MSI on 5706 if AMD 8132 bridge is found.
5939          *
5940          * MSI is defined to be 32-bit write.  The 5706 does 64-bit MSI writes
5941          * with byte enables disabled on the unused 32-bit word.  This is legal
5942          * but causes problems on the AMD 8132 which will eventually stop
5943          * responding after a while.
5944          *
5945          * AMD believes this incompatibility is unique to the 5706, and
5946          * prefers to locally disable MSI rather than globally disabling it.
5947          */
5948         if (CHIP_NUM(bp) == CHIP_NUM_5706 && disable_msi == 0) {
5949                 struct pci_dev *amd_8132 = NULL;
5950
5951                 while ((amd_8132 = pci_get_device(PCI_VENDOR_ID_AMD,
5952                                                   PCI_DEVICE_ID_AMD_8132_BRIDGE,
5953                                                   amd_8132))) {
5954                         u8 rev;
5955
5956                         pci_read_config_byte(amd_8132, PCI_REVISION_ID, &rev);
5957                         if (rev >= 0x10 && rev <= 0x13) {
5958                                 disable_msi = 1;
5959                                 pci_dev_put(amd_8132);
5960                                 break;
5961                         }
5962                 }
5963         }
5964
5965         bp->autoneg = AUTONEG_SPEED | AUTONEG_FLOW_CTRL;
5966         bp->req_line_speed = 0;
5967         if (bp->phy_flags & PHY_SERDES_FLAG) {
5968                 bp->advertising = ETHTOOL_ALL_FIBRE_SPEED | ADVERTISED_Autoneg;
5969
5970                 reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_CONFIG);
5971                 reg &= BNX2_PORT_HW_CFG_CFG_DFLT_LINK_MASK;
5972                 if (reg == BNX2_PORT_HW_CFG_CFG_DFLT_LINK_1G) {
5973                         bp->autoneg = 0;
5974                         bp->req_line_speed = bp->line_speed = SPEED_1000;
5975                         bp->req_duplex = DUPLEX_FULL;
5976                 }
5977         }
5978         else {
5979                 bp->advertising = ETHTOOL_ALL_COPPER_SPEED | ADVERTISED_Autoneg;
5980         }
5981
5982         bp->req_flow_ctrl = FLOW_CTRL_RX | FLOW_CTRL_TX;
5983
5984         init_timer(&bp->timer);
5985         bp->timer.expires = RUN_AT(bp->timer_interval);
5986         bp->timer.data = (unsigned long) bp;
5987         bp->timer.function = bnx2_timer;
5988
5989         return 0;
5990
5991 err_out_unmap:
5992         if (bp->regview) {
5993                 iounmap(bp->regview);
5994                 bp->regview = NULL;
5995         }
5996
5997 err_out_release:
5998         pci_release_regions(pdev);
5999
6000 err_out_disable:
6001         pci_disable_device(pdev);
6002         pci_set_drvdata(pdev, NULL);
6003
6004 err_out:
6005         return rc;
6006 }
6007
6008 static int __devinit
6009 bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
6010 {
6011         static int version_printed = 0;
6012         struct net_device *dev = NULL;
6013         struct bnx2 *bp;
6014         int rc, i;
6015
6016         if (version_printed++ == 0)
6017                 printk(KERN_INFO "%s", version);
6018
6019         /* dev zeroed in init_etherdev */
6020         dev = alloc_etherdev(sizeof(*bp));
6021
6022         if (!dev)
6023                 return -ENOMEM;
6024
6025         rc = bnx2_init_board(pdev, dev);
6026         if (rc < 0) {
6027                 free_netdev(dev);
6028                 return rc;
6029         }
6030
6031         dev->open = bnx2_open;
6032         dev->hard_start_xmit = bnx2_start_xmit;
6033         dev->stop = bnx2_close;
6034         dev->get_stats = bnx2_get_stats;
6035         dev->set_multicast_list = bnx2_set_rx_mode;
6036         dev->do_ioctl = bnx2_ioctl;
6037         dev->set_mac_address = bnx2_change_mac_addr;
6038         dev->change_mtu = bnx2_change_mtu;
6039         dev->tx_timeout = bnx2_tx_timeout;
6040         dev->watchdog_timeo = TX_TIMEOUT;
6041 #ifdef BCM_VLAN
6042         dev->vlan_rx_register = bnx2_vlan_rx_register;
6043         dev->vlan_rx_kill_vid = bnx2_vlan_rx_kill_vid;
6044 #endif
6045         dev->poll = bnx2_poll;
6046         dev->ethtool_ops = &bnx2_ethtool_ops;
6047         dev->weight = 64;
6048
6049         bp = netdev_priv(dev);
6050
6051 #if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
6052         dev->poll_controller = poll_bnx2;
6053 #endif
6054
6055         if ((rc = register_netdev(dev))) {
6056                 dev_err(&pdev->dev, "Cannot register net device\n");
6057                 if (bp->regview)
6058                         iounmap(bp->regview);
6059                 pci_release_regions(pdev);
6060                 pci_disable_device(pdev);
6061                 pci_set_drvdata(pdev, NULL);
6062                 free_netdev(dev);
6063                 return rc;
6064         }
6065
6066         pci_set_drvdata(pdev, dev);
6067
6068         memcpy(dev->dev_addr, bp->mac_addr, 6);
6069         memcpy(dev->perm_addr, bp->mac_addr, 6);
6070         bp->name = board_info[ent->driver_data].name,
6071         printk(KERN_INFO "%s: %s (%c%d) PCI%s %s %dMHz found at mem %lx, "
6072                 "IRQ %d, ",
6073                 dev->name,
6074                 bp->name,
6075                 ((CHIP_ID(bp) & 0xf000) >> 12) + 'A',
6076                 ((CHIP_ID(bp) & 0x0ff0) >> 4),
6077                 ((bp->flags & PCIX_FLAG) ? "-X" : ""),
6078                 ((bp->flags & PCI_32BIT_FLAG) ? "32-bit" : "64-bit"),
6079                 bp->bus_speed_mhz,
6080                 dev->base_addr,
6081                 bp->pdev->irq);
6082
6083         printk("node addr ");
6084         for (i = 0; i < 6; i++)
6085                 printk("%2.2x", dev->dev_addr[i]);
6086         printk("\n");
6087
6088         dev->features |= NETIF_F_SG;
6089         if (bp->flags & USING_DAC_FLAG)
6090                 dev->features |= NETIF_F_HIGHDMA;
6091         dev->features |= NETIF_F_IP_CSUM;
6092 #ifdef BCM_VLAN
6093         dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
6094 #endif
6095         dev->features |= NETIF_F_TSO | NETIF_F_TSO_ECN;
6096
6097         netif_carrier_off(bp->dev);
6098
6099         return 0;
6100 }
6101
/* PCI remove callback: tear down everything bnx2_init_one() set up.
 * The order matters: stop pending work, unregister the interface (so no
 * new I/O can start), then release the BAR mapping, netdev memory, and
 * finally the PCI resources.
 */
static void __devexit
bnx2_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);

	/* Make sure the reset task is not still queued/running. */
	flush_scheduled_work();

	unregister_netdev(dev);

	if (bp->regview)
		iounmap(bp->regview);

	free_netdev(dev);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
}
6120
6121 static int
6122 bnx2_suspend(struct pci_dev *pdev, pm_message_t state)
6123 {
6124         struct net_device *dev = pci_get_drvdata(pdev);
6125         struct bnx2 *bp = netdev_priv(dev);
6126         u32 reset_code;
6127
6128         if (!netif_running(dev))
6129                 return 0;
6130
6131         flush_scheduled_work();
6132         bnx2_netif_stop(bp);
6133         netif_device_detach(dev);
6134         del_timer_sync(&bp->timer);
6135         if (bp->flags & NO_WOL_FLAG)
6136                 reset_code = BNX2_DRV_MSG_CODE_UNLOAD_LNK_DN;
6137         else if (bp->wol)
6138                 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
6139         else
6140                 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
6141         bnx2_reset_chip(bp, reset_code);
6142         bnx2_free_skbs(bp);
6143         bnx2_set_power_state(bp, pci_choose_state(pdev, state));
6144         return 0;
6145 }
6146
/* PM resume callback: restore full power, re-initialize the chip and
 * restart the interface.  Mirrors bnx2_suspend().
 */
static int
bnx2_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);

	/* Interface was down at suspend time: nothing to restore. */
	if (!netif_running(dev))
		return 0;

	bnx2_set_power_state(bp, PCI_D0);
	netif_device_attach(dev);
	bnx2_init_nic(bp);
	bnx2_netif_start(bp);
	return 0;
}
6162
/* PCI driver glue: binds the probe/remove and power-management entry
 * points above to the device IDs listed in bnx2_pci_tbl.
 */
static struct pci_driver bnx2_pci_driver = {
	.name		= DRV_MODULE_NAME,
	.id_table	= bnx2_pci_tbl,
	.probe		= bnx2_init_one,
	.remove		= __devexit_p(bnx2_remove_one),
	.suspend	= bnx2_suspend,
	.resume		= bnx2_resume,
};
6171
/* Module entry point: register the PCI driver (probing happens per
 * device via bnx2_init_one()).  Returns 0 or a negative errno.
 */
static int __init bnx2_init(void)
{
	return pci_register_driver(&bnx2_pci_driver);
}
6176
/* Module exit point: unregister the PCI driver, which invokes
 * bnx2_remove_one() for every bound device.
 */
static void __exit bnx2_cleanup(void)
{
	pci_unregister_driver(&bnx2_pci_driver);
}
6181
/* Hook the init/exit functions into the module loader. */
module_init(bnx2_init);
module_exit(bnx2_cleanup);
6184
6185
6186