Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/sparc-2.6
[sfrench/cifs-2.6.git] / drivers / net / bnx2.c
1 /* bnx2.c: Broadcom NX2 network driver.
2  *
3  * Copyright (c) 2004-2009 Broadcom Corporation
4  *
5  * This program is free software; you can redistribute it and/or modify
6  * it under the terms of the GNU General Public License as published by
7  * the Free Software Foundation.
8  *
9  * Written by: Michael Chan  (mchan@broadcom.com)
10  */
11
12
13 #include <linux/module.h>
14 #include <linux/moduleparam.h>
15
16 #include <linux/kernel.h>
17 #include <linux/timer.h>
18 #include <linux/errno.h>
19 #include <linux/ioport.h>
20 #include <linux/slab.h>
21 #include <linux/vmalloc.h>
22 #include <linux/interrupt.h>
23 #include <linux/pci.h>
24 #include <linux/init.h>
25 #include <linux/netdevice.h>
26 #include <linux/etherdevice.h>
27 #include <linux/skbuff.h>
28 #include <linux/dma-mapping.h>
29 #include <linux/bitops.h>
30 #include <asm/io.h>
31 #include <asm/irq.h>
32 #include <linux/delay.h>
33 #include <asm/byteorder.h>
34 #include <asm/page.h>
35 #include <linux/time.h>
36 #include <linux/ethtool.h>
37 #include <linux/mii.h>
38 #include <linux/if_vlan.h>
39 #if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
40 #define BCM_VLAN 1
41 #endif
42 #include <net/ip.h>
43 #include <net/tcp.h>
44 #include <net/checksum.h>
45 #include <linux/workqueue.h>
46 #include <linux/crc32.h>
47 #include <linux/prefetch.h>
48 #include <linux/cache.h>
49 #include <linux/zlib.h>
50 #include <linux/log2.h>
51
52 #include "bnx2.h"
53 #include "bnx2_fw.h"
54 #include "bnx2_fw2.h"
55
56 #define FW_BUF_SIZE             0x10000
57
58 #define DRV_MODULE_NAME         "bnx2"
59 #define PFX DRV_MODULE_NAME     ": "
60 #define DRV_MODULE_VERSION      "1.9.2"
61 #define DRV_MODULE_RELDATE      "Feb 11, 2009"
62
63 #define RUN_AT(x) (jiffies + (x))
64
65 /* Time in jiffies before concluding the transmitter is hung. */
66 #define TX_TIMEOUT  (5*HZ)
67
/* Banner printed once at probe time; __devinitdata lets the kernel discard
 * it after device init on configurations without hotplug.
 */
static char version[] __devinitdata =
	"Broadcom NetXtreme II Gigabit Ethernet Driver " DRV_MODULE_NAME " v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Michael Chan <mchan@broadcom.com>");
MODULE_DESCRIPTION("Broadcom NetXtreme II BCM5706/5708/5709/5716 Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);

/* Module parameter: load with disable_msi=1 to force legacy INTx interrupts. */
static int disable_msi = 0;

module_param(disable_msi, int, 0);
MODULE_PARM_DESC(disable_msi, "Disable Message Signaled Interrupt (MSI)");
80
/* Supported board types.  The values are used as the driver_data field in
 * bnx2_pci_tbl and as the index into board_info[] below, so the two lists
 * must stay in the same order.
 */
typedef enum {
	BCM5706 = 0,
	NC370T,
	NC370I,
	BCM5706S,
	NC370F,
	BCM5708,
	BCM5708S,
	BCM5709,
	BCM5709S,
	BCM5716,
	BCM5716S,
} board_t;
94
/* Human-readable board names, indexed by board_t, above.  Keep in sync with
 * the board_t enum ordering.
 */
static struct {
	char *name;
} board_info[] __devinitdata = {
	{ "Broadcom NetXtreme II BCM5706 1000Base-T" },
	{ "HP NC370T Multifunction Gigabit Server Adapter" },
	{ "HP NC370i Multifunction Gigabit Server Adapter" },
	{ "Broadcom NetXtreme II BCM5706 1000Base-SX" },
	{ "HP NC370F Multifunction Gigabit Server Adapter" },
	{ "Broadcom NetXtreme II BCM5708 1000Base-T" },
	{ "Broadcom NetXtreme II BCM5708 1000Base-SX" },
	{ "Broadcom NetXtreme II BCM5709 1000Base-T" },
	{ "Broadcom NetXtreme II BCM5709 1000Base-SX" },
	{ "Broadcom NetXtreme II BCM5716 1000Base-T" },
	{ "Broadcom NetXtreme II BCM5716 1000Base-SX" },
	};
111
/* PCI ID table.  HP OEM entries (specific subsystem vendor/device) must come
 * before the generic PCI_ANY_ID entries for the same device ID so that they
 * match first.  driver_data is a board_t used to look up the name.
 */
static DEFINE_PCI_DEVICE_TABLE(bnx2_pci_tbl) = {
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
	  PCI_VENDOR_ID_HP, 0x3101, 0, 0, NC370T },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
	  PCI_VENDOR_ID_HP, 0x3106, 0, 0, NC370I },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
	  PCI_VENDOR_ID_HP, 0x3102, 0, 0, NC370F },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706S },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708S,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708S },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709S,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709S },
	/* 5716/5716S have no PCI_DEVICE_ID_NX2_* constant yet; raw IDs. */
	{ PCI_VENDOR_ID_BROADCOM, 0x163b,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5716 },
	{ PCI_VENDOR_ID_BROADCOM, 0x163c,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5716S },
	{ 0, }
};
137
/* NVRAM device table, matched against the flash strapping read from the
 * chip at init time.  The five leading hex words are raw NVRAM controller
 * configuration values (presumably strapping match plus config/write
 * registers — see struct flash_spec in bnx2.h for the field layout); they
 * are followed by flags, page geometry, byte address mask, total size and
 * a descriptive name.  "Expansion" entries are placeholders for straps
 * with no known device.
 */
static struct flash_spec flash_table[] =
{
#define BUFFERED_FLAGS		(BNX2_NV_BUFFERED | BNX2_NV_TRANSLATE)
#define NONBUFFERED_FLAGS	(BNX2_NV_WREN)
	/* Slow EEPROM */
	{0x00000000, 0x40830380, 0x009f0081, 0xa184a053, 0xaf000400,
	 BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
	 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
	 "EEPROM - slow"},
	/* Expansion entry 0001 */
	{0x08000002, 0x4b808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 0001"},
	/* Saifun SA25F010 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x04000001, 0x47808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*2,
	 "Non-buffered flash (128kB)"},
	/* Saifun SA25F020 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x0c000003, 0x4f808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*4,
	 "Non-buffered flash (256kB)"},
	/* Expansion entry 0100 */
	{0x11000000, 0x53808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 0100"},
	/* Entry 0101: ST M45PE10 (non-buffered flash, TetonII B0) */
	{0x19000002, 0x5b808201, 0x000500db, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
	 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*2,
	 "Entry 0101: ST M45PE10 (128kB non-bufferred)"},
	/* Entry 0110: ST M45PE20 (non-buffered flash)*/
	{0x15000001, 0x57808201, 0x000500db, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
	 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*4,
	 "Entry 0110: ST M45PE20 (256kB non-bufferred)"},
	/* Saifun SA25F005 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x1d000003, 0x5f808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE,
	 "Non-buffered flash (64kB)"},
	/* Fast EEPROM */
	{0x22000000, 0x62808380, 0x009f0081, 0xa184a053, 0xaf000400,
	 BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
	 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
	 "EEPROM - fast"},
	/* Expansion entry 1001 */
	{0x2a000002, 0x6b808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1001"},
	/* Expansion entry 1010 */
	{0x26000001, 0x67808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1010"},
	/* ATMEL AT45DB011B (buffered flash) */
	{0x2e000003, 0x6e808273, 0x00570081, 0x68848353, 0xaf000400,
	 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE,
	 "Buffered flash (128kB)"},
	/* Expansion entry 1100 */
	{0x33000000, 0x73808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1100"},
	/* Expansion entry 1101 */
	{0x3b000002, 0x7b808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1101"},
	/* Ateml Expansion entry 1110 */
	{0x37000001, 0x76808273, 0x00570081, 0x68848353, 0xaf000400,
	 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1110 (Atmel)"},
	/* ATMEL AT45DB021B (buffered flash) */
	{0x3f000003, 0x7e808273, 0x00570081, 0x68848353, 0xaf000400,
	 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE*2,
	 "Buffered flash (256kB)"},
};
226
/* The 5709 family has a single known NVRAM layout, so it bypasses the
 * strapping-based flash_table lookup and uses this fixed spec instead.
 */
static struct flash_spec flash_5709 = {
	.flags		= BNX2_NV_BUFFERED,
	.page_bits	= BCM5709_FLASH_PAGE_BITS,
	.page_size	= BCM5709_FLASH_PAGE_SIZE,
	.addr_mask	= BCM5709_FLASH_BYTE_ADDR_MASK,
	.total_size	= BUFFERED_FLASH_TOTAL_SIZE*2,
	.name		= "5709 Buffered flash (256kB)",
};

MODULE_DEVICE_TABLE(pci, bnx2_pci_tbl);
237
238 static inline u32 bnx2_tx_avail(struct bnx2 *bp, struct bnx2_tx_ring_info *txr)
239 {
240         u32 diff;
241
242         smp_mb();
243
244         /* The ring uses 256 indices for 255 entries, one of them
245          * needs to be skipped.
246          */
247         diff = txr->tx_prod - txr->tx_cons;
248         if (unlikely(diff >= TX_DESC_CNT)) {
249                 diff &= 0xffff;
250                 if (diff == TX_DESC_CNT)
251                         diff = MAX_TX_DESC_CNT;
252         }
253         return (bp->tx_ring_size - diff);
254 }
255
/* Indirect register read: program the PCI config window address, then read
 * the value back through the window.  indirect_lock serializes the
 * address/data pair against concurrent indirect accesses.
 */
static u32
bnx2_reg_rd_ind(struct bnx2 *bp, u32 offset)
{
	u32 val;

	spin_lock_bh(&bp->indirect_lock);
	REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
	val = REG_RD(bp, BNX2_PCICFG_REG_WINDOW);
	spin_unlock_bh(&bp->indirect_lock);
	return val;
}
267
/* Indirect register write: program the window address, then write the value
 * through the window, under indirect_lock (see bnx2_reg_rd_ind).
 */
static void
bnx2_reg_wr_ind(struct bnx2 *bp, u32 offset, u32 val)
{
	spin_lock_bh(&bp->indirect_lock);
	REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
	REG_WR(bp, BNX2_PCICFG_REG_WINDOW, val);
	spin_unlock_bh(&bp->indirect_lock);
}
276
/* Write a 32-bit word into the firmware shared memory region, addressed
 * relative to shmem_base.
 */
static void
bnx2_shmem_wr(struct bnx2 *bp, u32 offset, u32 val)
{
	bnx2_reg_wr_ind(bp, bp->shmem_base + offset, val);
}
282
283 static u32
284 bnx2_shmem_rd(struct bnx2 *bp, u32 offset)
285 {
286         return (bnx2_reg_rd_ind(bp, bp->shmem_base + offset));
287 }
288
/* Write one word into the chip's context memory at cid_addr + offset.
 * On 5709 the write goes through the CTX_CTX data/control registers and we
 * poll (up to 5 * 5us) for the hardware to clear the WRITE_REQ bit; note a
 * timeout is silently ignored.  Older chips use a simple address/data
 * register pair.  indirect_lock serializes the multi-register sequence.
 */
static void
bnx2_ctx_wr(struct bnx2 *bp, u32 cid_addr, u32 offset, u32 val)
{
	offset += cid_addr;
	spin_lock_bh(&bp->indirect_lock);
	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		int i;

		REG_WR(bp, BNX2_CTX_CTX_DATA, val);
		REG_WR(bp, BNX2_CTX_CTX_CTRL,
		       offset | BNX2_CTX_CTX_CTRL_WRITE_REQ);
		/* Wait for the chip to consume the write request. */
		for (i = 0; i < 5; i++) {
			val = REG_RD(bp, BNX2_CTX_CTX_CTRL);
			if ((val & BNX2_CTX_CTX_CTRL_WRITE_REQ) == 0)
				break;
			udelay(5);
		}
	} else {
		REG_WR(bp, BNX2_CTX_DATA_ADR, offset);
		REG_WR(bp, BNX2_CTX_DATA, val);
	}
	spin_unlock_bh(&bp->indirect_lock);
}
312
/* Read PHY register @reg over MDIO into *val.
 *
 * If the MAC's MDIO auto-poll is active it is temporarily turned off around
 * the manual transaction (and restored afterwards), since both would contend
 * for the MDIO bus.  The transaction is started by writing the COMM register
 * and completes when hardware clears START_BUSY; we poll up to 50 * 10us.
 *
 * Returns 0 on success, -EBUSY on timeout (with *val set to 0).
 */
static int
bnx2_read_phy(struct bnx2 *bp, u32 reg, u32 *val)
{
	u32 val1;
	int i, ret;

	if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		/* Read back to post the write, then let the poller stop. */
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);

		udelay(40);
	}

	/* Compose and fire the MDIO read command. */
	val1 = (bp->phy_addr << 21) | (reg << 16) |
		BNX2_EMAC_MDIO_COMM_COMMAND_READ | BNX2_EMAC_MDIO_COMM_DISEXT |
		BNX2_EMAC_MDIO_COMM_START_BUSY;
	REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);

	for (i = 0; i < 50; i++) {
		udelay(10);

		val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
		if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
			udelay(5);

			/* Re-read to fetch the data half of the register. */
			val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
			val1 &= BNX2_EMAC_MDIO_COMM_DATA;

			break;
		}
	}

	if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY) {
		*val = 0x0;
		ret = -EBUSY;
	}
	else {
		*val = val1;
		ret = 0;
	}

	/* Restore auto-polling if we disabled it above. */
	if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);

		udelay(40);
	}

	return ret;
}
369
/* Write @val to PHY register @reg over MDIO.
 *
 * Mirrors bnx2_read_phy: auto-polling is suspended around the manual
 * transaction and restored afterwards; completion is signaled by hardware
 * clearing START_BUSY, polled up to 50 * 10us.
 *
 * Returns 0 on success, -EBUSY on timeout.
 */
static int
bnx2_write_phy(struct bnx2 *bp, u32 reg, u32 val)
{
	u32 val1;
	int i, ret;

	if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);

		udelay(40);
	}

	/* Compose and fire the MDIO write command (data in the low bits). */
	val1 = (bp->phy_addr << 21) | (reg << 16) | val |
		BNX2_EMAC_MDIO_COMM_COMMAND_WRITE |
		BNX2_EMAC_MDIO_COMM_START_BUSY | BNX2_EMAC_MDIO_COMM_DISEXT;
	REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);

	for (i = 0; i < 50; i++) {
		udelay(10);

		val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
		if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
			udelay(5);
			break;
		}
	}

	if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)
		ret = -EBUSY;
	else
		ret = 0;

	/* Restore auto-polling if we disabled it above. */
	if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);

		udelay(40);
	}

	return ret;
}
418
/* Mask interrupts on every IRQ vector, then read the register back once to
 * flush the posted writes to the chip.
 */
static void
bnx2_disable_int(struct bnx2 *bp)
{
	int i;
	struct bnx2_napi *bnapi;

	for (i = 0; i < bp->irq_nvecs; i++) {
		bnapi = &bp->bnx2_napi[i];
		REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
		       BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
	}
	REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD);
}
432
/* Unmask interrupts on every IRQ vector.  For each vector the status index
 * is acknowledged twice: first with MASK_INT still set (update the index
 * while masked), then without it (actually unmask).  Finally COAL_NOW asks
 * the host coalescing block to run immediately so any events that arrived
 * while masked generate an interrupt right away.
 */
static void
bnx2_enable_int(struct bnx2 *bp)
{
	int i;
	struct bnx2_napi *bnapi;

	for (i = 0; i < bp->irq_nvecs; i++) {
		bnapi = &bp->bnx2_napi[i];

		REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
		       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
		       BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
		       bnapi->last_status_idx);

		REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
		       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
		       bnapi->last_status_idx);
	}
	REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
}
453
/* Disable interrupts and wait for all in-flight handlers to finish.
 * intr_sem is incremented first so the ISR fast path backs off; it is
 * balanced by the atomic_dec_and_test() in bnx2_netif_start().
 */
static void
bnx2_disable_int_sync(struct bnx2 *bp)
{
	int i;

	atomic_inc(&bp->intr_sem);
	bnx2_disable_int(bp);
	for (i = 0; i < bp->irq_nvecs; i++)
		synchronize_irq(bp->irq_tbl[i].vector);
}
464
465 static void
466 bnx2_napi_disable(struct bnx2 *bp)
467 {
468         int i;
469
470         for (i = 0; i < bp->irq_nvecs; i++)
471                 napi_disable(&bp->bnx2_napi[i].napi);
472 }
473
474 static void
475 bnx2_napi_enable(struct bnx2 *bp)
476 {
477         int i;
478
479         for (i = 0; i < bp->irq_nvecs; i++)
480                 napi_enable(&bp->bnx2_napi[i].napi);
481 }
482
/* Quiesce the interface: synchronously mask interrupts, then stop NAPI and
 * the TX queues.  trans_start is refreshed so the watchdog does not report
 * a spurious TX timeout while we are deliberately stopped.
 */
static void
bnx2_netif_stop(struct bnx2 *bp)
{
	bnx2_disable_int_sync(bp);
	if (netif_running(bp->dev)) {
		bnx2_napi_disable(bp);
		netif_tx_disable(bp->dev);
		bp->dev->trans_start = jiffies; /* prevent tx timeout */
	}
}
493
/* Undo one bnx2_netif_stop().  Stops can nest (intr_sem counts them); only
 * the final matching start — when intr_sem drops to zero — re-enables the
 * TX queues, NAPI and interrupts.
 */
static void
bnx2_netif_start(struct bnx2 *bp)
{
	if (atomic_dec_and_test(&bp->intr_sem)) {
		if (netif_running(bp->dev)) {
			netif_tx_wake_all_queues(bp->dev);
			bnx2_napi_enable(bp);
			bnx2_enable_int(bp);
		}
	}
}
505
/* Free the DMA descriptor ring and the software buffer ring of every TX
 * ring.  Safe to call on partially-allocated state: pointers are checked
 * (or the freeing call tolerates NULL) and reset afterwards, so the
 * function is idempotent.
 */
static void
bnx2_free_tx_mem(struct bnx2 *bp)
{
	int i;

	for (i = 0; i < bp->num_tx_rings; i++) {
		struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
		struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;

		if (txr->tx_desc_ring) {
			pci_free_consistent(bp->pdev, TXBD_RING_SIZE,
					    txr->tx_desc_ring,
					    txr->tx_desc_mapping);
			txr->tx_desc_ring = NULL;
		}
		/* kfree(NULL) is a no-op. */
		kfree(txr->tx_buf_ring);
		txr->tx_buf_ring = NULL;
	}
}
525
526 static void
527 bnx2_free_rx_mem(struct bnx2 *bp)
528 {
529         int i;
530
531         for (i = 0; i < bp->num_rx_rings; i++) {
532                 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
533                 struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
534                 int j;
535
536                 for (j = 0; j < bp->rx_max_ring; j++) {
537                         if (rxr->rx_desc_ring[j])
538                                 pci_free_consistent(bp->pdev, RXBD_RING_SIZE,
539                                                     rxr->rx_desc_ring[j],
540                                                     rxr->rx_desc_mapping[j]);
541                         rxr->rx_desc_ring[j] = NULL;
542                 }
543                 if (rxr->rx_buf_ring)
544                         vfree(rxr->rx_buf_ring);
545                 rxr->rx_buf_ring = NULL;
546
547                 for (j = 0; j < bp->rx_max_pg_ring; j++) {
548                         if (rxr->rx_pg_desc_ring[j])
549                                 pci_free_consistent(bp->pdev, RXBD_RING_SIZE,
550                                                     rxr->rx_pg_desc_ring[j],
551                                                     rxr->rx_pg_desc_mapping[j]);
552                         rxr->rx_pg_desc_ring[j] = NULL;
553                 }
554                 if (rxr->rx_pg_ring)
555                         vfree(rxr->rx_pg_ring);
556                 rxr->rx_pg_ring = NULL;
557         }
558 }
559
/* Allocate the software buffer ring (zeroed kmalloc) and the DMA descriptor
 * ring for every TX ring.
 *
 * Returns 0 on success or -ENOMEM; on failure the partially-allocated
 * rings are left in place for the caller (bnx2_alloc_mem) to free via
 * bnx2_free_mem().
 */
static int
bnx2_alloc_tx_mem(struct bnx2 *bp)
{
	int i;

	for (i = 0; i < bp->num_tx_rings; i++) {
		struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
		struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;

		txr->tx_buf_ring = kzalloc(SW_TXBD_RING_SIZE, GFP_KERNEL);
		if (txr->tx_buf_ring == NULL)
			return -ENOMEM;

		txr->tx_desc_ring =
			pci_alloc_consistent(bp->pdev, TXBD_RING_SIZE,
					     &txr->tx_desc_mapping);
		if (txr->tx_desc_ring == NULL)
			return -ENOMEM;
	}
	return 0;
}
581
/* Allocate, for every RX ring: the vmalloc'ed software buffer ring, one DMA
 * descriptor page per rx_max_ring, and — only if page-ring mode is enabled
 * (rx_pg_ring_size != 0) — the software page ring plus its DMA descriptor
 * pages.
 *
 * Returns 0 on success or -ENOMEM; partially-allocated state is left for
 * the caller to clean up via bnx2_free_mem().
 */
static int
bnx2_alloc_rx_mem(struct bnx2 *bp)
{
	int i;

	for (i = 0; i < bp->num_rx_rings; i++) {
		struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
		struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
		int j;

		rxr->rx_buf_ring =
			vmalloc(SW_RXBD_RING_SIZE * bp->rx_max_ring);
		if (rxr->rx_buf_ring == NULL)
			return -ENOMEM;

		memset(rxr->rx_buf_ring, 0,
		       SW_RXBD_RING_SIZE * bp->rx_max_ring);

		for (j = 0; j < bp->rx_max_ring; j++) {
			rxr->rx_desc_ring[j] =
				pci_alloc_consistent(bp->pdev, RXBD_RING_SIZE,
						     &rxr->rx_desc_mapping[j]);
			if (rxr->rx_desc_ring[j] == NULL)
				return -ENOMEM;

		}

		if (bp->rx_pg_ring_size) {
			rxr->rx_pg_ring = vmalloc(SW_RXPG_RING_SIZE *
						  bp->rx_max_pg_ring);
			if (rxr->rx_pg_ring == NULL)
				return -ENOMEM;

			memset(rxr->rx_pg_ring, 0, SW_RXPG_RING_SIZE *
			       bp->rx_max_pg_ring);
		}

		/* rx_max_pg_ring is 0 when page-ring mode is off, so this
		 * loop is a no-op in that case.
		 */
		for (j = 0; j < bp->rx_max_pg_ring; j++) {
			rxr->rx_pg_desc_ring[j] =
				pci_alloc_consistent(bp->pdev, RXBD_RING_SIZE,
						&rxr->rx_pg_desc_mapping[j]);
			if (rxr->rx_pg_desc_ring[j] == NULL)
				return -ENOMEM;

		}
	}
	return 0;
}
630
/* Free everything bnx2_alloc_mem() allocated: TX/RX rings, 5709 context
 * pages, and the combined status+statistics block (one allocation, so
 * freeing status_blk.msi releases the stats block too — stats_blk is just
 * cleared).  Safe on partially-allocated state.
 */
static void
bnx2_free_mem(struct bnx2 *bp)
{
	int i;
	struct bnx2_napi *bnapi = &bp->bnx2_napi[0];

	bnx2_free_tx_mem(bp);
	bnx2_free_rx_mem(bp);

	for (i = 0; i < bp->ctx_pages; i++) {
		if (bp->ctx_blk[i]) {
			pci_free_consistent(bp->pdev, BCM_PAGE_SIZE,
					    bp->ctx_blk[i],
					    bp->ctx_blk_mapping[i]);
			bp->ctx_blk[i] = NULL;
		}
	}
	if (bnapi->status_blk.msi) {
		pci_free_consistent(bp->pdev, bp->status_stats_size,
				    bnapi->status_blk.msi,
				    bp->status_blk_mapping);
		bnapi->status_blk.msi = NULL;
		bp->stats_blk = NULL;
	}
}
656
/* Allocate all host memory the device needs:
 *  - one DMA allocation holding the status block(s) followed by the
 *    statistics block (cache-line aligned);
 *  - with MSI-X, one BNX2_SBLK_MSIX_ALIGN_SIZE-sized status block slot per
 *    hardware vector, and per-vector cons pointers / int_num set up;
 *  - on 5709, 0x2000 bytes of context memory split into BCM_PAGE_SIZE
 *    DMA pages;
 *  - the RX and TX rings (see bnx2_alloc_rx_mem/bnx2_alloc_tx_mem).
 *
 * Returns 0 or -ENOMEM; on any failure everything allocated so far is
 * released via bnx2_free_mem().
 */
static int
bnx2_alloc_mem(struct bnx2 *bp)
{
	int i, status_blk_size, err;
	struct bnx2_napi *bnapi;
	void *status_blk;

	/* Combine status and statistics blocks into one allocation. */
	status_blk_size = L1_CACHE_ALIGN(sizeof(struct status_block));
	if (bp->flags & BNX2_FLAG_MSIX_CAP)
		status_blk_size = L1_CACHE_ALIGN(BNX2_MAX_MSIX_HW_VEC *
						 BNX2_SBLK_MSIX_ALIGN_SIZE);
	bp->status_stats_size = status_blk_size +
				sizeof(struct statistics_block);

	status_blk = pci_alloc_consistent(bp->pdev, bp->status_stats_size,
					  &bp->status_blk_mapping);
	if (status_blk == NULL)
		goto alloc_mem_err;

	memset(status_blk, 0, bp->status_stats_size);

	/* Vector 0 always uses the base (MSI-style) status block. */
	bnapi = &bp->bnx2_napi[0];
	bnapi->status_blk.msi = status_blk;
	bnapi->hw_tx_cons_ptr =
		&bnapi->status_blk.msi->status_tx_quick_consumer_index0;
	bnapi->hw_rx_cons_ptr =
		&bnapi->status_blk.msi->status_rx_quick_consumer_index0;
	if (bp->flags & BNX2_FLAG_MSIX_CAP) {
		for (i = 1; i < BNX2_MAX_MSIX_VEC; i++) {
			struct status_block_msix *sblk;

			bnapi = &bp->bnx2_napi[i];

			/* Each vector's status block lives at a fixed
			 * offset within the shared allocation.
			 */
			sblk = (void *) (status_blk +
					 BNX2_SBLK_MSIX_ALIGN_SIZE * i);
			bnapi->status_blk.msix = sblk;
			bnapi->hw_tx_cons_ptr =
				&sblk->status_tx_quick_consumer_index;
			bnapi->hw_rx_cons_ptr =
				&sblk->status_rx_quick_consumer_index;
			/* Vector number as written to INT_ACK_CMD. */
			bnapi->int_num = i << 24;
		}
	}

	/* Statistics block follows the status block(s) in the same
	 * allocation.
	 */
	bp->stats_blk = status_blk + status_blk_size;

	bp->stats_blk_mapping = bp->status_blk_mapping + status_blk_size;

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		bp->ctx_pages = 0x2000 / BCM_PAGE_SIZE;
		if (bp->ctx_pages == 0)
			bp->ctx_pages = 1;
		for (i = 0; i < bp->ctx_pages; i++) {
			bp->ctx_blk[i] = pci_alloc_consistent(bp->pdev,
						BCM_PAGE_SIZE,
						&bp->ctx_blk_mapping[i]);
			if (bp->ctx_blk[i] == NULL)
				goto alloc_mem_err;
		}
	}

	err = bnx2_alloc_rx_mem(bp);
	if (err)
		goto alloc_mem_err;

	err = bnx2_alloc_tx_mem(bp);
	if (err)
		goto alloc_mem_err;

	return 0;

alloc_mem_err:
	bnx2_free_mem(bp);
	return -ENOMEM;
}
733
/* Report the current link state to the bootcode/management firmware by
 * writing a BNX2_LINK_STATUS_* word into shared memory.  Skipped when a
 * remote PHY is in use (the firmware owns the link in that case).
 */
static void
bnx2_report_fw_link(struct bnx2 *bp)
{
	u32 fw_link_status = 0;

	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
		return;

	if (bp->link_up) {
		u32 bmsr;

		/* Encode speed/duplex. */
		switch (bp->line_speed) {
		case SPEED_10:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_10HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_10FULL;
			break;
		case SPEED_100:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_100HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_100FULL;
			break;
		case SPEED_1000:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_1000HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_1000FULL;
			break;
		case SPEED_2500:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_2500HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_2500FULL;
			break;
		}

		fw_link_status |= BNX2_LINK_STATUS_LINK_UP;

		if (bp->autoneg) {
			fw_link_status |= BNX2_LINK_STATUS_AN_ENABLED;

			/* Read BMSR twice: its link bits latch low, so the
			 * second read reflects the current state.
			 */
			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);

			if (!(bmsr & BMSR_ANEGCOMPLETE) ||
			    bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT)
				fw_link_status |= BNX2_LINK_STATUS_PARALLEL_DET;
			else
				fw_link_status |= BNX2_LINK_STATUS_AN_COMPLETE;
		}
	}
	else
		fw_link_status = BNX2_LINK_STATUS_LINK_DOWN;

	bnx2_shmem_wr(bp, BNX2_LINK_STATUS, fw_link_status);
}
792
793 static char *
794 bnx2_xceiver_str(struct bnx2 *bp)
795 {
796         return ((bp->phy_port == PORT_FIBRE) ? "SerDes" :
797                 ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) ? "Remote Copper" :
798                  "Copper"));
799 }
800
/* Log the link state, update the kernel carrier state, and forward the
 * state to the firmware.  The "Link is Up" line is assembled from several
 * continuation printk()s (speed, duplex, flow control).
 */
static void
bnx2_report_link(struct bnx2 *bp)
{
	if (bp->link_up) {
		netif_carrier_on(bp->dev);
		printk(KERN_INFO PFX "%s NIC %s Link is Up, ", bp->dev->name,
		       bnx2_xceiver_str(bp));

		printk("%d Mbps ", bp->line_speed);

		if (bp->duplex == DUPLEX_FULL)
			printk("full duplex");
		else
			printk("half duplex");

		if (bp->flow_ctrl) {
			if (bp->flow_ctrl & FLOW_CTRL_RX) {
				printk(", receive ");
				if (bp->flow_ctrl & FLOW_CTRL_TX)
					printk("& transmit ");
			}
			else {
				printk(", transmit ");
			}
			printk("flow control ON");
		}
		printk("\n");
	}
	else {
		netif_carrier_off(bp->dev);
		printk(KERN_ERR PFX "%s NIC %s Link is Down\n", bp->dev->name,
		       bnx2_xceiver_str(bp));
	}

	/* Keep the bootcode/management firmware in sync. */
	bnx2_report_fw_link(bp);
}
837
/* Derive bp->flow_ctrl from the current link configuration.
 *
 * If speed and flow control are not both autonegotiated, the
 * user-requested flow control is applied directly (full duplex only).
 * Otherwise the local and partner pause advertisements are read from
 * the PHY and resolved per the IEEE 802.3 priority rules.
 */
static void
bnx2_resolve_flow_ctrl(struct bnx2 *bp)
{
	u32 local_adv, remote_adv;

	bp->flow_ctrl = 0;
	/* Pause resolution applies only when both speed and flow
	 * control are autonegotiated.
	 */
	if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
		(AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {

		if (bp->duplex == DUPLEX_FULL) {
			bp->flow_ctrl = bp->req_flow_ctrl;
		}
		return;
	}

	/* Pause is only meaningful on full duplex links. */
	if (bp->duplex != DUPLEX_FULL) {
		return;
	}

	/* The 5708 SerDes PHY reports the already-resolved pause result
	 * in its 1000X status register, so use that directly.
	 */
	if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
	    (CHIP_NUM(bp) == CHIP_NUM_5708)) {
		u32 val;

		bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
		if (val & BCM5708S_1000X_STAT1_TX_PAUSE)
			bp->flow_ctrl |= FLOW_CTRL_TX;
		if (val & BCM5708S_1000X_STAT1_RX_PAUSE)
			bp->flow_ctrl |= FLOW_CTRL_RX;
		return;
	}

	bnx2_read_phy(bp, bp->mii_adv, &local_adv);
	bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);

	/* Map the 1000Base-X pause bits onto the copper encoding so a
	 * single resolution table below handles both media types.
	 */
	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
		u32 new_local_adv = 0;
		u32 new_remote_adv = 0;

		if (local_adv & ADVERTISE_1000XPAUSE)
			new_local_adv |= ADVERTISE_PAUSE_CAP;
		if (local_adv & ADVERTISE_1000XPSE_ASYM)
			new_local_adv |= ADVERTISE_PAUSE_ASYM;
		if (remote_adv & ADVERTISE_1000XPAUSE)
			new_remote_adv |= ADVERTISE_PAUSE_CAP;
		if (remote_adv & ADVERTISE_1000XPSE_ASYM)
			new_remote_adv |= ADVERTISE_PAUSE_ASYM;

		local_adv = new_local_adv;
		remote_adv = new_remote_adv;
	}

	/* See Table 28B-3 of 802.3ab-1999 spec. */
	if (local_adv & ADVERTISE_PAUSE_CAP) {
		if(local_adv & ADVERTISE_PAUSE_ASYM) {
			if (remote_adv & ADVERTISE_PAUSE_CAP) {
				bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
			}
			else if (remote_adv & ADVERTISE_PAUSE_ASYM) {
				bp->flow_ctrl = FLOW_CTRL_RX;
			}
		}
		else {
			if (remote_adv & ADVERTISE_PAUSE_CAP) {
				bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
			}
		}
	}
	else if (local_adv & ADVERTISE_PAUSE_ASYM) {
		if ((remote_adv & ADVERTISE_PAUSE_CAP) &&
			(remote_adv & ADVERTISE_PAUSE_ASYM)) {

			bp->flow_ctrl = FLOW_CTRL_TX;
		}
	}
}
913
914 static int
915 bnx2_5709s_linkup(struct bnx2 *bp)
916 {
917         u32 val, speed;
918
919         bp->link_up = 1;
920
921         bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_GP_STATUS);
922         bnx2_read_phy(bp, MII_BNX2_GP_TOP_AN_STATUS1, &val);
923         bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
924
925         if ((bp->autoneg & AUTONEG_SPEED) == 0) {
926                 bp->line_speed = bp->req_line_speed;
927                 bp->duplex = bp->req_duplex;
928                 return 0;
929         }
930         speed = val & MII_BNX2_GP_TOP_AN_SPEED_MSK;
931         switch (speed) {
932                 case MII_BNX2_GP_TOP_AN_SPEED_10:
933                         bp->line_speed = SPEED_10;
934                         break;
935                 case MII_BNX2_GP_TOP_AN_SPEED_100:
936                         bp->line_speed = SPEED_100;
937                         break;
938                 case MII_BNX2_GP_TOP_AN_SPEED_1G:
939                 case MII_BNX2_GP_TOP_AN_SPEED_1GKV:
940                         bp->line_speed = SPEED_1000;
941                         break;
942                 case MII_BNX2_GP_TOP_AN_SPEED_2_5G:
943                         bp->line_speed = SPEED_2500;
944                         break;
945         }
946         if (val & MII_BNX2_GP_TOP_AN_FD)
947                 bp->duplex = DUPLEX_FULL;
948         else
949                 bp->duplex = DUPLEX_HALF;
950         return 0;
951 }
952
953 static int
954 bnx2_5708s_linkup(struct bnx2 *bp)
955 {
956         u32 val;
957
958         bp->link_up = 1;
959         bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
960         switch (val & BCM5708S_1000X_STAT1_SPEED_MASK) {
961                 case BCM5708S_1000X_STAT1_SPEED_10:
962                         bp->line_speed = SPEED_10;
963                         break;
964                 case BCM5708S_1000X_STAT1_SPEED_100:
965                         bp->line_speed = SPEED_100;
966                         break;
967                 case BCM5708S_1000X_STAT1_SPEED_1G:
968                         bp->line_speed = SPEED_1000;
969                         break;
970                 case BCM5708S_1000X_STAT1_SPEED_2G5:
971                         bp->line_speed = SPEED_2500;
972                         break;
973         }
974         if (val & BCM5708S_1000X_STAT1_FD)
975                 bp->duplex = DUPLEX_FULL;
976         else
977                 bp->duplex = DUPLEX_HALF;
978
979         return 0;
980 }
981
982 static int
983 bnx2_5706s_linkup(struct bnx2 *bp)
984 {
985         u32 bmcr, local_adv, remote_adv, common;
986
987         bp->link_up = 1;
988         bp->line_speed = SPEED_1000;
989
990         bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
991         if (bmcr & BMCR_FULLDPLX) {
992                 bp->duplex = DUPLEX_FULL;
993         }
994         else {
995                 bp->duplex = DUPLEX_HALF;
996         }
997
998         if (!(bmcr & BMCR_ANENABLE)) {
999                 return 0;
1000         }
1001
1002         bnx2_read_phy(bp, bp->mii_adv, &local_adv);
1003         bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);
1004
1005         common = local_adv & remote_adv;
1006         if (common & (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL)) {
1007
1008                 if (common & ADVERTISE_1000XFULL) {
1009                         bp->duplex = DUPLEX_FULL;
1010                 }
1011                 else {
1012                         bp->duplex = DUPLEX_HALF;
1013                 }
1014         }
1015
1016         return 0;
1017 }
1018
/* Determine line speed and duplex for a copper PHY that has link.
 *
 * With autoneg enabled, resolve the highest common capability in
 * priority order: 1000 full/half (from MII_CTRL1000/MII_STAT1000),
 * then 100 full/half, then 10 full/half from the standard
 * advertisement registers; no common mode marks the link down.
 * With autoneg off, decode the forced settings from BMCR.
 * Returns 0.
 */
static int
bnx2_copper_linkup(struct bnx2 *bp)
{
	u32 bmcr;

	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
	if (bmcr & BMCR_ANENABLE) {
		u32 local_adv, remote_adv, common;

		bnx2_read_phy(bp, MII_CTRL1000, &local_adv);
		bnx2_read_phy(bp, MII_STAT1000, &remote_adv);

		/* The partner's 1000BT status bits sit 2 positions
		 * above the local control bits; shift before
		 * intersecting.
		 */
		common = local_adv & (remote_adv >> 2);
		if (common & ADVERTISE_1000FULL) {
			bp->line_speed = SPEED_1000;
			bp->duplex = DUPLEX_FULL;
		}
		else if (common & ADVERTISE_1000HALF) {
			bp->line_speed = SPEED_1000;
			bp->duplex = DUPLEX_HALF;
		}
		else {
			/* No 1G match; fall back to 10/100 resolution. */
			bnx2_read_phy(bp, bp->mii_adv, &local_adv);
			bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);

			common = local_adv & remote_adv;
			if (common & ADVERTISE_100FULL) {
				bp->line_speed = SPEED_100;
				bp->duplex = DUPLEX_FULL;
			}
			else if (common & ADVERTISE_100HALF) {
				bp->line_speed = SPEED_100;
				bp->duplex = DUPLEX_HALF;
			}
			else if (common & ADVERTISE_10FULL) {
				bp->line_speed = SPEED_10;
				bp->duplex = DUPLEX_FULL;
			}
			else if (common & ADVERTISE_10HALF) {
				bp->line_speed = SPEED_10;
				bp->duplex = DUPLEX_HALF;
			}
			else {
				/* Nothing in common: treat as no link. */
				bp->line_speed = 0;
				bp->link_up = 0;
			}
		}
	}
	else {
		/* Autoneg disabled: forced speed/duplex from BMCR. */
		if (bmcr & BMCR_SPEED100) {
			bp->line_speed = SPEED_100;
		}
		else {
			bp->line_speed = SPEED_10;
		}
		if (bmcr & BMCR_FULLDPLX) {
			bp->duplex = DUPLEX_FULL;
		}
		else {
			bp->duplex = DUPLEX_HALF;
		}
	}

	return 0;
}
1084
/* Program the L2 context type word for one rx ring (cid), including
 * the rx flow control watermarks on the 5709.
 */
static void
bnx2_init_rx_context(struct bnx2 *bp, u32 cid)
{
	u32 val, rx_cid_addr = GET_CID_ADDR(cid);

	val = BNX2_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE;
	val |= BNX2_L2CTX_CTX_TYPE_SIZE_L2;
	val |= 0x02 << 8;	/* NOTE(review): magic field -- meaning
				 * not visible here; confirm against the
				 * 5709 context layout docs. */

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		u32 lo_water, hi_water;

		/* The low watermark is only used when tx flow control
		 * (pause transmission) is enabled; otherwise disable it.
		 */
		if (bp->flow_ctrl & FLOW_CTRL_TX)
			lo_water = BNX2_L2CTX_LO_WATER_MARK_DEFAULT;
		else
			lo_water = BNX2_L2CTX_LO_WATER_MARK_DIS;
		/* Watermarks must fit within the ring and must be
		 * ordered lo < hi; degenerate cases zero lo_water.
		 */
		if (lo_water >= bp->rx_ring_size)
			lo_water = 0;

		hi_water = bp->rx_ring_size / 4;

		if (hi_water <= lo_water)
			lo_water = 0;

		/* Scale both marks to hardware units. */
		hi_water /= BNX2_L2CTX_HI_WATER_MARK_SCALE;
		lo_water /= BNX2_L2CTX_LO_WATER_MARK_SCALE;

		/* hi_water is clamped to its 4-bit field; a zero
		 * hi_water also forces lo_water off.
		 */
		if (hi_water > 0xf)
			hi_water = 0xf;
		else if (hi_water == 0)
			lo_water = 0;
		val |= lo_water | (hi_water << BNX2_L2CTX_HI_WATER_MARK_SHIFT);
	}
	bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_CTX_TYPE, val);
}
1120
1121 static void
1122 bnx2_init_all_rx_contexts(struct bnx2 *bp)
1123 {
1124         int i;
1125         u32 cid;
1126
1127         for (i = 0, cid = RX_CID; i < bp->num_rx_rings; i++, cid++) {
1128                 if (i == 1)
1129                         cid = RX_RSS_CID;
1130                 bnx2_init_rx_context(bp, cid);
1131         }
1132 }
1133
/* Program the EMAC speed, duplex and flow-control registers to match
 * the current software link state, then acknowledge the link-change
 * interrupt.  On 5709 the rx contexts are reprogrammed as well since
 * their watermarks depend on bp->flow_ctrl.
 */
static void
bnx2_set_mac_link(struct bnx2 *bp)
{
	u32 val;

	/* NOTE(review): 0x2620/0x26ff are raw TX_LENGTHS values (the
	 * 1G half-duplex case uses the larger one) -- confirm field
	 * meanings against the EMAC register documentation.
	 */
	REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x2620);
	if (bp->link_up && (bp->line_speed == SPEED_1000) &&
		(bp->duplex == DUPLEX_HALF)) {
		REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x26ff);
	}

	/* Configure the EMAC mode register. */
	val = REG_RD(bp, BNX2_EMAC_MODE);

	val &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
		BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
		BNX2_EMAC_MODE_25G_MODE);

	if (bp->link_up) {
		switch (bp->line_speed) {
			case SPEED_10:
				/* Only chips newer than the 5706 have a
				 * dedicated 10M port mode.
				 */
				if (CHIP_NUM(bp) != CHIP_NUM_5706) {
					val |= BNX2_EMAC_MODE_PORT_MII_10M;
					break;
				}
				/* fall through */
			case SPEED_100:
				val |= BNX2_EMAC_MODE_PORT_MII;
				break;
			case SPEED_2500:
				val |= BNX2_EMAC_MODE_25G_MODE;
				/* fall through */
			case SPEED_1000:
				val |= BNX2_EMAC_MODE_PORT_GMII;
				break;
		}
	}
	else {
		/* Link down: leave the port in GMII mode. */
		val |= BNX2_EMAC_MODE_PORT_GMII;
	}

	/* Set the MAC to operate in the appropriate duplex mode. */
	if (bp->duplex == DUPLEX_HALF)
		val |= BNX2_EMAC_MODE_HALF_DUPLEX;
	REG_WR(bp, BNX2_EMAC_MODE, val);

	/* Enable/disable rx PAUSE. */
	bp->rx_mode &= ~BNX2_EMAC_RX_MODE_FLOW_EN;

	if (bp->flow_ctrl & FLOW_CTRL_RX)
		bp->rx_mode |= BNX2_EMAC_RX_MODE_FLOW_EN;
	REG_WR(bp, BNX2_EMAC_RX_MODE, bp->rx_mode);

	/* Enable/disable tx PAUSE. */
	val = REG_RD(bp, BNX2_EMAC_TX_MODE);
	val &= ~BNX2_EMAC_TX_MODE_FLOW_EN;

	if (bp->flow_ctrl & FLOW_CTRL_TX)
		val |= BNX2_EMAC_TX_MODE_FLOW_EN;
	REG_WR(bp, BNX2_EMAC_TX_MODE, val);

	/* Acknowledge the interrupt. */
	REG_WR(bp, BNX2_EMAC_STATUS, BNX2_EMAC_STATUS_LINK_CHANGE);

	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		bnx2_init_all_rx_contexts(bp);
}
1201
1202 static void
1203 bnx2_enable_bmsr1(struct bnx2 *bp)
1204 {
1205         if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
1206             (CHIP_NUM(bp) == CHIP_NUM_5709))
1207                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1208                                MII_BNX2_BLK_ADDR_GP_STATUS);
1209 }
1210
1211 static void
1212 bnx2_disable_bmsr1(struct bnx2 *bp)
1213 {
1214         if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
1215             (CHIP_NUM(bp) == CHIP_NUM_5709))
1216                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1217                                MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1218 }
1219
1220 static int
1221 bnx2_test_and_enable_2g5(struct bnx2 *bp)
1222 {
1223         u32 up1;
1224         int ret = 1;
1225
1226         if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
1227                 return 0;
1228
1229         if (bp->autoneg & AUTONEG_SPEED)
1230                 bp->advertising |= ADVERTISED_2500baseX_Full;
1231
1232         if (CHIP_NUM(bp) == CHIP_NUM_5709)
1233                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
1234
1235         bnx2_read_phy(bp, bp->mii_up1, &up1);
1236         if (!(up1 & BCM5708S_UP1_2G5)) {
1237                 up1 |= BCM5708S_UP1_2G5;
1238                 bnx2_write_phy(bp, bp->mii_up1, up1);
1239                 ret = 0;
1240         }
1241
1242         if (CHIP_NUM(bp) == CHIP_NUM_5709)
1243                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1244                                MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1245
1246         return ret;
1247 }
1248
1249 static int
1250 bnx2_test_and_disable_2g5(struct bnx2 *bp)
1251 {
1252         u32 up1;
1253         int ret = 0;
1254
1255         if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
1256                 return 0;
1257
1258         if (CHIP_NUM(bp) == CHIP_NUM_5709)
1259                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
1260
1261         bnx2_read_phy(bp, bp->mii_up1, &up1);
1262         if (up1 & BCM5708S_UP1_2G5) {
1263                 up1 &= ~BCM5708S_UP1_2G5;
1264                 bnx2_write_phy(bp, bp->mii_up1, up1);
1265                 ret = 1;
1266         }
1267
1268         if (CHIP_NUM(bp) == CHIP_NUM_5709)
1269                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1270                                MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1271
1272         return ret;
1273 }
1274
1275 static void
1276 bnx2_enable_forced_2g5(struct bnx2 *bp)
1277 {
1278         u32 bmcr;
1279
1280         if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
1281                 return;
1282
1283         if (CHIP_NUM(bp) == CHIP_NUM_5709) {
1284                 u32 val;
1285
1286                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1287                                MII_BNX2_BLK_ADDR_SERDES_DIG);
1288                 bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val);
1289                 val &= ~MII_BNX2_SD_MISC1_FORCE_MSK;
1290                 val |= MII_BNX2_SD_MISC1_FORCE | MII_BNX2_SD_MISC1_FORCE_2_5G;
1291                 bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);
1292
1293                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1294                                MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1295                 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1296
1297         } else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
1298                 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1299                 bmcr |= BCM5708S_BMCR_FORCE_2500;
1300         }
1301
1302         if (bp->autoneg & AUTONEG_SPEED) {
1303                 bmcr &= ~BMCR_ANENABLE;
1304                 if (bp->req_duplex == DUPLEX_FULL)
1305                         bmcr |= BMCR_FULLDPLX;
1306         }
1307         bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
1308 }
1309
1310 static void
1311 bnx2_disable_forced_2g5(struct bnx2 *bp)
1312 {
1313         u32 bmcr;
1314
1315         if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
1316                 return;
1317
1318         if (CHIP_NUM(bp) == CHIP_NUM_5709) {
1319                 u32 val;
1320
1321                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1322                                MII_BNX2_BLK_ADDR_SERDES_DIG);
1323                 bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val);
1324                 val &= ~MII_BNX2_SD_MISC1_FORCE;
1325                 bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);
1326
1327                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1328                                MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1329                 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1330
1331         } else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
1332                 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1333                 bmcr &= ~BCM5708S_BMCR_FORCE_2500;
1334         }
1335
1336         if (bp->autoneg & AUTONEG_SPEED)
1337                 bmcr |= BMCR_SPEED1000 | BMCR_ANENABLE | BMCR_ANRESTART;
1338         bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
1339 }
1340
1341 static void
1342 bnx2_5706s_force_link_dn(struct bnx2 *bp, int start)
1343 {
1344         u32 val;
1345
1346         bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS, MII_EXPAND_SERDES_CTL);
1347         bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &val);
1348         if (start)
1349                 bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val & 0xff0f);
1350         else
1351                 bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val | 0xc0);
1352 }
1353
/* Poll the PHY and bring software link state in sync with it.
 *
 * Reads the link status, runs the chip-specific link-up handler (or
 * the link-down recovery path), reports transitions, and reprograms
 * the MAC.  Returns 0.
 * NOTE(review): appears to run under bp->phy_lock like the other
 * link-management routines here -- confirm at call sites.
 */
static int
bnx2_set_link(struct bnx2 *bp)
{
	u32 bmsr;
	u8 link_up;

	/* In loopback mode the link is up by definition. */
	if (bp->loopback == MAC_LOOPBACK || bp->loopback == PHY_LOOPBACK) {
		bp->link_up = 1;
		return 0;
	}

	/* A remote (firmware-managed) PHY is not polled here. */
	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
		return 0;

	link_up = bp->link_up;

	/* BMSR latches link-down; read twice so the second read
	 * reflects the current state.
	 */
	bnx2_enable_bmsr1(bp);
	bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
	bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
	bnx2_disable_bmsr1(bp);

	if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
	    (CHIP_NUM(bp) == CHIP_NUM_5706)) {
		u32 val, an_dbg;

		/* Undo a previously forced link-down before
		 * re-evaluating the link.
		 */
		if (bp->phy_flags & BNX2_PHY_FLAG_FORCED_DOWN) {
			bnx2_5706s_force_link_dn(bp, 0);
			bp->phy_flags &= ~BNX2_PHY_FLAG_FORCED_DOWN;
		}
		val = REG_RD(bp, BNX2_EMAC_STATUS);

		/* Double read, mirroring the latched-BMSR pattern. */
		bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
		bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);
		bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);

		/* Trust the MAC's link indication only when the SerDes
		 * also reports sync.
		 */
		if ((val & BNX2_EMAC_STATUS_LINK) &&
		    !(an_dbg & MISC_SHDW_AN_DBG_NOSYNC))
			bmsr |= BMSR_LSTATUS;
		else
			bmsr &= ~BMSR_LSTATUS;
	}

	if (bmsr & BMSR_LSTATUS) {
		bp->link_up = 1;

		if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
			if (CHIP_NUM(bp) == CHIP_NUM_5706)
				bnx2_5706s_linkup(bp);
			else if (CHIP_NUM(bp) == CHIP_NUM_5708)
				bnx2_5708s_linkup(bp);
			else if (CHIP_NUM(bp) == CHIP_NUM_5709)
				bnx2_5709s_linkup(bp);
		}
		else {
			bnx2_copper_linkup(bp);
		}
		bnx2_resolve_flow_ctrl(bp);
	}
	else {
		if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
		    (bp->autoneg & AUTONEG_SPEED))
			bnx2_disable_forced_2g5(bp);

		/* Link lost while in parallel-detect mode: re-enable
		 * autonegotiation.
		 */
		if (bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT) {
			u32 bmcr;

			bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
			bmcr |= BMCR_ANENABLE;
			bnx2_write_phy(bp, bp->mii_bmcr, bmcr);

			bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;
		}
		bp->link_up = 0;
	}

	/* Log only on state transitions. */
	if (bp->link_up != link_up) {
		bnx2_report_link(bp);
	}

	bnx2_set_mac_link(bp);

	return 0;
}
1437
1438 static int
1439 bnx2_reset_phy(struct bnx2 *bp)
1440 {
1441         int i;
1442         u32 reg;
1443
1444         bnx2_write_phy(bp, bp->mii_bmcr, BMCR_RESET);
1445
1446 #define PHY_RESET_MAX_WAIT 100
1447         for (i = 0; i < PHY_RESET_MAX_WAIT; i++) {
1448                 udelay(10);
1449
1450                 bnx2_read_phy(bp, bp->mii_bmcr, &reg);
1451                 if (!(reg & BMCR_RESET)) {
1452                         udelay(20);
1453                         break;
1454                 }
1455         }
1456         if (i == PHY_RESET_MAX_WAIT) {
1457                 return -EBUSY;
1458         }
1459         return 0;
1460 }
1461
1462 static u32
1463 bnx2_phy_get_pause_adv(struct bnx2 *bp)
1464 {
1465         u32 adv = 0;
1466
1467         if ((bp->req_flow_ctrl & (FLOW_CTRL_RX | FLOW_CTRL_TX)) ==
1468                 (FLOW_CTRL_RX | FLOW_CTRL_TX)) {
1469
1470                 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1471                         adv = ADVERTISE_1000XPAUSE;
1472                 }
1473                 else {
1474                         adv = ADVERTISE_PAUSE_CAP;
1475                 }
1476         }
1477         else if (bp->req_flow_ctrl & FLOW_CTRL_TX) {
1478                 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1479                         adv = ADVERTISE_1000XPSE_ASYM;
1480                 }
1481                 else {
1482                         adv = ADVERTISE_PAUSE_ASYM;
1483                 }
1484         }
1485         else if (bp->req_flow_ctrl & FLOW_CTRL_RX) {
1486                 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1487                         adv = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
1488                 }
1489                 else {
1490                         adv = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
1491                 }
1492         }
1493         return adv;
1494 }
1495
1496 static int bnx2_fw_sync(struct bnx2 *, u32, int, int);
1497
/* Encode the requested autoneg/speed/duplex/pause settings into a
 * BNX2_NETLINK_SET_LINK_* word, post it in the driver mailbox, and
 * hand it to the firmware to apply to the remote PHY.
 * Caller holds bp->phy_lock; the lock is dropped around the firmware
 * handshake.  Returns 0.
 */
static int
bnx2_setup_remote_phy(struct bnx2 *bp, u8 port)
{
	u32 speed_arg = 0, pause_adv;

	pause_adv = bnx2_phy_get_pause_adv(bp);

	if (bp->autoneg & AUTONEG_SPEED) {
		/* Autoneg: advertise every requested mode. */
		speed_arg |= BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG;
		if (bp->advertising & ADVERTISED_10baseT_Half)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_10HALF;
		if (bp->advertising & ADVERTISED_10baseT_Full)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_10FULL;
		if (bp->advertising & ADVERTISED_100baseT_Half)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_100HALF;
		if (bp->advertising & ADVERTISED_100baseT_Full)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_100FULL;
		if (bp->advertising & ADVERTISED_1000baseT_Full)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_1GFULL;
		if (bp->advertising & ADVERTISED_2500baseX_Full)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_2G5FULL;
	} else {
		/* Forced mode: pick the single requested speed/duplex. */
		if (bp->req_line_speed == SPEED_2500)
			speed_arg = BNX2_NETLINK_SET_LINK_SPEED_2G5FULL;
		else if (bp->req_line_speed == SPEED_1000)
			speed_arg = BNX2_NETLINK_SET_LINK_SPEED_1GFULL;
		else if (bp->req_line_speed == SPEED_100) {
			if (bp->req_duplex == DUPLEX_FULL)
				speed_arg = BNX2_NETLINK_SET_LINK_SPEED_100FULL;
			else
				speed_arg = BNX2_NETLINK_SET_LINK_SPEED_100HALF;
		} else if (bp->req_line_speed == SPEED_10) {
			if (bp->req_duplex == DUPLEX_FULL)
				speed_arg = BNX2_NETLINK_SET_LINK_SPEED_10FULL;
			else
				speed_arg = BNX2_NETLINK_SET_LINK_SPEED_10HALF;
		}
	}

	if (pause_adv & (ADVERTISE_1000XPAUSE | ADVERTISE_PAUSE_CAP))
		speed_arg |= BNX2_NETLINK_SET_LINK_FC_SYM_PAUSE;
	if (pause_adv & (ADVERTISE_1000XPSE_ASYM | ADVERTISE_PAUSE_ASYM))
		speed_arg |= BNX2_NETLINK_SET_LINK_FC_ASYM_PAUSE;

	if (port == PORT_TP)
		speed_arg |= BNX2_NETLINK_SET_LINK_PHY_APP_REMOTE |
			     BNX2_NETLINK_SET_LINK_ETH_AT_WIRESPEED;

	bnx2_shmem_wr(bp, BNX2_DRV_MB_ARG0, speed_arg);

	/* Drop phy_lock while waiting for the firmware to process the
	 * command.
	 */
	spin_unlock_bh(&bp->phy_lock);
	bnx2_fw_sync(bp, BNX2_DRV_MSG_CODE_CMD_SET_LINK, 1, 0);
	spin_lock_bh(&bp->phy_lock);

	return 0;
}
1554
/* Configure a SerDes PHY according to bp's requested settings.
 *
 * Remote-PHY configurations are delegated to the firmware.  Otherwise
 * either a fixed speed/duplex is forced, or autonegotiation is
 * (re)started with the requested advertisement.  Returns 0.
 * Caller holds bp->phy_lock (dropped briefly in the autoneg path).
 */
static int
bnx2_setup_serdes_phy(struct bnx2 *bp, u8 port)
{
	u32 adv, bmcr;
	u32 new_adv = 0;

	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
		return (bnx2_setup_remote_phy(bp, port));

	if (!(bp->autoneg & AUTONEG_SPEED)) {
		/* Forced speed/duplex path. */
		u32 new_bmcr;
		int force_link_down = 0;

		/* Switching the 2.5G capability on or off requires a
		 * link bounce so the partner renegotiates.
		 */
		if (bp->req_line_speed == SPEED_2500) {
			if (!bnx2_test_and_enable_2g5(bp))
				force_link_down = 1;
		} else if (bp->req_line_speed == SPEED_1000) {
			if (bnx2_test_and_disable_2g5(bp))
				force_link_down = 1;
		}
		bnx2_read_phy(bp, bp->mii_adv, &adv);
		adv &= ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF);

		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
		new_bmcr = bmcr & ~BMCR_ANENABLE;
		new_bmcr |= BMCR_SPEED1000;

		if (CHIP_NUM(bp) == CHIP_NUM_5709) {
			if (bp->req_line_speed == SPEED_2500)
				bnx2_enable_forced_2g5(bp);
			else if (bp->req_line_speed == SPEED_1000) {
				bnx2_disable_forced_2g5(bp);
				/* 0x2000 is the BMCR_SPEED100 bit;
				 * cleared when forcing 1G on the 5709.
				 */
				new_bmcr &= ~0x2000;
			}

		} else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
			if (bp->req_line_speed == SPEED_2500)
				new_bmcr |= BCM5708S_BMCR_FORCE_2500;
			else
				new_bmcr = bmcr & ~BCM5708S_BMCR_FORCE_2500;
		}

		if (bp->req_duplex == DUPLEX_FULL) {
			adv |= ADVERTISE_1000XFULL;
			new_bmcr |= BMCR_FULLDPLX;
		}
		else {
			adv |= ADVERTISE_1000XHALF;
			new_bmcr &= ~BMCR_FULLDPLX;
		}
		if ((new_bmcr != bmcr) || (force_link_down)) {
			/* Force a link down visible on the other side */
			if (bp->link_up) {
				/* Briefly advertise nothing and restart
				 * autoneg so the partner sees the drop,
				 * then apply the forced settings.
				 */
				bnx2_write_phy(bp, bp->mii_adv, adv &
					       ~(ADVERTISE_1000XFULL |
						 ADVERTISE_1000XHALF));
				bnx2_write_phy(bp, bp->mii_bmcr, bmcr |
					BMCR_ANRESTART | BMCR_ANENABLE);

				bp->link_up = 0;
				netif_carrier_off(bp->dev);
				bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
				bnx2_report_link(bp);
			}
			bnx2_write_phy(bp, bp->mii_adv, adv);
			bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
		} else {
			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);
		}
		return 0;
	}

	/* Autonegotiated path. */
	bnx2_test_and_enable_2g5(bp);

	if (bp->advertising & ADVERTISED_1000baseT_Full)
		new_adv |= ADVERTISE_1000XFULL;

	new_adv |= bnx2_phy_get_pause_adv(bp);

	bnx2_read_phy(bp, bp->mii_adv, &adv);
	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

	bp->serdes_an_pending = 0;
	if ((adv != new_adv) || ((bmcr & BMCR_ANENABLE) == 0)) {
		/* Force a link down visible on the other side */
		if (bp->link_up) {
			bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
			/* Drop phy_lock while we give the partner time
			 * to notice the drop.
			 */
			spin_unlock_bh(&bp->phy_lock);
			msleep(20);
			spin_lock_bh(&bp->phy_lock);
		}

		bnx2_write_phy(bp, bp->mii_adv, new_adv);
		bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART |
			BMCR_ANENABLE);
		/* Speed up link-up time when the link partner
		 * does not autonegotiate which is very common
		 * in blade servers. Some blade servers use
		 * IPMI for keyboard input and it's important
		 * to minimize link disruptions. Autoneg. involves
		 * exchanging base pages plus 3 next pages and
		 * normally completes in about 120 msec.
		 */
		bp->current_interval = BNX2_SERDES_AN_TIMEOUT;
		bp->serdes_an_pending = 1;
		mod_timer(&bp->timer, jiffies + bp->current_interval);
	} else {
		bnx2_resolve_flow_ctrl(bp);
		bnx2_set_mac_link(bp);
	}

	return 0;
}
1669
1670 #define ETHTOOL_ALL_FIBRE_SPEED                                         \
1671         (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) ?                  \
1672                 (ADVERTISED_2500baseX_Full | ADVERTISED_1000baseT_Full) :\
1673                 (ADVERTISED_1000baseT_Full)
1674
1675 #define ETHTOOL_ALL_COPPER_SPEED                                        \
1676         (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |            \
1677         ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |           \
1678         ADVERTISED_1000baseT_Full)
1679
1680 #define PHY_ALL_10_100_SPEED (ADVERTISE_10HALF | ADVERTISE_10FULL | \
1681         ADVERTISE_100HALF | ADVERTISE_100FULL | ADVERTISE_CSMA)
1682
1683 #define PHY_ALL_1000_SPEED (ADVERTISE_1000HALF | ADVERTISE_1000FULL)
1684
1685 static void
1686 bnx2_set_default_remote_link(struct bnx2 *bp)
1687 {
1688         u32 link;
1689
1690         if (bp->phy_port == PORT_TP)
1691                 link = bnx2_shmem_rd(bp, BNX2_RPHY_COPPER_LINK);
1692         else
1693                 link = bnx2_shmem_rd(bp, BNX2_RPHY_SERDES_LINK);
1694
1695         if (link & BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG) {
1696                 bp->req_line_speed = 0;
1697                 bp->autoneg |= AUTONEG_SPEED;
1698                 bp->advertising = ADVERTISED_Autoneg;
1699                 if (link & BNX2_NETLINK_SET_LINK_SPEED_10HALF)
1700                         bp->advertising |= ADVERTISED_10baseT_Half;
1701                 if (link & BNX2_NETLINK_SET_LINK_SPEED_10FULL)
1702                         bp->advertising |= ADVERTISED_10baseT_Full;
1703                 if (link & BNX2_NETLINK_SET_LINK_SPEED_100HALF)
1704                         bp->advertising |= ADVERTISED_100baseT_Half;
1705                 if (link & BNX2_NETLINK_SET_LINK_SPEED_100FULL)
1706                         bp->advertising |= ADVERTISED_100baseT_Full;
1707                 if (link & BNX2_NETLINK_SET_LINK_SPEED_1GFULL)
1708                         bp->advertising |= ADVERTISED_1000baseT_Full;
1709                 if (link & BNX2_NETLINK_SET_LINK_SPEED_2G5FULL)
1710                         bp->advertising |= ADVERTISED_2500baseX_Full;
1711         } else {
1712                 bp->autoneg = 0;
1713                 bp->advertising = 0;
1714                 bp->req_duplex = DUPLEX_FULL;
1715                 if (link & BNX2_NETLINK_SET_LINK_SPEED_10) {
1716                         bp->req_line_speed = SPEED_10;
1717                         if (link & BNX2_NETLINK_SET_LINK_SPEED_10HALF)
1718                                 bp->req_duplex = DUPLEX_HALF;
1719                 }
1720                 if (link & BNX2_NETLINK_SET_LINK_SPEED_100) {
1721                         bp->req_line_speed = SPEED_100;
1722                         if (link & BNX2_NETLINK_SET_LINK_SPEED_100HALF)
1723                                 bp->req_duplex = DUPLEX_HALF;
1724                 }
1725                 if (link & BNX2_NETLINK_SET_LINK_SPEED_1GFULL)
1726                         bp->req_line_speed = SPEED_1000;
1727                 if (link & BNX2_NETLINK_SET_LINK_SPEED_2G5FULL)
1728                         bp->req_line_speed = SPEED_2500;
1729         }
1730 }
1731
1732 static void
1733 bnx2_set_default_link(struct bnx2 *bp)
1734 {
1735         if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
1736                 bnx2_set_default_remote_link(bp);
1737                 return;
1738         }
1739
1740         bp->autoneg = AUTONEG_SPEED | AUTONEG_FLOW_CTRL;
1741         bp->req_line_speed = 0;
1742         if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1743                 u32 reg;
1744
1745                 bp->advertising = ETHTOOL_ALL_FIBRE_SPEED | ADVERTISED_Autoneg;
1746
1747                 reg = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_CONFIG);
1748                 reg &= BNX2_PORT_HW_CFG_CFG_DFLT_LINK_MASK;
1749                 if (reg == BNX2_PORT_HW_CFG_CFG_DFLT_LINK_1G) {
1750                         bp->autoneg = 0;
1751                         bp->req_line_speed = bp->line_speed = SPEED_1000;
1752                         bp->req_duplex = DUPLEX_FULL;
1753                 }
1754         } else
1755                 bp->advertising = ETHTOOL_ALL_COPPER_SPEED | ADVERTISED_Autoneg;
1756 }
1757
1758 static void
1759 bnx2_send_heart_beat(struct bnx2 *bp)
1760 {
1761         u32 msg;
1762         u32 addr;
1763
1764         spin_lock(&bp->indirect_lock);
1765         msg = (u32) (++bp->fw_drv_pulse_wr_seq & BNX2_DRV_PULSE_SEQ_MASK);
1766         addr = bp->shmem_base + BNX2_DRV_PULSE_MB;
1767         REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, addr);
1768         REG_WR(bp, BNX2_PCICFG_REG_WINDOW, msg);
1769         spin_unlock(&bp->indirect_lock);
1770 }
1771
/* Handle a link-status event posted by the remote-PHY firmware.
 * Decodes speed/duplex/flow control from the shared-memory status
 * word, detects a media-type change, reports link transitions, and
 * reprograms the MAC.
 */
static void
bnx2_remote_phy_event(struct bnx2 *bp)
{
	u32 msg;
	u8 link_up = bp->link_up;	/* remember previous link state */
	u8 old_port;

	msg = bnx2_shmem_rd(bp, BNX2_LINK_STATUS);

	/* Answer a firmware heartbeat request before decoding the rest. */
	if (msg & BNX2_LINK_STATUS_HEART_BEAT_EXPIRED)
		bnx2_send_heart_beat(bp);

	msg &= ~BNX2_LINK_STATUS_HEART_BEAT_EXPIRED;

	if ((msg & BNX2_LINK_STATUS_LINK_UP) == BNX2_LINK_STATUS_LINK_DOWN)
		bp->link_up = 0;
	else {
		u32 speed;

		bp->link_up = 1;
		speed = msg & BNX2_LINK_STATUS_SPEED_MASK;
		bp->duplex = DUPLEX_FULL;
		/* Each half-duplex case deliberately falls through to the
		 * full-duplex case of the same speed to set line_speed.
		 */
		switch (speed) {
			case BNX2_LINK_STATUS_10HALF:
				bp->duplex = DUPLEX_HALF;
				/* fall through */
			case BNX2_LINK_STATUS_10FULL:
				bp->line_speed = SPEED_10;
				break;
			case BNX2_LINK_STATUS_100HALF:
				bp->duplex = DUPLEX_HALF;
				/* fall through */
			case BNX2_LINK_STATUS_100BASE_T4:
			case BNX2_LINK_STATUS_100FULL:
				bp->line_speed = SPEED_100;
				break;
			case BNX2_LINK_STATUS_1000HALF:
				bp->duplex = DUPLEX_HALF;
				/* fall through */
			case BNX2_LINK_STATUS_1000FULL:
				bp->line_speed = SPEED_1000;
				break;
			case BNX2_LINK_STATUS_2500HALF:
				bp->duplex = DUPLEX_HALF;
				/* fall through */
			case BNX2_LINK_STATUS_2500FULL:
				bp->line_speed = SPEED_2500;
				break;
			default:
				bp->line_speed = 0;
				break;
		}

		/* Flow control: use the requested setting unless both speed
		 * and flow-control autoneg are enabled, in which case take
		 * the negotiated result reported by the firmware.
		 */
		bp->flow_ctrl = 0;
		if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
		    (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {
			if (bp->duplex == DUPLEX_FULL)
				bp->flow_ctrl = bp->req_flow_ctrl;
		} else {
			if (msg & BNX2_LINK_STATUS_TX_FC_ENABLED)
				bp->flow_ctrl |= FLOW_CTRL_TX;
			if (msg & BNX2_LINK_STATUS_RX_FC_ENABLED)
				bp->flow_ctrl |= FLOW_CTRL_RX;
		}

		/* Reload link defaults if the media type changed. */
		old_port = bp->phy_port;
		if (msg & BNX2_LINK_STATUS_SERDES_LINK)
			bp->phy_port = PORT_FIBRE;
		else
			bp->phy_port = PORT_TP;

		if (old_port != bp->phy_port)
			bnx2_set_default_link(bp);

	}
	if (bp->link_up != link_up)
		bnx2_report_link(bp);

	bnx2_set_mac_link(bp);
}
1848
1849 static int
1850 bnx2_set_remote_link(struct bnx2 *bp)
1851 {
1852         u32 evt_code;
1853
1854         evt_code = bnx2_shmem_rd(bp, BNX2_FW_EVT_CODE_MB);
1855         switch (evt_code) {
1856                 case BNX2_FW_EVT_CODE_LINK_EVENT:
1857                         bnx2_remote_phy_event(bp);
1858                         break;
1859                 case BNX2_FW_EVT_CODE_SW_TIMER_EXPIRATION_EVENT:
1860                 default:
1861                         bnx2_send_heart_beat(bp);
1862                         break;
1863         }
1864         return 0;
1865 }
1866
/* Program the copper PHY according to the requested link settings:
 * either (re)start autonegotiation with the desired advertisement or
 * force a fixed speed/duplex.  Called with bp->phy_lock held; the
 * lock is dropped around the forced-link-down sleep.  Returns 0.
 */
static int
bnx2_setup_copper_phy(struct bnx2 *bp)
{
	u32 bmcr;
	u32 new_bmcr;

	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

	if (bp->autoneg & AUTONEG_SPEED) {
		u32 adv_reg, adv1000_reg;
		u32 new_adv_reg = 0;
		u32 new_adv1000_reg = 0;

		/* Current 10/100 advertisement plus pause bits. */
		bnx2_read_phy(bp, bp->mii_adv, &adv_reg);
		adv_reg &= (PHY_ALL_10_100_SPEED | ADVERTISE_PAUSE_CAP |
			ADVERTISE_PAUSE_ASYM);

		/* Current 1000BASE-T advertisement. */
		bnx2_read_phy(bp, MII_CTRL1000, &adv1000_reg);
		adv1000_reg &= PHY_ALL_1000_SPEED;

		/* Build the desired advertisement from bp->advertising. */
		if (bp->advertising & ADVERTISED_10baseT_Half)
			new_adv_reg |= ADVERTISE_10HALF;
		if (bp->advertising & ADVERTISED_10baseT_Full)
			new_adv_reg |= ADVERTISE_10FULL;
		if (bp->advertising & ADVERTISED_100baseT_Half)
			new_adv_reg |= ADVERTISE_100HALF;
		if (bp->advertising & ADVERTISED_100baseT_Full)
			new_adv_reg |= ADVERTISE_100FULL;
		if (bp->advertising & ADVERTISED_1000baseT_Full)
			new_adv1000_reg |= ADVERTISE_1000FULL;

		new_adv_reg |= ADVERTISE_CSMA;

		new_adv_reg |= bnx2_phy_get_pause_adv(bp);

		/* Restart autoneg only if the advertisement changed or
		 * autoneg is not already enabled.
		 */
		if ((adv1000_reg != new_adv1000_reg) ||
			(adv_reg != new_adv_reg) ||
			((bmcr & BMCR_ANENABLE) == 0)) {

			bnx2_write_phy(bp, bp->mii_adv, new_adv_reg);
			bnx2_write_phy(bp, MII_CTRL1000, new_adv1000_reg);
			bnx2_write_phy(bp, bp->mii_bmcr, BMCR_ANRESTART |
				BMCR_ANENABLE);
		}
		else if (bp->link_up) {
			/* Flow ctrl may have changed from auto to forced */
			/* or vice-versa. */

			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);
		}
		return 0;
	}

	/* Forced speed/duplex path. */
	new_bmcr = 0;
	if (bp->req_line_speed == SPEED_100) {
		new_bmcr |= BMCR_SPEED100;
	}
	if (bp->req_duplex == DUPLEX_FULL) {
		new_bmcr |= BMCR_FULLDPLX;
	}
	if (new_bmcr != bmcr) {
		u32 bmsr;

		/* Read BMSR twice (link status is a latched bit). */
		bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
		bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);

		if (bmsr & BMSR_LSTATUS) {
			/* Force link down */
			bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
			spin_unlock_bh(&bp->phy_lock);
			msleep(50);
			spin_lock_bh(&bp->phy_lock);

			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
		}

		bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);

		/* Normally, the new speed is setup after the link has
		 * gone down and up again. In some cases, link will not go
		 * down so we need to set up the new speed here.
		 */
		if (bmsr & BMSR_LSTATUS) {
			bp->line_speed = bp->req_line_speed;
			bp->duplex = bp->req_duplex;
			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);
		}
	} else {
		bnx2_resolve_flow_ctrl(bp);
		bnx2_set_mac_link(bp);
	}
	return 0;
}
1963
1964 static int
1965 bnx2_setup_phy(struct bnx2 *bp, u8 port)
1966 {
1967         if (bp->loopback == MAC_LOOPBACK)
1968                 return 0;
1969
1970         if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1971                 return (bnx2_setup_serdes_phy(bp, port));
1972         }
1973         else {
1974                 return (bnx2_setup_copper_phy(bp));
1975         }
1976 }
1977
/* Initialize the 5709 SerDes PHY.  The 5709S uses a different MII
 * register layout, so the bp->mii_* offsets are remapped first, then
 * the PHY is programmed block by block via MII_BNX2_BLK_ADDR.
 * Returns 0.
 */
static int
bnx2_init_5709s_phy(struct bnx2 *bp, int reset_phy)
{
	u32 val;

	/* 5709S standard MII registers are offset by 0x10. */
	bp->mii_bmcr = MII_BMCR + 0x10;
	bp->mii_bmsr = MII_BMSR + 0x10;
	bp->mii_bmsr1 = MII_BNX2_GP_TOP_AN_STATUS1;
	bp->mii_adv = MII_ADVERTISE + 0x10;
	bp->mii_lpa = MII_LPA + 0x10;
	bp->mii_up1 = MII_BNX2_OVER1G_UP1;

	/* Select the AER block and map in the autoneg MMD. */
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_AER);
	bnx2_write_phy(bp, MII_BNX2_AER_AER, MII_BNX2_AER_AER_AN_MMD);

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
	if (reset_phy)
		bnx2_reset_phy(bp);

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_SERDES_DIG);

	/* Force fiber mode and disable media auto-detect. */
	bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, &val);
	val &= ~MII_BNX2_SD_1000XCTL1_AUTODET;
	val |= MII_BNX2_SD_1000XCTL1_FIBER;
	bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, val);

	/* Enable 2.5G in the over-1G block only when the device is
	 * 2.5G capable.
	 */
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
	bnx2_read_phy(bp, MII_BNX2_OVER1G_UP1, &val);
	if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE)
		val |= BCM5708S_UP1_2G5;
	else
		val &= ~BCM5708S_UP1_2G5;
	bnx2_write_phy(bp, MII_BNX2_OVER1G_UP1, val);

	/* Enable BAM and T2 next-page exchange. */
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_BAM_NXTPG);
	bnx2_read_phy(bp, MII_BNX2_BAM_NXTPG_CTL, &val);
	val |= MII_BNX2_NXTPG_CTL_T2 | MII_BNX2_NXTPG_CTL_BAM;
	bnx2_write_phy(bp, MII_BNX2_BAM_NXTPG_CTL, val);

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_CL73_USERB0);

	val = MII_BNX2_CL73_BAM_EN | MII_BNX2_CL73_BAM_STA_MGR_EN |
	      MII_BNX2_CL73_BAM_NP_AFT_BP_EN;
	bnx2_write_phy(bp, MII_BNX2_CL73_BAM_CTL1, val);

	/* Leave the block address pointing at the default IEEE block. */
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);

	return 0;
}
2027
/* Initialize the 5708 SerDes PHY: select the IEEE register set,
 * enable fiber mode with auto-detect, optionally enable 2.5G, and
 * apply TX amplitude tuning for early silicon and backplane boards.
 * Returns 0.
 */
static int
bnx2_init_5708s_phy(struct bnx2 *bp, int reset_phy)
{
	u32 val;

	if (reset_phy)
		bnx2_reset_phy(bp);

	bp->mii_up1 = BCM5708S_UP1;

	bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG3);
	bnx2_write_phy(bp, BCM5708S_DIG_3_0, BCM5708S_DIG_3_0_USE_IEEE);
	bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);

	/* Fiber mode with automatic media detection. */
	bnx2_read_phy(bp, BCM5708S_1000X_CTL1, &val);
	val |= BCM5708S_1000X_CTL1_FIBER_MODE | BCM5708S_1000X_CTL1_AUTODET_EN;
	bnx2_write_phy(bp, BCM5708S_1000X_CTL1, val);

	bnx2_read_phy(bp, BCM5708S_1000X_CTL2, &val);
	val |= BCM5708S_1000X_CTL2_PLLEL_DET_EN;
	bnx2_write_phy(bp, BCM5708S_1000X_CTL2, val);

	if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) {
		bnx2_read_phy(bp, BCM5708S_UP1, &val);
		val |= BCM5708S_UP1_2G5;
		bnx2_write_phy(bp, BCM5708S_UP1, val);
	}

	if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_B1)) {
		/* increase tx signal amplitude */
		bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
			       BCM5708S_BLK_ADDR_TX_MISC);
		bnx2_read_phy(bp, BCM5708S_TX_ACTL1, &val);
		val &= ~BCM5708S_TX_ACTL1_DRIVER_VCM;
		bnx2_write_phy(bp, BCM5708S_TX_ACTL1, val);
		bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);
	}

	/* NVRAM may supply a TXCTL3 value to apply on backplane designs. */
	val = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_CONFIG) &
	      BNX2_PORT_HW_CFG_CFG_TXCTL3_MASK;

	if (val) {
		u32 is_backplane;

		is_backplane = bnx2_shmem_rd(bp, BNX2_SHARED_HW_CFG_CONFIG);
		if (is_backplane & BNX2_SHARED_HW_CFG_PHY_BACKPLANE) {
			bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
				       BCM5708S_BLK_ADDR_TX_MISC);
			bnx2_write_phy(bp, BCM5708S_TX_ACTL3, val);
			bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
				       BCM5708S_BLK_ADDR_DIG);
		}
	}
	return 0;
}
2085
/* Initialize the 5706 SerDes PHY.  Registers 0x18/0x1c are
 * vendor-specific shadow registers; the values written differ for
 * jumbo vs. standard MTU.  Returns 0.
 */
static int
bnx2_init_5706s_phy(struct bnx2 *bp, int reset_phy)
{
	if (reset_phy)
		bnx2_reset_phy(bp);

	bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;

	if (CHIP_NUM(bp) == CHIP_NUM_5706)
		REG_WR(bp, BNX2_MISC_GP_HW_CTL0, 0x300);

	if (bp->dev->mtu > 1500) {
		u32 val;

		/* Set extended packet length bit */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, (val & 0xfff8) | 0x4000);

		bnx2_write_phy(bp, 0x1c, 0x6c00);
		bnx2_read_phy(bp, 0x1c, &val);
		bnx2_write_phy(bp, 0x1c, (val & 0x3ff) | 0xec02);
	}
	else {
		u32 val;

		/* Standard MTU: clear the extended length bits again. */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, val & ~0x4007);

		bnx2_write_phy(bp, 0x1c, 0x6c00);
		bnx2_read_phy(bp, 0x1c, &val);
		bnx2_write_phy(bp, 0x1c, (val & 0x3fd) | 0xec00);
	}

	return 0;
}
2123
/* Initialize the copper (TP) PHY: apply the CRC and early-DAC
 * workarounds when flagged, adjust the extended packet length bit for
 * the current MTU, and enable ethernet@wirespeed.  Registers
 * 0x10/0x15/0x17/0x18 are vendor-specific.  Returns 0.
 */
static int
bnx2_init_copper_phy(struct bnx2 *bp, int reset_phy)
{
	u32 val;

	if (reset_phy)
		bnx2_reset_phy(bp);

	if (bp->phy_flags & BNX2_PHY_FLAG_CRC_FIX) {
		/* Vendor-prescribed shadow-register sequence for the
		 * CRC fix.
		 */
		bnx2_write_phy(bp, 0x18, 0x0c00);
		bnx2_write_phy(bp, 0x17, 0x000a);
		bnx2_write_phy(bp, 0x15, 0x310b);
		bnx2_write_phy(bp, 0x17, 0x201f);
		bnx2_write_phy(bp, 0x15, 0x9506);
		bnx2_write_phy(bp, 0x17, 0x401f);
		bnx2_write_phy(bp, 0x15, 0x14e2);
		bnx2_write_phy(bp, 0x18, 0x0400);
	}

	if (bp->phy_flags & BNX2_PHY_FLAG_DIS_EARLY_DAC) {
		/* Clear bit 8 in DSP expansion register 0x8 to disable
		 * early DAC.
		 */
		bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS,
			       MII_BNX2_DSP_EXPAND_REG | 0x8);
		bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &val);
		val &= ~(1 << 8);
		bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val);
	}

	if (bp->dev->mtu > 1500) {
		/* Set extended packet length bit */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, val | 0x4000);

		bnx2_read_phy(bp, 0x10, &val);
		bnx2_write_phy(bp, 0x10, val | 0x1);
	}
	else {
		/* Standard MTU: clear the extended length bits. */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, val & ~0x4007);

		bnx2_read_phy(bp, 0x10, &val);
		bnx2_write_phy(bp, 0x10, val & ~0x1);
	}

	/* ethernet@wirespeed */
	bnx2_write_phy(bp, 0x18, 0x7007);
	bnx2_read_phy(bp, 0x18, &val);
	bnx2_write_phy(bp, 0x18, val | (1 << 15) | (1 << 4));
	return 0;
}
2175
2176
2177 static int
2178 bnx2_init_phy(struct bnx2 *bp, int reset_phy)
2179 {
2180         u32 val;
2181         int rc = 0;
2182
2183         bp->phy_flags &= ~BNX2_PHY_FLAG_INT_MODE_MASK;
2184         bp->phy_flags |= BNX2_PHY_FLAG_INT_MODE_LINK_READY;
2185
2186         bp->mii_bmcr = MII_BMCR;
2187         bp->mii_bmsr = MII_BMSR;
2188         bp->mii_bmsr1 = MII_BMSR;
2189         bp->mii_adv = MII_ADVERTISE;
2190         bp->mii_lpa = MII_LPA;
2191
2192         REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);
2193
2194         if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
2195                 goto setup_phy;
2196
2197         bnx2_read_phy(bp, MII_PHYSID1, &val);
2198         bp->phy_id = val << 16;
2199         bnx2_read_phy(bp, MII_PHYSID2, &val);
2200         bp->phy_id |= val & 0xffff;
2201
2202         if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
2203                 if (CHIP_NUM(bp) == CHIP_NUM_5706)
2204                         rc = bnx2_init_5706s_phy(bp, reset_phy);
2205                 else if (CHIP_NUM(bp) == CHIP_NUM_5708)
2206                         rc = bnx2_init_5708s_phy(bp, reset_phy);
2207                 else if (CHIP_NUM(bp) == CHIP_NUM_5709)
2208                         rc = bnx2_init_5709s_phy(bp, reset_phy);
2209         }
2210         else {
2211                 rc = bnx2_init_copper_phy(bp, reset_phy);
2212         }
2213
2214 setup_phy:
2215         if (!rc)
2216                 rc = bnx2_setup_phy(bp, bp->phy_port);
2217
2218         return rc;
2219 }
2220
2221 static int
2222 bnx2_set_mac_loopback(struct bnx2 *bp)
2223 {
2224         u32 mac_mode;
2225
2226         mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
2227         mac_mode &= ~BNX2_EMAC_MODE_PORT;
2228         mac_mode |= BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK;
2229         REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
2230         bp->link_up = 1;
2231         return 0;
2232 }
2233
2234 static int bnx2_test_link(struct bnx2 *);
2235
2236 static int
2237 bnx2_set_phy_loopback(struct bnx2 *bp)
2238 {
2239         u32 mac_mode;
2240         int rc, i;
2241
2242         spin_lock_bh(&bp->phy_lock);
2243         rc = bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK | BMCR_FULLDPLX |
2244                             BMCR_SPEED1000);
2245         spin_unlock_bh(&bp->phy_lock);
2246         if (rc)
2247                 return rc;
2248
2249         for (i = 0; i < 10; i++) {
2250                 if (bnx2_test_link(bp) == 0)
2251                         break;
2252                 msleep(100);
2253         }
2254
2255         mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
2256         mac_mode &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
2257                       BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
2258                       BNX2_EMAC_MODE_25G_MODE);
2259
2260         mac_mode |= BNX2_EMAC_MODE_PORT_GMII;
2261         REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
2262         bp->link_up = 1;
2263         return 0;
2264 }
2265
/* Post @msg_data to the firmware mailbox and, when @ack is set, wait
 * for the firmware to echo the sequence number back.  @silent
 * suppresses the timeout printk.  Returns 0 on success (or for
 * unacked/WAIT0 messages), -EBUSY on ack timeout, -EIO on a bad
 * firmware status.
 */
static int
bnx2_fw_sync(struct bnx2 *bp, u32 msg_data, int ack, int silent)
{
	int i;
	u32 val;

	/* Tag the message with the next driver sequence number. */
	bp->fw_wr_seq++;
	msg_data |= bp->fw_wr_seq;

	bnx2_shmem_wr(bp, BNX2_DRV_MB, msg_data);

	if (!ack)
		return 0;

	/* wait for an acknowledgement. */
	for (i = 0; i < (BNX2_FW_ACK_TIME_OUT_MS / 10); i++) {
		msleep(10);

		val = bnx2_shmem_rd(bp, BNX2_FW_MB);

		if ((val & BNX2_FW_MSG_ACK) == (msg_data & BNX2_DRV_MSG_SEQ))
			break;
	}
	/* WAIT0 messages are not checked beyond the ack. */
	if ((msg_data & BNX2_DRV_MSG_DATA) == BNX2_DRV_MSG_DATA_WAIT0)
		return 0;

	/* If we timed out, inform the firmware that this is the case. */
	if ((val & BNX2_FW_MSG_ACK) != (msg_data & BNX2_DRV_MSG_SEQ)) {
		if (!silent)
			printk(KERN_ERR PFX "fw sync timeout, reset code = "
					    "%x\n", msg_data);

		msg_data &= ~BNX2_DRV_MSG_CODE;
		msg_data |= BNX2_DRV_MSG_CODE_FW_TIMEOUT;

		bnx2_shmem_wr(bp, BNX2_DRV_MB, msg_data);

		return -EBUSY;
	}

	if ((val & BNX2_FW_MSG_STATUS_MASK) != BNX2_FW_MSG_STATUS_OK)
		return -EIO;

	return 0;
}
2311
/* Initialize 5709 context memory: kick the hardware memory init, wait
 * for it to complete, then program the host page table with the DMA
 * address of each pre-allocated context page.  Returns 0 on success,
 * -EBUSY on a hardware timeout, -ENOMEM if a context page is missing.
 */
static int
bnx2_init_5709_context(struct bnx2 *bp)
{
	int i, ret = 0;
	u32 val;

	val = BNX2_CTX_COMMAND_ENABLED | BNX2_CTX_COMMAND_MEM_INIT | (1 << 12);
	val |= (BCM_PAGE_BITS - 8) << 16;
	REG_WR(bp, BNX2_CTX_COMMAND, val);
	/* Wait for MEM_INIT to self-clear. */
	for (i = 0; i < 10; i++) {
		val = REG_RD(bp, BNX2_CTX_COMMAND);
		if (!(val & BNX2_CTX_COMMAND_MEM_INIT))
			break;
		udelay(2);
	}
	if (val & BNX2_CTX_COMMAND_MEM_INIT)
		return -EBUSY;

	for (i = 0; i < bp->ctx_pages; i++) {
		int j;

		if (bp->ctx_blk[i])
			memset(bp->ctx_blk[i], 0, BCM_PAGE_SIZE);
		else
			return -ENOMEM;

		/* Write the 64-bit page address (split low/high) and
		 * trigger the page-table write.
		 */
		REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA0,
		       (bp->ctx_blk_mapping[i] & 0xffffffff) |
		       BNX2_CTX_HOST_PAGE_TBL_DATA0_VALID);
		REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA1,
		       (u64) bp->ctx_blk_mapping[i] >> 32);
		REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL, i |
		       BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ);
		/* Wait for WRITE_REQ to self-clear. */
		for (j = 0; j < 10; j++) {

			val = REG_RD(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL);
			if (!(val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ))
				break;
			udelay(5);
		}
		if (val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ) {
			ret = -EBUSY;
			break;
		}
	}
	return ret;
}
2359
/* Zero out the on-chip context memory for all 96 connection IDs
 * (pre-5709 context scheme).  On 5706 A0 silicon, VCIDs with bit 3
 * set are remapped to different physical CIDs first.
 */
static void
bnx2_init_context(struct bnx2 *bp)
{
	u32 vcid;

	vcid = 96;
	while (vcid) {
		u32 vcid_addr, pcid_addr, offset;
		int i;

		vcid--;

		if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
			u32 new_vcid;

			/* A0-specific remap of VCIDs with bit 3 set. */
			vcid_addr = GET_PCID_ADDR(vcid);
			if (vcid & 0x8) {
				new_vcid = 0x60 + (vcid & 0xf0) + (vcid & 0x7);
			}
			else {
				new_vcid = vcid;
			}
			pcid_addr = GET_PCID_ADDR(new_vcid);
		}
		else {
			vcid_addr = GET_CID_ADDR(vcid);
			pcid_addr = vcid_addr;
		}

		for (i = 0; i < (CTX_SIZE / PHY_CTX_SIZE); i++) {
			vcid_addr += (i << PHY_CTX_SHIFT);
			pcid_addr += (i << PHY_CTX_SHIFT);

			REG_WR(bp, BNX2_CTX_VIRT_ADDR, vcid_addr);
			REG_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr);

			/* Zero out the context. */
			for (offset = 0; offset < PHY_CTX_SIZE; offset += 4)
				bnx2_ctx_wr(bp, vcid_addr, offset, 0);
		}
	}
}
2402
2403 static int
2404 bnx2_alloc_bad_rbuf(struct bnx2 *bp)
2405 {
2406         u16 *good_mbuf;
2407         u32 good_mbuf_cnt;
2408         u32 val;
2409
2410         good_mbuf = kmalloc(512 * sizeof(u16), GFP_KERNEL);
2411         if (good_mbuf == NULL) {
2412                 printk(KERN_ERR PFX "Failed to allocate memory in "
2413                                     "bnx2_alloc_bad_rbuf\n");
2414                 return -ENOMEM;
2415         }
2416
2417         REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
2418                 BNX2_MISC_ENABLE_SET_BITS_RX_MBUF_ENABLE);
2419
2420         good_mbuf_cnt = 0;
2421
2422         /* Allocate a bunch of mbufs and save the good ones in an array. */
2423         val = bnx2_reg_rd_ind(bp, BNX2_RBUF_STATUS1);
2424         while (val & BNX2_RBUF_STATUS1_FREE_COUNT) {
2425                 bnx2_reg_wr_ind(bp, BNX2_RBUF_COMMAND,
2426                                 BNX2_RBUF_COMMAND_ALLOC_REQ);
2427
2428                 val = bnx2_reg_rd_ind(bp, BNX2_RBUF_FW_BUF_ALLOC);
2429
2430                 val &= BNX2_RBUF_FW_BUF_ALLOC_VALUE;
2431
2432                 /* The addresses with Bit 9 set are bad memory blocks. */
2433                 if (!(val & (1 << 9))) {
2434                         good_mbuf[good_mbuf_cnt] = (u16) val;
2435                         good_mbuf_cnt++;
2436                 }
2437
2438                 val = bnx2_reg_rd_ind(bp, BNX2_RBUF_STATUS1);
2439         }
2440
2441         /* Free the good ones back to the mbuf pool thus discarding
2442          * all the bad ones. */
2443         while (good_mbuf_cnt) {
2444                 good_mbuf_cnt--;
2445
2446                 val = good_mbuf[good_mbuf_cnt];
2447                 val = (val << 9) | val | 1;
2448
2449                 bnx2_reg_wr_ind(bp, BNX2_RBUF_FW_BUF_FREE, val);
2450         }
2451         kfree(good_mbuf);
2452         return 0;
2453 }
2454
2455 static void
2456 bnx2_set_mac_addr(struct bnx2 *bp, u8 *mac_addr, u32 pos)
2457 {
2458         u32 val;
2459
2460         val = (mac_addr[0] << 8) | mac_addr[1];
2461
2462         REG_WR(bp, BNX2_EMAC_MAC_MATCH0 + (pos * 8), val);
2463
2464         val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
2465                 (mac_addr[4] << 8) | mac_addr[5];
2466
2467         REG_WR(bp, BNX2_EMAC_MAC_MATCH1 + (pos * 8), val);
2468 }
2469
2470 static inline int
2471 bnx2_alloc_rx_page(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index)
2472 {
2473         dma_addr_t mapping;
2474         struct sw_pg *rx_pg = &rxr->rx_pg_ring[index];
2475         struct rx_bd *rxbd =
2476                 &rxr->rx_pg_desc_ring[RX_RING(index)][RX_IDX(index)];
2477         struct page *page = alloc_page(GFP_ATOMIC);
2478
2479         if (!page)
2480                 return -ENOMEM;
2481         mapping = pci_map_page(bp->pdev, page, 0, PAGE_SIZE,
2482                                PCI_DMA_FROMDEVICE);
2483         if (pci_dma_mapping_error(bp->pdev, mapping)) {
2484                 __free_page(page);
2485                 return -EIO;
2486         }
2487
2488         rx_pg->page = page;
2489         pci_unmap_addr_set(rx_pg, mapping, mapping);
2490         rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
2491         rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;
2492         return 0;
2493 }
2494
2495 static void
2496 bnx2_free_rx_page(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index)
2497 {
2498         struct sw_pg *rx_pg = &rxr->rx_pg_ring[index];
2499         struct page *page = rx_pg->page;
2500
2501         if (!page)
2502                 return;
2503
2504         pci_unmap_page(bp->pdev, pci_unmap_addr(rx_pg, mapping), PAGE_SIZE,
2505                        PCI_DMA_FROMDEVICE);
2506
2507         __free_page(page);
2508         rx_pg->page = NULL;
2509 }
2510
2511 static inline int
2512 bnx2_alloc_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index)
2513 {
2514         struct sk_buff *skb;
2515         struct sw_bd *rx_buf = &rxr->rx_buf_ring[index];
2516         dma_addr_t mapping;
2517         struct rx_bd *rxbd = &rxr->rx_desc_ring[RX_RING(index)][RX_IDX(index)];
2518         unsigned long align;
2519
2520         skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
2521         if (skb == NULL) {
2522                 return -ENOMEM;
2523         }
2524
2525         if (unlikely((align = (unsigned long) skb->data & (BNX2_RX_ALIGN - 1))))
2526                 skb_reserve(skb, BNX2_RX_ALIGN - align);
2527
2528         mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_use_size,
2529                 PCI_DMA_FROMDEVICE);
2530         if (pci_dma_mapping_error(bp->pdev, mapping)) {
2531                 dev_kfree_skb(skb);
2532                 return -EIO;
2533         }
2534
2535         rx_buf->skb = skb;
2536         pci_unmap_addr_set(rx_buf, mapping, mapping);
2537
2538         rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
2539         rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;
2540
2541         rxr->rx_prod_bseq += bp->rx_buf_use_size;
2542
2543         return 0;
2544 }
2545
2546 static int
2547 bnx2_phy_event_is_set(struct bnx2 *bp, struct bnx2_napi *bnapi, u32 event)
2548 {
2549         struct status_block *sblk = bnapi->status_blk.msi;
2550         u32 new_link_state, old_link_state;
2551         int is_set = 1;
2552
2553         new_link_state = sblk->status_attn_bits & event;
2554         old_link_state = sblk->status_attn_bits_ack & event;
2555         if (new_link_state != old_link_state) {
2556                 if (new_link_state)
2557                         REG_WR(bp, BNX2_PCICFG_STATUS_BIT_SET_CMD, event);
2558                 else
2559                         REG_WR(bp, BNX2_PCICFG_STATUS_BIT_CLEAR_CMD, event);
2560         } else
2561                 is_set = 0;
2562
2563         return is_set;
2564 }
2565
/* Service pending PHY/link attention events.  Runs in NAPI poll context;
 * phy_lock is taken because the link helpers touch shared PHY state.
 */
static void
bnx2_phy_int(struct bnx2 *bp, struct bnx2_napi *bnapi)
{
	spin_lock(&bp->phy_lock);

	/* Link state attention toggled: re-evaluate the link. */
	if (bnx2_phy_event_is_set(bp, bnapi, STATUS_ATTN_BITS_LINK_STATE))
		bnx2_set_link(bp);
	/* TIMER_ABORT attention: handled by bnx2_set_remote_link(). */
	if (bnx2_phy_event_is_set(bp, bnapi, STATUS_ATTN_BITS_TIMER_ABORT))
		bnx2_set_remote_link(bp);

	spin_unlock(&bp->phy_lock);

}
2579
/* Read the hardware tx consumer index from the status block.  The index
 * is skipped past the last slot of a ring page when it lands there
 * (presumably that slot is a chain descriptor, not a real entry — same
 * adjustment as bnx2_get_hw_rx_cons()).
 */
static inline u16
bnx2_get_hw_tx_cons(struct bnx2_napi *bnapi)
{
	u16 cons;

	/* Tell compiler that status block fields can change. */
	barrier();
	cons = *bnapi->hw_tx_cons_ptr;
	if (unlikely((cons & MAX_TX_DESC_CNT) == MAX_TX_DESC_CNT))
		cons++;
	return cons;
}
2592
/* Reclaim completed tx descriptors on this napi instance's tx ring.
 * Unmaps and frees up to @budget transmitted skbs, advances the software
 * consumer index, and wakes the tx queue if it was stopped and enough
 * descriptors are now free.  Returns the number of packets reclaimed.
 */
static int
bnx2_tx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
{
	struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
	u16 hw_cons, sw_cons, sw_ring_cons;
	int tx_pkt = 0, index;
	struct netdev_queue *txq;

	/* Each napi instance owns one tx queue; recover its index. */
	index = (bnapi - bp->bnx2_napi);
	txq = netdev_get_tx_queue(bp->dev, index);

	hw_cons = bnx2_get_hw_tx_cons(bnapi);
	sw_cons = txr->tx_cons;

	while (sw_cons != hw_cons) {
		struct sw_tx_bd *tx_buf;
		struct sk_buff *skb;
		int i, last;

		sw_ring_cons = TX_RING_IDX(sw_cons);

		tx_buf = &txr->tx_buf_ring[sw_ring_cons];
		skb = tx_buf->skb;

		/* partial BD completions possible with TSO packets */
		if (skb_is_gso(skb)) {
			u16 last_idx, last_ring_idx;

			/* Index of the BD one past this packet's last BD
			 * (header BD + one BD per fragment).
			 */
			last_idx = sw_cons +
				skb_shinfo(skb)->nr_frags + 1;
			last_ring_idx = sw_ring_cons +
				skb_shinfo(skb)->nr_frags + 1;
			if (unlikely(last_ring_idx >= MAX_TX_DESC_CNT)) {
				last_idx++;
			}
			/* Signed comparison handles 16-bit index wrap:
			 * stop if the packet is not fully completed yet.
			 */
			if (((s16) ((s16) last_idx - (s16) hw_cons)) > 0) {
				break;
			}
		}

		skb_dma_unmap(&bp->pdev->dev, skb, DMA_TO_DEVICE);

		tx_buf->skb = NULL;
		last = skb_shinfo(skb)->nr_frags;

		/* Consume one BD per fragment ... */
		for (i = 0; i < last; i++) {
			sw_cons = NEXT_TX_BD(sw_cons);
		}

		/* ... plus the first (header) BD. */
		sw_cons = NEXT_TX_BD(sw_cons);

		dev_kfree_skb(skb);
		tx_pkt++;
		if (tx_pkt == budget)
			break;

		hw_cons = bnx2_get_hw_tx_cons(bnapi);
	}

	txr->hw_tx_cons = hw_cons;
	txr->tx_cons = sw_cons;

	/* Need to make the tx_cons update visible to bnx2_start_xmit()
	 * before checking for netif_tx_queue_stopped().  Without the
	 * memory barrier, there is a small possibility that bnx2_start_xmit()
	 * will miss it and cause the queue to be stopped forever.
	 */
	smp_mb();

	/* Recheck under the tx lock to avoid racing with the xmit path. */
	if (unlikely(netif_tx_queue_stopped(txq)) &&
		     (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh)) {
		__netif_tx_lock(txq, smp_processor_id());
		if ((netif_tx_queue_stopped(txq)) &&
		    (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh))
			netif_tx_wake_queue(txq);
		__netif_tx_unlock(txq);
	}

	return tx_pkt;
}
2673
/* Recycle @count entries of the rx page ring: move page pointers and DMA
 * addresses from the consumer slots back to the producer slots without
 * unmapping them, so the hardware can reuse the same pages.  When @skb is
 * non-NULL, its last page fragment is detached and recycled first and
 * the skb itself is freed (used when a replacement page could not be
 * allocated).
 */
static void
bnx2_reuse_rx_skb_pages(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr,
			struct sk_buff *skb, int count)
{
	struct sw_pg *cons_rx_pg, *prod_rx_pg;
	struct rx_bd *cons_bd, *prod_bd;
	int i;
	u16 hw_prod, prod;
	u16 cons = rxr->rx_pg_cons;

	cons_rx_pg = &rxr->rx_pg_ring[cons];

	/* The caller was unable to allocate a new page to replace the
	 * last one in the frags array, so we need to recycle that page
	 * and then free the skb.
	 */
	if (skb) {
		struct page *page;
		struct skb_shared_info *shinfo;

		shinfo = skb_shinfo(skb);
		shinfo->nr_frags--;
		page = shinfo->frags[shinfo->nr_frags].page;
		shinfo->frags[shinfo->nr_frags].page = NULL;

		cons_rx_pg->page = page;
		dev_kfree_skb(skb);
	}

	hw_prod = rxr->rx_pg_prod;

	for (i = 0; i < count; i++) {
		prod = RX_PG_RING_IDX(hw_prod);

		prod_rx_pg = &rxr->rx_pg_ring[prod];
		cons_rx_pg = &rxr->rx_pg_ring[cons];
		cons_bd = &rxr->rx_pg_desc_ring[RX_RING(cons)][RX_IDX(cons)];
		prod_bd = &rxr->rx_pg_desc_ring[RX_RING(prod)][RX_IDX(prod)];

		/* If the slots coincide, the entry is already in place. */
		if (prod != cons) {
			prod_rx_pg->page = cons_rx_pg->page;
			cons_rx_pg->page = NULL;
			pci_unmap_addr_set(prod_rx_pg, mapping,
				pci_unmap_addr(cons_rx_pg, mapping));

			prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
			prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;

		}
		cons = RX_PG_RING_IDX(NEXT_RX_BD(cons));
		hw_prod = NEXT_RX_BD(hw_prod);
	}
	rxr->rx_pg_prod = hw_prod;
	rxr->rx_pg_cons = cons;
}
2729
/* Recycle an rx buffer: hand the skb from consumer slot @cons back to
 * the ring at producer slot @prod, reusing its existing DMA mapping, and
 * give the header area back to the device.  Used on drop/error paths and
 * after the copy-break path has copied the data out.
 */
static inline void
bnx2_reuse_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr,
		  struct sk_buff *skb, u16 cons, u16 prod)
{
	struct sw_bd *cons_rx_buf, *prod_rx_buf;
	struct rx_bd *cons_bd, *prod_bd;

	cons_rx_buf = &rxr->rx_buf_ring[cons];
	prod_rx_buf = &rxr->rx_buf_ring[prod];

	/* Re-sync the header area (synced for the CPU in bnx2_rx_int())
	 * back to the device.
	 */
	pci_dma_sync_single_for_device(bp->pdev,
		pci_unmap_addr(cons_rx_buf, mapping),
		BNX2_RX_OFFSET + BNX2_RX_COPY_THRESH, PCI_DMA_FROMDEVICE);

	rxr->rx_prod_bseq += bp->rx_buf_use_size;

	prod_rx_buf->skb = skb;

	/* Same slot: mapping and descriptor are already correct. */
	if (cons == prod)
		return;

	pci_unmap_addr_set(prod_rx_buf, mapping,
			pci_unmap_addr(cons_rx_buf, mapping));

	cons_bd = &rxr->rx_desc_ring[RX_RING(cons)][RX_IDX(cons)];
	prod_bd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
	prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
	prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;
}
2759
/* Finish reception of one packet into @skb.  @len is the packet length
 * with the 4-byte trailer (FCS) already subtracted by the caller;
 * @hdr_len is nonzero when the packet is split between the skb's linear
 * area and pages from the rx page ring; @ring_idx packs the consumer
 * index in the high 16 bits and the producer index in the low 16 bits.
 *
 * Returns 0 on success, or a negative errno if a replacement buffer
 * could not be allocated — in that case the packet is dropped and its
 * buffers are recycled back to the rings.
 */
static int
bnx2_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, struct sk_buff *skb,
	    unsigned int len, unsigned int hdr_len, dma_addr_t dma_addr,
	    u32 ring_idx)
{
	int err;
	u16 prod = ring_idx & 0xffff;

	/* Refill the ring slot before handing this skb up the stack. */
	err = bnx2_alloc_rx_skb(bp, rxr, prod);
	if (unlikely(err)) {
		/* No replacement buffer: recycle the skb and any page
		 * ring entries this packet occupies, then drop it.
		 */
		bnx2_reuse_rx_skb(bp, rxr, skb, (u16) (ring_idx >> 16), prod);
		if (hdr_len) {
			unsigned int raw_len = len + 4;
			int pages = PAGE_ALIGN(raw_len - hdr_len) >> PAGE_SHIFT;

			bnx2_reuse_rx_skb_pages(bp, rxr, NULL, pages);
		}
		return err;
	}

	skb_reserve(skb, BNX2_RX_OFFSET);
	pci_unmap_single(bp->pdev, dma_addr, bp->rx_buf_use_size,
			 PCI_DMA_FROMDEVICE);

	if (hdr_len == 0) {
		/* Whole packet fits in the linear data area. */
		skb_put(skb, len);
		return 0;
	} else {
		unsigned int i, frag_len, frag_size, pages;
		struct sw_pg *rx_pg;
		u16 pg_cons = rxr->rx_pg_cons;
		u16 pg_prod = rxr->rx_pg_prod;

		/* frag_size includes the 4-byte trailer; it is stripped
		 * from the final fragment below.
		 */
		frag_size = len + 4 - hdr_len;
		pages = PAGE_ALIGN(frag_size) >> PAGE_SHIFT;
		skb_put(skb, hdr_len);

		for (i = 0; i < pages; i++) {
			dma_addr_t mapping_old;

			frag_len = min(frag_size, (unsigned int) PAGE_SIZE);
			if (unlikely(frag_len <= 4)) {
				/* The remaining bytes are only (part of)
				 * the trailer: trim what was already added
				 * and recycle the unused pages.
				 */
				unsigned int tail = 4 - frag_len;

				rxr->rx_pg_cons = pg_cons;
				rxr->rx_pg_prod = pg_prod;
				bnx2_reuse_rx_skb_pages(bp, rxr, NULL,
							pages - i);
				skb->len -= tail;
				if (i == 0) {
					skb->tail -= tail;
				} else {
					skb_frag_t *frag =
						&skb_shinfo(skb)->frags[i - 1];
					frag->size -= tail;
					skb->data_len -= tail;
					skb->truesize -= tail;
				}
				return 0;
			}
			rx_pg = &rxr->rx_pg_ring[pg_cons];

			/* Don't unmap yet.  If we're unable to allocate a new
			 * page, we need to recycle the page and the DMA addr.
			 */
			mapping_old = pci_unmap_addr(rx_pg, mapping);
			if (i == pages - 1)
				frag_len -= 4;

			skb_fill_page_desc(skb, i, rx_pg->page, 0, frag_len);
			rx_pg->page = NULL;

			err = bnx2_alloc_rx_page(bp, rxr,
						 RX_PG_RING_IDX(pg_prod));
			if (unlikely(err)) {
				rxr->rx_pg_cons = pg_cons;
				rxr->rx_pg_prod = pg_prod;
				bnx2_reuse_rx_skb_pages(bp, rxr, skb,
							pages - i);
				return err;
			}

			pci_unmap_page(bp->pdev, mapping_old,
				       PAGE_SIZE, PCI_DMA_FROMDEVICE);

			frag_size -= frag_len;
			skb->data_len += frag_len;
			skb->truesize += frag_len;
			skb->len += frag_len;

			pg_prod = NEXT_RX_BD(pg_prod);
			pg_cons = RX_PG_RING_IDX(NEXT_RX_BD(pg_cons));
		}
		rxr->rx_pg_prod = pg_prod;
		rxr->rx_pg_cons = pg_cons;
	}
	return 0;
}
2858
/* Read the hardware rx consumer index from the status block, skipping
 * past the last slot of a ring page when the index lands there (same
 * adjustment as bnx2_get_hw_tx_cons()).
 */
static inline u16
bnx2_get_hw_rx_cons(struct bnx2_napi *bnapi)
{
	u16 cons;

	/* Tell compiler that status block fields can change. */
	barrier();
	cons = *bnapi->hw_rx_cons_ptr;
	if (unlikely((cons & MAX_RX_DESC_CNT) == MAX_RX_DESC_CNT))
		cons++;
	return cons;
}
2871
/* NAPI receive path: drain up to @budget completed packets from this
 * napi instance's rx ring.  Small packets are copied into a fresh skb
 * (copy-break) and the original buffer recycled; larger ones are handed
 * off via bnx2_rx_skb().  Updates the producer/consumer registers once
 * at the end.  Returns the number of packets processed.
 */
static int
bnx2_rx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
{
	struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
	u16 hw_cons, sw_cons, sw_ring_cons, sw_prod, sw_ring_prod;
	struct l2_fhdr *rx_hdr;
	int rx_pkt = 0, pg_ring_used = 0;

	hw_cons = bnx2_get_hw_rx_cons(bnapi);
	sw_cons = rxr->rx_cons;
	sw_prod = rxr->rx_prod;

	/* Memory barrier necessary as speculative reads of the rx
	 * buffer can be ahead of the index in the status block
	 */
	rmb();
	while (sw_cons != hw_cons) {
		unsigned int len, hdr_len;
		u32 status;
		struct sw_bd *rx_buf;
		struct sk_buff *skb;
		dma_addr_t dma_addr;
		u16 vtag = 0;
		int hw_vlan __maybe_unused = 0;

		sw_ring_cons = RX_RING_IDX(sw_cons);
		sw_ring_prod = RX_RING_IDX(sw_prod);

		rx_buf = &rxr->rx_buf_ring[sw_ring_cons];
		skb = rx_buf->skb;

		rx_buf->skb = NULL;

		dma_addr = pci_unmap_addr(rx_buf, mapping);

		/* Sync only the frame header area for the CPU; the rest
		 * is synced implicitly by the full unmap in bnx2_rx_skb().
		 */
		pci_dma_sync_single_for_cpu(bp->pdev, dma_addr,
			BNX2_RX_OFFSET + BNX2_RX_COPY_THRESH,
			PCI_DMA_FROMDEVICE);

		/* The chip prepends an l2_fhdr status block to the frame. */
		rx_hdr = (struct l2_fhdr *) skb->data;
		len = rx_hdr->l2_fhdr_pkt_len;
		status = rx_hdr->l2_fhdr_status;

		hdr_len = 0;
		if (status & L2_FHDR_STATUS_SPLIT) {
			/* For split packets the header length is reported
			 * in the l2_fhdr_ip_xsum field.
			 */
			hdr_len = rx_hdr->l2_fhdr_ip_xsum;
			pg_ring_used = 1;
		} else if (len > bp->rx_jumbo_thresh) {
			hdr_len = bp->rx_jumbo_thresh;
			pg_ring_used = 1;
		}

		/* Drop frames with any rx error; recycle their buffers. */
		if (unlikely(status & (L2_FHDR_ERRORS_BAD_CRC |
				       L2_FHDR_ERRORS_PHY_DECODE |
				       L2_FHDR_ERRORS_ALIGNMENT |
				       L2_FHDR_ERRORS_TOO_SHORT |
				       L2_FHDR_ERRORS_GIANT_FRAME))) {

			bnx2_reuse_rx_skb(bp, rxr, skb, sw_ring_cons,
					  sw_ring_prod);
			if (pg_ring_used) {
				int pages;

				pages = PAGE_ALIGN(len - hdr_len) >> PAGE_SHIFT;

				bnx2_reuse_rx_skb_pages(bp, rxr, NULL, pages);
			}
			goto next_rx;
		}

		/* Strip the 4-byte trailer (FCS) from the reported length. */
		len -= 4;

		if (len <= bp->rx_copy_thresh) {
			struct sk_buff *new_skb;

			new_skb = netdev_alloc_skb(bp->dev, len + 6);
			if (new_skb == NULL) {
				bnx2_reuse_rx_skb(bp, rxr, skb, sw_ring_cons,
						  sw_ring_prod);
				goto next_rx;
			}

			/* aligned copy */
			skb_copy_from_linear_data_offset(skb,
							 BNX2_RX_OFFSET - 6,
				      new_skb->data, len + 6);
			skb_reserve(new_skb, 6);
			skb_put(new_skb, len);

			bnx2_reuse_rx_skb(bp, rxr, skb,
				sw_ring_cons, sw_ring_prod);

			skb = new_skb;
		} else if (unlikely(bnx2_rx_skb(bp, rxr, skb, len, hdr_len,
			   dma_addr, (sw_ring_cons << 16) | sw_ring_prod)))
			goto next_rx;

		if ((status & L2_FHDR_STATUS_L2_VLAN_TAG) &&
		    !(bp->rx_mode & BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG)) {
			vtag = rx_hdr->l2_fhdr_vlan_tag;
#ifdef BCM_VLAN
			if (bp->vlgrp)
				hw_vlan = 1;
			else
#endif
			{
				/* No vlan group registered: re-insert the
				 * stripped tag into the frame by hand.
				 */
				struct vlan_ethhdr *ve = (struct vlan_ethhdr *)
					__skb_push(skb, 4);

				memmove(ve, skb->data + 4, ETH_ALEN * 2);
				ve->h_vlan_proto = htons(ETH_P_8021Q);
				ve->h_vlan_TCI = htons(vtag);
				len += 4;
			}
		}

		skb->protocol = eth_type_trans(skb, bp->dev);

		/* Drop oversize frames unless they are VLAN tagged
		 * (0x8100 is ETH_P_8021Q).
		 */
		if ((len > (bp->dev->mtu + ETH_HLEN)) &&
			(ntohs(skb->protocol) != 0x8100)) {

			dev_kfree_skb(skb);
			goto next_rx;

		}

		skb->ip_summed = CHECKSUM_NONE;
		if (bp->rx_csum &&
			(status & (L2_FHDR_STATUS_TCP_SEGMENT |
			L2_FHDR_STATUS_UDP_DATAGRAM))) {

			if (likely((status & (L2_FHDR_ERRORS_TCP_XSUM |
					      L2_FHDR_ERRORS_UDP_XSUM)) == 0))
				skb->ip_summed = CHECKSUM_UNNECESSARY;
		}

#ifdef BCM_VLAN
		if (hw_vlan)
			vlan_hwaccel_receive_skb(skb, bp->vlgrp, vtag);
		else
#endif
			netif_receive_skb(skb);

		rx_pkt++;

next_rx:
		sw_cons = NEXT_RX_BD(sw_cons);
		sw_prod = NEXT_RX_BD(sw_prod);

		if ((rx_pkt == budget))
			break;

		/* Refresh hw_cons to see if there is new work */
		if (sw_cons == hw_cons) {
			hw_cons = bnx2_get_hw_rx_cons(bnapi);
			rmb();
		}
	}
	rxr->rx_cons = sw_cons;
	rxr->rx_prod = sw_prod;

	/* Publish the new producer indices and byte sequence to the chip. */
	if (pg_ring_used)
		REG_WR16(bp, rxr->rx_pg_bidx_addr, rxr->rx_pg_prod);

	REG_WR16(bp, rxr->rx_bidx_addr, sw_prod);

	REG_WR(bp, rxr->rx_bseq_addr, rxr->rx_prod_bseq);

	mmiowb();

	return rx_pkt;

}
3045
/* MSI ISR - The only difference between this and the INTx ISR
 * is that the MSI interrupt is always serviced.
 */
static irqreturn_t
bnx2_msi(int irq, void *dev_instance)
{
	struct bnx2_napi *bnapi = dev_instance;
	struct bnx2 *bp = bnapi->bp;

	prefetch(bnapi->status_blk.msi);
	/* Mask further interrupts before scheduling NAPI. */
	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
		BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
		BNX2_PCICFG_INT_ACK_CMD_MASK_INT);

	/* Return here if interrupt is disabled. */
	if (unlikely(atomic_read(&bp->intr_sem) != 0))
		return IRQ_HANDLED;

	netif_rx_schedule(&bnapi->napi);

	return IRQ_HANDLED;
}
3068
/* One-shot MSI ISR.  Unlike bnx2_msi(), no INT_ACK_CMD masking write is
 * done here — presumably the interrupt auto-masks in one-shot mode;
 * confirm against the MSI setup code.
 */
static irqreturn_t
bnx2_msi_1shot(int irq, void *dev_instance)
{
	struct bnx2_napi *bnapi = dev_instance;
	struct bnx2 *bp = bnapi->bp;

	prefetch(bnapi->status_blk.msi);

	/* Return here if interrupt is disabled. */
	if (unlikely(atomic_read(&bp->intr_sem) != 0))
		return IRQ_HANDLED;

	netif_rx_schedule(&bnapi->napi);

	return IRQ_HANDLED;
}
3085
/* INTx ISR (may be on a shared line, hence the IRQ_NONE path). */
static irqreturn_t
bnx2_interrupt(int irq, void *dev_instance)
{
	struct bnx2_napi *bnapi = dev_instance;
	struct bnx2 *bp = bnapi->bp;
	struct status_block *sblk = bnapi->status_blk.msi;

	/* When using INTx, it is possible for the interrupt to arrive
	 * at the CPU before the status block posted prior to the
	 * interrupt. Reading a register will flush the status block.
	 * When using MSI, the MSI message will always complete after
	 * the status block write.
	 */
	if ((sblk->status_idx == bnapi->last_status_idx) &&
	    (REG_RD(bp, BNX2_PCICFG_MISC_STATUS) &
	     BNX2_PCICFG_MISC_STATUS_INTA_VALUE))
		return IRQ_NONE;

	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
		BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
		BNX2_PCICFG_INT_ACK_CMD_MASK_INT);

	/* Read back to deassert IRQ immediately to avoid too many
	 * spurious interrupts.
	 */
	REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD);

	/* Return here if interrupt is shared and is disabled. */
	if (unlikely(atomic_read(&bp->intr_sem) != 0))
		return IRQ_HANDLED;

	/* Only record last_status_idx if we actually win the race to
	 * schedule NAPI; otherwise a poll is already in progress.
	 */
	if (netif_rx_schedule_prep(&bnapi->napi)) {
		bnapi->last_status_idx = sblk->status_idx;
		__netif_rx_schedule(&bnapi->napi);
	}

	return IRQ_HANDLED;
}
3124
3125 static inline int
3126 bnx2_has_fast_work(struct bnx2_napi *bnapi)
3127 {
3128         struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
3129         struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
3130
3131         if ((bnx2_get_hw_rx_cons(bnapi) != rxr->rx_cons) ||
3132             (bnx2_get_hw_tx_cons(bnapi) != txr->hw_tx_cons))
3133                 return 1;
3134         return 0;
3135 }
3136
#define STATUS_ATTN_EVENTS	(STATUS_ATTN_BITS_LINK_STATE | \
				 STATUS_ATTN_BITS_TIMER_ABORT)

/* Like bnx2_has_fast_work() but also reports unacknowledged link/PHY
 * attention events (an event is pending while its raised bit differs
 * from its ack bit).
 */
static inline int
bnx2_has_work(struct bnx2_napi *bnapi)
{
	struct status_block *sblk = bnapi->status_blk.msi;

	if (bnx2_has_fast_work(bnapi))
		return 1;

	if ((sblk->status_attn_bits & STATUS_ATTN_EVENTS) !=
	    (sblk->status_attn_bits_ack & STATUS_ATTN_EVENTS))
		return 1;

	return 0;
}
3154
/* Detect and recover from a missed MSI: if the napi instance has pending
 * work but its status index has not advanced since the previous check,
 * assume the MSI was lost — pulse the MSI enable bit off and on in
 * BNX2_PCICFG_MSI_CONTROL and invoke the MSI handler directly.
 */
static void
bnx2_chk_missed_msi(struct bnx2 *bp)
{
	struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
	u32 msi_ctrl;

	if (bnx2_has_work(bnapi)) {
		msi_ctrl = REG_RD(bp, BNX2_PCICFG_MSI_CONTROL);
		/* Nothing to recover if MSI is not enabled. */
		if (!(msi_ctrl & BNX2_PCICFG_MSI_CONTROL_ENABLE))
			return;

		if (bnapi->last_status_idx == bp->idle_chk_status_idx) {
			REG_WR(bp, BNX2_PCICFG_MSI_CONTROL, msi_ctrl &
			       ~BNX2_PCICFG_MSI_CONTROL_ENABLE);
			REG_WR(bp, BNX2_PCICFG_MSI_CONTROL, msi_ctrl);
			bnx2_msi(bp->irq_tbl[0].vector, bnapi);
		}
	}

	/* Remember where we were for the next check. */
	bp->idle_chk_status_idx = bnapi->last_status_idx;
}
3176
/* Handle pending link/PHY attention events from poll context, then kick
 * the host coalescing block (COAL_NOW_WO_INT) to refresh the status
 * block.
 */
static void bnx2_poll_link(struct bnx2 *bp, struct bnx2_napi *bnapi)
{
	struct status_block *sblk = bnapi->status_blk.msi;
	u32 status_attn_bits = sblk->status_attn_bits;
	u32 status_attn_bits_ack = sblk->status_attn_bits_ack;

	if ((status_attn_bits & STATUS_ATTN_EVENTS) !=
	    (status_attn_bits_ack & STATUS_ATTN_EVENTS)) {

		bnx2_phy_int(bp, bnapi);

		/* This is needed to take care of transient status
		 * during link changes.
		 */
		REG_WR(bp, BNX2_HC_COMMAND,
		       bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
		/* Read back to flush the posted write. */
		REG_RD(bp, BNX2_HC_COMMAND);
	}
}
3196
3197 static int bnx2_poll_work(struct bnx2 *bp, struct bnx2_napi *bnapi,
3198                           int work_done, int budget)
3199 {
3200         struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
3201         struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
3202
3203         if (bnx2_get_hw_tx_cons(bnapi) != txr->hw_tx_cons)
3204                 bnx2_tx_int(bp, bnapi, 0);
3205
3206         if (bnx2_get_hw_rx_cons(bnapi) != rxr->rx_cons)
3207                 work_done += bnx2_rx_int(bp, bnapi, budget - work_done);
3208
3209         return work_done;
3210 }
3211
/* NAPI poll handler for MSI-X vectors: fast-path (rx/tx) work only, no
 * link attention handling here.
 */
static int bnx2_poll_msix(struct napi_struct *napi, int budget)
{
	struct bnx2_napi *bnapi = container_of(napi, struct bnx2_napi, napi);
	struct bnx2 *bp = bnapi->bp;
	int work_done = 0;
	struct status_block_msix *sblk = bnapi->status_blk.msix;

	while (1) {
		work_done = bnx2_poll_work(bp, bnapi, work_done, budget);
		if (unlikely(work_done >= budget))
			break;

		bnapi->last_status_idx = sblk->status_idx;
		/* status idx must be read before checking for more work. */
		rmb();
		if (likely(!bnx2_has_fast_work(bnapi))) {

			netif_rx_complete(napi);
			/* Re-enable this vector and report the last
			 * processed status index to the hardware.
			 */
			REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
			       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
			       bnapi->last_status_idx);
			break;
		}
	}
	return work_done;
}
3238
/* Main NAPI poll handler (vector 0 / non-MSI-X): handles link events and
 * fast-path work, and re-enables interrupts once all work is done.
 */
static int bnx2_poll(struct napi_struct *napi, int budget)
{
	struct bnx2_napi *bnapi = container_of(napi, struct bnx2_napi, napi);
	struct bnx2 *bp = bnapi->bp;
	int work_done = 0;
	struct status_block *sblk = bnapi->status_blk.msi;

	while (1) {
		bnx2_poll_link(bp, bnapi);

		work_done = bnx2_poll_work(bp, bnapi, work_done, budget);

		/* bnapi->last_status_idx is used below to tell the hw how
		 * much work has been processed, so we must read it before
		 * checking for more work.
		 */
		bnapi->last_status_idx = sblk->status_idx;

		if (unlikely(work_done >= budget))
			break;

		rmb();
		if (likely(!bnx2_has_work(bnapi))) {
			netif_rx_complete(napi);
			/* MSI/MSI-X: a single write re-enables and reports
			 * the processed index.
			 */
			if (likely(bp->flags & BNX2_FLAG_USING_MSI_OR_MSIX)) {
				REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
				       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
				       bnapi->last_status_idx);
				break;
			}
			/* INTx: write the index with MASK_INT still set,
			 * then again without it to unmask — presumably to
			 * avoid a window for spurious retriggering; TODO
			 * confirm against the INT_ACK_CMD register spec.
			 */
			REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
			       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
			       BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
			       bnapi->last_status_idx);

			REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
			       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
			       bnapi->last_status_idx);
			break;
		}
	}

	return work_done;
}
3283
3284 /* Called with rtnl_lock from vlan functions and also netif_tx_lock
3285  * from set_multicast.
3286  */
static void
bnx2_set_rx_mode(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	u32 rx_mode, sort_mode;
	struct dev_addr_list *uc_ptr;
	int i;

	if (!netif_running(dev))
		return;

	/* All register programming below is done under phy_lock. */
	spin_lock_bh(&bp->phy_lock);

	/* Start from the current mode with the bits recomputed below
	 * cleared.
	 */
	rx_mode = bp->rx_mode & ~(BNX2_EMAC_RX_MODE_PROMISCUOUS |
				  BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG);
	sort_mode = 1 | BNX2_RPM_SORT_USER0_BC_EN;
#ifdef BCM_VLAN
	/* Keep (do not strip) VLAN tags only while no vlan group is
	 * registered and the chip/firmware allows it.
	 */
	if (!bp->vlgrp && (bp->flags & BNX2_FLAG_CAN_KEEP_VLAN))
		rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
#else
	if (bp->flags & BNX2_FLAG_CAN_KEEP_VLAN)
		rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
#endif
	if (dev->flags & IFF_PROMISC) {
		/* Promiscuous mode. */
		rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
		sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN |
			     BNX2_RPM_SORT_USER0_PROM_VLAN;
	}
	else if (dev->flags & IFF_ALLMULTI) {
		/* Accept all multicast: set every bit in the hash filter. */
		for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
			REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
			       0xffffffff);
		}
		sort_mode |= BNX2_RPM_SORT_USER0_MC_EN;
	}
	else {
		/* Accept one or more multicast(s). */
		struct dev_mc_list *mclist;
		u32 mc_filter[NUM_MC_HASH_REGISTERS];
		u32 regidx;
		u32 bit;
		u32 crc;

		memset(mc_filter, 0, 4 * NUM_MC_HASH_REGISTERS);

		for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
		     i++, mclist = mclist->next) {

			/* Hash each address into one of the 256 filter
			 * bits (8 registers x 32 bits), keyed on the low
			 * byte of the little-endian CRC.
			 */
			crc = ether_crc_le(ETH_ALEN, mclist->dmi_addr);
			bit = crc & 0xff;
			regidx = (bit & 0xe0) >> 5;
			bit &= 0x1f;
			mc_filter[regidx] |= (1 << bit);
		}

		for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
			REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
			       mc_filter[i]);
		}

		sort_mode |= BNX2_RPM_SORT_USER0_MC_HSH_EN;
	}

	/* Secondary unicast addresses: use perfect-match slots when they
	 * all fit, otherwise fall back to promiscuous mode.
	 */
	uc_ptr = NULL;
	if (dev->uc_count > BNX2_MAX_UNICAST_ADDRESSES) {
		rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
		sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN |
			     BNX2_RPM_SORT_USER0_PROM_VLAN;
	} else if (!(dev->flags & IFF_PROMISC)) {
		uc_ptr = dev->uc_list;

		/* Add all entries into to the match filter list */
		for (i = 0; i < dev->uc_count; i++) {
			bnx2_set_mac_addr(bp, uc_ptr->da_addr,
					  i + BNX2_START_UNICAST_ADDRESS_INDEX);
			sort_mode |= (1 <<
				      (i + BNX2_START_UNICAST_ADDRESS_INDEX));
			uc_ptr = uc_ptr->next;
		}

	}

	/* Only touch the EMAC RX mode register when it actually changed. */
	if (rx_mode != bp->rx_mode) {
		bp->rx_mode = rx_mode;
		REG_WR(bp, BNX2_EMAC_RX_MODE, rx_mode);
	}

	/* Program the sort rules: clear, load, then enable. */
	REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
	REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode);
	REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode | BNX2_RPM_SORT_USER0_ENA);

	spin_unlock_bh(&bp->phy_lock);
}
3381
3382 static void
3383 load_rv2p_fw(struct bnx2 *bp, __le32 *rv2p_code, u32 rv2p_code_len,
3384         u32 rv2p_proc)
3385 {
3386         int i;
3387         u32 val;
3388
3389         if (rv2p_proc == RV2P_PROC2 && CHIP_NUM(bp) == CHIP_NUM_5709) {
3390                 val = le32_to_cpu(rv2p_code[XI_RV2P_PROC2_MAX_BD_PAGE_LOC]);
3391                 val &= ~XI_RV2P_PROC2_BD_PAGE_SIZE_MSK;
3392                 val |= XI_RV2P_PROC2_BD_PAGE_SIZE;
3393                 rv2p_code[XI_RV2P_PROC2_MAX_BD_PAGE_LOC] = cpu_to_le32(val);
3394         }
3395
3396         for (i = 0; i < rv2p_code_len; i += 8) {
3397                 REG_WR(bp, BNX2_RV2P_INSTR_HIGH, le32_to_cpu(*rv2p_code));
3398                 rv2p_code++;
3399                 REG_WR(bp, BNX2_RV2P_INSTR_LOW, le32_to_cpu(*rv2p_code));
3400                 rv2p_code++;
3401
3402                 if (rv2p_proc == RV2P_PROC1) {
3403                         val = (i / 8) | BNX2_RV2P_PROC1_ADDR_CMD_RDWR;
3404                         REG_WR(bp, BNX2_RV2P_PROC1_ADDR_CMD, val);
3405                 }
3406                 else {
3407                         val = (i / 8) | BNX2_RV2P_PROC2_ADDR_CMD_RDWR;
3408                         REG_WR(bp, BNX2_RV2P_PROC2_ADDR_CMD, val);
3409                 }
3410         }
3411
3412         /* Reset the processor, un-stall is done later. */
3413         if (rv2p_proc == RV2P_PROC1) {
3414                 REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC1_RESET);
3415         }
3416         else {
3417                 REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC2_RESET);
3418         }
3419 }
3420
/* Halt one on-chip CPU, copy the firmware image's text/data/sbss/bss/
 * rodata sections into its scratchpad via indirect register writes, set
 * the program counter, and restart it.  Returns 0 on success or a
 * negative value from zlib decompression.
 */
static int
load_cpu_fw(struct bnx2 *bp, const struct cpu_reg *cpu_reg, struct fw_info *fw)
{
	u32 offset;
	u32 val;
	int rc;

	/* Halt the CPU. */
	val = bnx2_reg_rd_ind(bp, cpu_reg->mode);
	val |= cpu_reg->mode_value_halt;
	bnx2_reg_wr_ind(bp, cpu_reg->mode, val);
	bnx2_reg_wr_ind(bp, cpu_reg->state, cpu_reg->state_value_clear);

	/* Load the Text area.  The text image is stored compressed
	 * (gz_text) and is inflated into fw->text — a scratch buffer
	 * supplied by the caller — before being written out word by word.
	 */
	offset = cpu_reg->spad_base + (fw->text_addr - cpu_reg->mips_view_base);
	if (fw->gz_text) {
		int j;

		rc = zlib_inflate_blob(fw->text, FW_BUF_SIZE, fw->gz_text,
				       fw->gz_text_len);
		if (rc < 0)
			return rc;

		for (j = 0; j < (fw->text_len / 4); j++, offset += 4) {
			bnx2_reg_wr_ind(bp, offset, le32_to_cpu(fw->text[j]));
		}
	}

	/* Load the Data area (stored uncompressed, written as-is). */
	offset = cpu_reg->spad_base + (fw->data_addr - cpu_reg->mips_view_base);
	if (fw->data) {
		int j;

		for (j = 0; j < (fw->data_len / 4); j++, offset += 4) {
			bnx2_reg_wr_ind(bp, offset, fw->data[j]);
		}
	}

	/* Load the SBSS area (zero-filled). */
	offset = cpu_reg->spad_base + (fw->sbss_addr - cpu_reg->mips_view_base);
	if (fw->sbss_len) {
		int j;

		for (j = 0; j < (fw->sbss_len / 4); j++, offset += 4) {
			bnx2_reg_wr_ind(bp, offset, 0);
		}
	}

	/* Load the BSS area (zero-filled). */
	offset = cpu_reg->spad_base + (fw->bss_addr - cpu_reg->mips_view_base);
	if (fw->bss_len) {
		int j;

		for (j = 0; j < (fw->bss_len/4); j++, offset += 4) {
			bnx2_reg_wr_ind(bp, offset, 0);
		}
	}

	/* Load the Read-Only area. */
	offset = cpu_reg->spad_base +
		(fw->rodata_addr - cpu_reg->mips_view_base);
	if (fw->rodata) {
		int j;

		for (j = 0; j < (fw->rodata_len / 4); j++, offset += 4) {
			bnx2_reg_wr_ind(bp, offset, fw->rodata[j]);
		}
	}

	/* Clear the pre-fetch instruction. */
	bnx2_reg_wr_ind(bp, cpu_reg->inst, 0);
	bnx2_reg_wr_ind(bp, cpu_reg->pc, fw->start_addr);

	/* Start the CPU: clear the halt bit and the state register. */
	val = bnx2_reg_rd_ind(bp, cpu_reg->mode);
	val &= ~cpu_reg->mode_value_halt;
	bnx2_reg_wr_ind(bp, cpu_reg->state, cpu_reg->state_value_clear);
	bnx2_reg_wr_ind(bp, cpu_reg->mode, val);

	return 0;
}
3502
3503 static int
3504 bnx2_init_cpus(struct bnx2 *bp)
3505 {
3506         struct fw_info *fw;
3507         int rc, rv2p_len;
3508         void *text, *rv2p;
3509
3510         /* Initialize the RV2P processor. */
3511         text = vmalloc(FW_BUF_SIZE);
3512         if (!text)
3513                 return -ENOMEM;
3514         if (CHIP_NUM(bp) == CHIP_NUM_5709) {
3515                 rv2p = bnx2_xi_rv2p_proc1;
3516                 rv2p_len = sizeof(bnx2_xi_rv2p_proc1);
3517         } else {
3518                 rv2p = bnx2_rv2p_proc1;
3519                 rv2p_len = sizeof(bnx2_rv2p_proc1);
3520         }
3521         rc = zlib_inflate_blob(text, FW_BUF_SIZE, rv2p, rv2p_len);
3522         if (rc < 0)
3523                 goto init_cpu_err;
3524
3525         load_rv2p_fw(bp, text, rc /* == len */, RV2P_PROC1);
3526
3527         if (CHIP_NUM(bp) == CHIP_NUM_5709) {
3528                 rv2p = bnx2_xi_rv2p_proc2;
3529                 rv2p_len = sizeof(bnx2_xi_rv2p_proc2);
3530         } else {
3531                 rv2p = bnx2_rv2p_proc2;
3532                 rv2p_len = sizeof(bnx2_rv2p_proc2);
3533         }
3534         rc = zlib_inflate_blob(text, FW_BUF_SIZE, rv2p, rv2p_len);
3535         if (rc < 0)
3536                 goto init_cpu_err;
3537
3538         load_rv2p_fw(bp, text, rc /* == len */, RV2P_PROC2);
3539
3540         /* Initialize the RX Processor. */
3541         if (CHIP_NUM(bp) == CHIP_NUM_5709)
3542                 fw = &bnx2_rxp_fw_09;
3543         else
3544                 fw = &bnx2_rxp_fw_06;
3545
3546         fw->text = text;
3547         rc = load_cpu_fw(bp, &cpu_reg_rxp, fw);
3548         if (rc)
3549                 goto init_cpu_err;
3550
3551         /* Initialize the TX Processor. */
3552         if (CHIP_NUM(bp) == CHIP_NUM_5709)
3553                 fw = &bnx2_txp_fw_09;
3554         else
3555                 fw = &bnx2_txp_fw_06;
3556
3557         fw->text = text;
3558         rc = load_cpu_fw(bp, &cpu_reg_txp, fw);
3559         if (rc)
3560                 goto init_cpu_err;
3561
3562         /* Initialize the TX Patch-up Processor. */
3563         if (CHIP_NUM(bp) == CHIP_NUM_5709)
3564                 fw = &bnx2_tpat_fw_09;
3565         else
3566                 fw = &bnx2_tpat_fw_06;
3567
3568         fw->text = text;
3569         rc = load_cpu_fw(bp, &cpu_reg_tpat, fw);
3570         if (rc)
3571                 goto init_cpu_err;
3572
3573         /* Initialize the Completion Processor. */
3574         if (CHIP_NUM(bp) == CHIP_NUM_5709)
3575                 fw = &bnx2_com_fw_09;
3576         else
3577                 fw = &bnx2_com_fw_06;
3578
3579         fw->text = text;
3580         rc = load_cpu_fw(bp, &cpu_reg_com, fw);
3581         if (rc)
3582                 goto init_cpu_err;
3583
3584         /* Initialize the Command Processor. */
3585         if (CHIP_NUM(bp) == CHIP_NUM_5709)
3586                 fw = &bnx2_cp_fw_09;
3587         else
3588                 fw = &bnx2_cp_fw_06;
3589
3590         fw->text = text;
3591         rc = load_cpu_fw(bp, &cpu_reg_cp, fw);
3592
3593 init_cpu_err:
3594         vfree(text);
3595         return rc;
3596 }
3597
/* Transition the device between PCI power states.  Only PCI_D0 (full
 * power) and PCI_D3hot (suspend, optionally with Wake-on-LAN armed)
 * are supported; anything else returns -EINVAL.
 */
static int
bnx2_set_power_state(struct bnx2 *bp, pci_power_t state)
{
	u16 pmcsr;

	pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);

	switch (state) {
	case PCI_D0: {
		u32 val;

		/* Clear the power-state bits and any pending PME status to
		 * bring the device to full power.
		 */
		pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
			(pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
			PCI_PM_CTRL_PME_STATUS);

		if (pmcsr & PCI_PM_CTRL_STATE_MASK)
			/* delay required during transition out of D3hot */
			msleep(20);

		/* Undo the D3hot WOL setup: ack magic/ACPI packet events
		 * and disable magic-packet mode.
		 */
		val = REG_RD(bp, BNX2_EMAC_MODE);
		val |= BNX2_EMAC_MODE_MPKT_RCVD | BNX2_EMAC_MODE_ACPI_RCVD;
		val &= ~BNX2_EMAC_MODE_MPKT;
		REG_WR(bp, BNX2_EMAC_MODE, val);

		val = REG_RD(bp, BNX2_RPM_CONFIG);
		val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
		REG_WR(bp, BNX2_RPM_CONFIG, val);
		break;
	}
	case PCI_D3hot: {
		int i;
		u32 val, wol_msg;

		if (bp->wol) {
			u32 advertising;
			u8 autoneg;

			/* Temporarily advertise only 10/100 autoneg on
			 * copper while reconfiguring the PHY (presumably so
			 * the link comes up at a speed usable during WOL);
			 * the user settings are restored below.
			 */
			autoneg = bp->autoneg;
			advertising = bp->advertising;

			if (bp->phy_port == PORT_TP) {
				bp->autoneg = AUTONEG_SPEED;
				bp->advertising = ADVERTISED_10baseT_Half |
					ADVERTISED_10baseT_Full |
					ADVERTISED_100baseT_Half |
					ADVERTISED_100baseT_Full |
					ADVERTISED_Autoneg;
			}

			spin_lock_bh(&bp->phy_lock);
			bnx2_setup_phy(bp, bp->phy_port);
			spin_unlock_bh(&bp->phy_lock);

			bp->autoneg = autoneg;
			bp->advertising = advertising;

			bnx2_set_mac_addr(bp, bp->dev->dev_addr, 0);

			val = REG_RD(bp, BNX2_EMAC_MODE);

			/* Enable port mode. */
			val &= ~BNX2_EMAC_MODE_PORT;
			val |= BNX2_EMAC_MODE_MPKT_RCVD |
			       BNX2_EMAC_MODE_ACPI_RCVD |
			       BNX2_EMAC_MODE_MPKT;
			if (bp->phy_port == PORT_TP)
				val |= BNX2_EMAC_MODE_PORT_MII;
			else {
				val |= BNX2_EMAC_MODE_PORT_GMII;
				if (bp->line_speed == SPEED_2500)
					val |= BNX2_EMAC_MODE_25G_MODE;
			}

			REG_WR(bp, BNX2_EMAC_MODE, val);

			/* receive all multicast */
			for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
				REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
				       0xffffffff);
			}
			REG_WR(bp, BNX2_EMAC_RX_MODE,
			       BNX2_EMAC_RX_MODE_SORT_MODE);

			/* Sort rules: clear, load, then enable. */
			val = 1 | BNX2_RPM_SORT_USER0_BC_EN |
			      BNX2_RPM_SORT_USER0_MC_EN;
			REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
			REG_WR(bp, BNX2_RPM_SORT_USER0, val);
			REG_WR(bp, BNX2_RPM_SORT_USER0, val |
			       BNX2_RPM_SORT_USER0_ENA);

			/* Need to enable EMAC and RPM for WOL. */
			REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
			       BNX2_MISC_ENABLE_SET_BITS_RX_PARSER_MAC_ENABLE |
			       BNX2_MISC_ENABLE_SET_BITS_TX_HEADER_Q_ENABLE |
			       BNX2_MISC_ENABLE_SET_BITS_EMAC_ENABLE);

			val = REG_RD(bp, BNX2_RPM_CONFIG);
			val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
			REG_WR(bp, BNX2_RPM_CONFIG, val);

			wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
		}
		else {
			wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
		}

		/* Notify the firmware that the driver is suspending. */
		if (!(bp->flags & BNX2_FLAG_NO_WOL))
			bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT3 | wol_msg,
				     1, 0);

		pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
		if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
		    (CHIP_ID(bp) == CHIP_ID_5706_A1)) {

			/* On 5706 A0/A1, set the D3hot state bits only when
			 * WOL is enabled.
			 */
			if (bp->wol)
				pmcsr |= 3;
		}
		else {
			pmcsr |= 3;
		}
		if (bp->wol) {
			pmcsr |= PCI_PM_CTRL_PME_ENABLE;
		}
		pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
				      pmcsr);

		/* No more memory access after this point until
		 * device is brought back to D0.
		 */
		udelay(50);
		break;
	}
	default:
		return -EINVAL;
	}
	return 0;
}
3735
3736 static int
3737 bnx2_acquire_nvram_lock(struct bnx2 *bp)
3738 {
3739         u32 val;
3740         int j;
3741
3742         /* Request access to the flash interface. */
3743         REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_SET2);
3744         for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3745                 val = REG_RD(bp, BNX2_NVM_SW_ARB);
3746                 if (val & BNX2_NVM_SW_ARB_ARB_ARB2)
3747                         break;
3748
3749                 udelay(5);
3750         }
3751
3752         if (j >= NVRAM_TIMEOUT_COUNT)
3753                 return -EBUSY;
3754
3755         return 0;
3756 }
3757
3758 static int
3759 bnx2_release_nvram_lock(struct bnx2 *bp)
3760 {
3761         int j;
3762         u32 val;
3763
3764         /* Relinquish nvram interface. */
3765         REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_CLR2);
3766
3767         for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3768                 val = REG_RD(bp, BNX2_NVM_SW_ARB);
3769                 if (!(val & BNX2_NVM_SW_ARB_ARB_ARB2))
3770                         break;
3771
3772                 udelay(5);
3773         }
3774
3775         if (j >= NVRAM_TIMEOUT_COUNT)
3776                 return -EBUSY;
3777
3778         return 0;
3779 }
3780
3781
3782 static int
3783 bnx2_enable_nvram_write(struct bnx2 *bp)
3784 {
3785         u32 val;
3786
3787         val = REG_RD(bp, BNX2_MISC_CFG);
3788         REG_WR(bp, BNX2_MISC_CFG, val | BNX2_MISC_CFG_NVM_WR_EN_PCI);
3789
3790         if (bp->flash_info->flags & BNX2_NV_WREN) {
3791                 int j;
3792
3793                 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
3794                 REG_WR(bp, BNX2_NVM_COMMAND,
3795                        BNX2_NVM_COMMAND_WREN | BNX2_NVM_COMMAND_DOIT);
3796
3797                 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3798                         udelay(5);
3799
3800                         val = REG_RD(bp, BNX2_NVM_COMMAND);
3801                         if (val & BNX2_NVM_COMMAND_DONE)
3802                                 break;
3803                 }
3804
3805                 if (j >= NVRAM_TIMEOUT_COUNT)
3806                         return -EBUSY;
3807         }
3808         return 0;
3809 }
3810
3811 static void
3812 bnx2_disable_nvram_write(struct bnx2 *bp)
3813 {
3814         u32 val;
3815
3816         val = REG_RD(bp, BNX2_MISC_CFG);
3817         REG_WR(bp, BNX2_MISC_CFG, val & ~BNX2_MISC_CFG_NVM_WR_EN);
3818 }
3819
3820
3821 static void
3822 bnx2_enable_nvram_access(struct bnx2 *bp)
3823 {
3824         u32 val;
3825
3826         val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
3827         /* Enable both bits, even on read. */
3828         REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
3829                val | BNX2_NVM_ACCESS_ENABLE_EN | BNX2_NVM_ACCESS_ENABLE_WR_EN);
3830 }
3831
3832 static void
3833 bnx2_disable_nvram_access(struct bnx2 *bp)
3834 {
3835         u32 val;
3836
3837         val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
3838         /* Disable both bits, even after read. */
3839         REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
3840                 val & ~(BNX2_NVM_ACCESS_ENABLE_EN |
3841                         BNX2_NVM_ACCESS_ENABLE_WR_EN));
3842 }
3843
3844 static int
3845 bnx2_nvram_erase_page(struct bnx2 *bp, u32 offset)
3846 {
3847         u32 cmd;
3848         int j;
3849
3850         if (bp->flash_info->flags & BNX2_NV_BUFFERED)
3851                 /* Buffered flash, no erase needed */
3852                 return 0;
3853
3854         /* Build an erase command */
3855         cmd = BNX2_NVM_COMMAND_ERASE | BNX2_NVM_COMMAND_WR |
3856               BNX2_NVM_COMMAND_DOIT;
3857
3858         /* Need to clear DONE bit separately. */
3859         REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
3860
3861         /* Address of the NVRAM to read from. */
3862         REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
3863
3864         /* Issue an erase command. */
3865         REG_WR(bp, BNX2_NVM_COMMAND, cmd);
3866
3867         /* Wait for completion. */
3868         for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3869                 u32 val;
3870
3871                 udelay(5);
3872
3873                 val = REG_RD(bp, BNX2_NVM_COMMAND);
3874                 if (val & BNX2_NVM_COMMAND_DONE)
3875                         break;
3876         }
3877
3878         if (j >= NVRAM_TIMEOUT_COUNT)
3879                 return -EBUSY;
3880
3881         return 0;
3882 }
3883
3884 static int
3885 bnx2_nvram_read_dword(struct bnx2 *bp, u32 offset, u8 *ret_val, u32 cmd_flags)
3886 {
3887         u32 cmd;
3888         int j;
3889
3890         /* Build the command word. */
3891         cmd = BNX2_NVM_COMMAND_DOIT | cmd_flags;
3892
3893         /* Calculate an offset of a buffered flash, not needed for 5709. */
3894         if (bp->flash_info->flags & BNX2_NV_TRANSLATE) {
3895                 offset = ((offset / bp->flash_info->page_size) <<
3896                            bp->flash_info->page_bits) +
3897                           (offset % bp->flash_info->page_size);
3898         }
3899
3900         /* Need to clear DONE bit separately. */
3901         REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
3902
3903         /* Address of the NVRAM to read from. */
3904         REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
3905
3906         /* Issue a read command. */
3907         REG_WR(bp, BNX2_NVM_COMMAND, cmd);
3908
3909         /* Wait for completion. */
3910         for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3911                 u32 val;
3912
3913                 udelay(5);
3914
3915                 val = REG_RD(bp, BNX2_NVM_COMMAND);
3916                 if (val & BNX2_NVM_COMMAND_DONE) {
3917                         __be32 v = cpu_to_be32(REG_RD(bp, BNX2_NVM_READ));
3918                         memcpy(ret_val, &v, 4);
3919                         break;
3920                 }
3921         }
3922         if (j >= NVRAM_TIMEOUT_COUNT)
3923                 return -EBUSY;
3924
3925         return 0;
3926 }
3927
3928
3929 static int
3930 bnx2_nvram_write_dword(struct bnx2 *bp, u32 offset, u8 *val, u32 cmd_flags)
3931 {
3932         u32 cmd;
3933         __be32 val32;
3934         int j;
3935
3936         /* Build the command word. */
3937         cmd = BNX2_NVM_COMMAND_DOIT | BNX2_NVM_COMMAND_WR | cmd_flags;
3938
3939         /* Calculate an offset of a buffered flash, not needed for 5709. */
3940         if (bp->flash_info->flags & BNX2_NV_TRANSLATE) {
3941                 offset = ((offset / bp->flash_info->page_size) <<
3942                           bp->flash_info->page_bits) +
3943                          (offset % bp->flash_info->page_size);
3944         }
3945
3946         /* Need to clear DONE bit separately. */
3947         REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
3948
3949         memcpy(&val32, val, 4);
3950
3951         /* Write the data. */
3952         REG_WR(bp, BNX2_NVM_WRITE, be32_to_cpu(val32));
3953
3954         /* Address of the NVRAM to write to. */
3955         REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
3956
3957         /* Issue the write command. */
3958         REG_WR(bp, BNX2_NVM_COMMAND, cmd);
3959
3960         /* Wait for completion. */
3961         for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3962                 udelay(5);
3963
3964                 if (REG_RD(bp, BNX2_NVM_COMMAND) & BNX2_NVM_COMMAND_DONE)
3965                         break;
3966         }
3967         if (j >= NVRAM_TIMEOUT_COUNT)
3968                 return -EBUSY;
3969
3970         return 0;
3971 }
3972
/* Identify the attached flash/EEPROM part from the NVM strap bits,
 * reconfigure the flash interface if needed, and record the part's
 * descriptor and size in bp->flash_info / bp->flash_size.
 */
static int
bnx2_init_nvram(struct bnx2 *bp)
{
	u32 val;
	int j, entry_count, rc = 0;
	struct flash_spec *flash;

	/* 5709 has a fixed, known flash interface; skip strap decoding. */
	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		bp->flash_info = &flash_5709;
		goto get_flash_size;
	}

	/* Determine the selected interface. */
	val = REG_RD(bp, BNX2_NVM_CFG1);

	entry_count = ARRAY_SIZE(flash_table);

	if (val & 0x40000000) {

		/* Flash interface has been reconfigured */
		for (j = 0, flash = &flash_table[0]; j < entry_count;
		     j++, flash++) {
			/* Match on the backup-strap bits of config1. */
			if ((val & FLASH_BACKUP_STRAP_MASK) ==
			    (flash->config1 & FLASH_BACKUP_STRAP_MASK)) {
				bp->flash_info = flash;
				break;
			}
		}
	}
	else {
		u32 mask;
		/* Not yet been reconfigured */

		/* Bit 23 selects which strap field identifies the part. */
		if (val & (1 << 23))
			mask = FLASH_BACKUP_STRAP_MASK;
		else
			mask = FLASH_STRAP_MASK;

		for (j = 0, flash = &flash_table[0]; j < entry_count;
			j++, flash++) {

			if ((val & mask) == (flash->strapping & mask)) {
				bp->flash_info = flash;

				/* Request access to the flash interface. */
				if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
					return rc;

				/* Enable access to flash interface */
				bnx2_enable_nvram_access(bp);

				/* Reconfigure the flash interface */
				REG_WR(bp, BNX2_NVM_CFG1, flash->config1);
				REG_WR(bp, BNX2_NVM_CFG2, flash->config2);
				REG_WR(bp, BNX2_NVM_CFG3, flash->config3);
				REG_WR(bp, BNX2_NVM_WRITE1, flash->write1);

				/* Disable access to flash interface */
				bnx2_disable_nvram_access(bp);
				bnx2_release_nvram_lock(bp);

				break;
			}
		}
	} /* if (val & 0x40000000) */

	/* No table entry matched the straps. */
	if (j == entry_count) {
		bp->flash_info = NULL;
		printk(KERN_ALERT PFX "Unknown flash/EEPROM type.\n");
		return -ENODEV;
	}

get_flash_size:
	/* Prefer the size reported in shared memory; fall back to the
	 * table entry's total size when firmware does not provide one.
	 */
	val = bnx2_shmem_rd(bp, BNX2_SHARED_HW_CFG_CONFIG2);
	val &= BNX2_SHARED_HW_CFG2_NVM_SIZE_MASK;
	if (val)
		bp->flash_size = val;
	else
		bp->flash_size = bp->flash_info->total_size;

	return rc;
}
4055
4056 static int
4057 bnx2_nvram_read(struct bnx2 *bp, u32 offset, u8 *ret_buf,
4058                 int buf_size)
4059 {
4060         int rc = 0;
4061         u32 cmd_flags, offset32, len32, extra;
4062
4063         if (buf_size == 0)
4064                 return 0;
4065
4066         /* Request access to the flash interface. */
4067         if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
4068                 return rc;
4069
4070         /* Enable access to flash interface */
4071         bnx2_enable_nvram_access(bp);
4072
4073         len32 = buf_size;
4074         offset32 = offset;
4075         extra = 0;
4076
4077         cmd_flags = 0;
4078
4079         if (offset32 & 3) {
4080                 u8 buf[4];
4081                 u32 pre_len;
4082
4083                 offset32 &= ~3;
4084                 pre_len = 4 - (offset & 3);
4085
4086                 if (pre_len >= len32) {
4087                         pre_len = len32;
4088                         cmd_flags = BNX2_NVM_COMMAND_FIRST |
4089                                     BNX2_NVM_COMMAND_LAST;
4090                 }
4091                 else {
4092                         cmd_flags = BNX2_NVM_COMMAND_FIRST;
4093                 }
4094
4095                 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
4096
4097                 if (rc)
4098                         return rc;
4099
4100                 memcpy(ret_buf, buf + (offset & 3), pre_len);
4101
4102                 offset32 += 4;
4103                 ret_buf += pre_len;
4104                 len32 -= pre_len;
4105         }
4106         if (len32 & 3) {
4107                 extra = 4 - (len32 & 3);
4108                 len32 = (len32 + 4) & ~3;
4109         }
4110
4111         if (len32 == 4) {
4112                 u8 buf[4];
4113
4114                 if (cmd_flags)
4115                         cmd_flags = BNX2_NVM_COMMAND_LAST;
4116                 else
4117                         cmd_flags = BNX2_NVM_COMMAND_FIRST |
4118                                     BNX2_NVM_COMMAND_LAST;
4119
4120                 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
4121
4122                 memcpy(ret_buf, buf, 4 - extra);
4123         }
4124         else if (len32 > 0) {
4125                 u8 buf[4];
4126
4127                 /* Read the first word. */
4128                 if (cmd_flags)
4129                         cmd_flags = 0;
4130                 else
4131                         cmd_flags = BNX2_NVM_COMMAND_FIRST;
4132
4133                 rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, cmd_flags);
4134
4135                 /* Advance to the next dword. */
4136                 offset32 += 4;
4137                 ret_buf += 4;
4138                 len32 -= 4;
4139
4140                 while (len32 > 4 && rc == 0) {
4141                         rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, 0);
4142
4143                         /* Advance to the next dword. */
4144                         offset32 += 4;
4145                         ret_buf += 4;
4146                         len32 -= 4;
4147                 }
4148
4149                 if (rc)
4150                         return rc;
4151
4152                 cmd_flags = BNX2_NVM_COMMAND_LAST;
4153                 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
4154
4155                 memcpy(ret_buf, buf, 4 - extra);
4156         }
4157
4158         /* Disable access to flash interface */
4159         bnx2_disable_nvram_access(bp);
4160
4161         bnx2_release_nvram_lock(bp);
4162
4163         return rc;
4164 }
4165
/* Write buf_size bytes from data_buf to NVRAM starting at byte 'offset'.
 *
 * Unaligned head/tail bytes are handled with a read-modify-write: the
 * surrounding dwords are read back first and merged into a kmalloc'ed
 * dword-aligned shadow buffer.  For non-buffered flash parts each touched
 * page must be read in full, erased, and rewritten; buffered parts can be
 * written dword-at-a-time directly.
 *
 * Returns 0 on success or a negative errno (from the NVRAM helpers or
 * -ENOMEM on allocation failure).
 */
static int
bnx2_nvram_write(struct bnx2 *bp, u32 offset, u8 *data_buf,
                int buf_size)
{
        u32 written, offset32, len32;
        u8 *buf, start[4], end[4], *align_buf = NULL, *flash_buffer = NULL;
        int rc = 0;
        int align_start, align_end;

        buf = data_buf;
        offset32 = offset;
        len32 = buf_size;
        align_start = align_end = 0;

        /* Leading misalignment: round offset down to a dword boundary and
         * read back the dword that contains the first target byte. */
        if ((align_start = (offset32 & 3))) {
                offset32 &= ~3;
                len32 += align_start;
                if (len32 < 4)
                        len32 = 4;
                if ((rc = bnx2_nvram_read(bp, offset32, start, 4)))
                        return rc;
        }

        /* Trailing misalignment: round the length up and read back the
         * dword that contains the last target byte. */
        if (len32 & 3) {
                align_end = 4 - (len32 & 3);
                len32 += align_end;
                if ((rc = bnx2_nvram_read(bp, offset32 + len32 - 4, end, 4)))
                        return rc;
        }

        /* Merge the preserved head/tail bytes with the caller's data into
         * a dword-aligned shadow buffer; all writes go from 'buf'. */
        if (align_start || align_end) {
                align_buf = kmalloc(len32, GFP_KERNEL);
                if (align_buf == NULL)
                        return -ENOMEM;
                if (align_start) {
                        memcpy(align_buf, start, 4);
                }
                if (align_end) {
                        memcpy(align_buf + len32 - 4, end, 4);
                }
                memcpy(align_buf + align_start, data_buf, buf_size);
                buf = align_buf;
        }

        /* Non-buffered flash needs a scratch buffer big enough for one
         * page (264 bytes) for the read/erase/rewrite cycle below. */
        if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
                flash_buffer = kmalloc(264, GFP_KERNEL);
                if (flash_buffer == NULL) {
                        rc = -ENOMEM;
                        goto nvram_write_end;
                }
        }

        /* Process one flash page per iteration. */
        written = 0;
        while ((written < len32) && (rc == 0)) {
                u32 page_start, page_end, data_start, data_end;
                u32 addr, cmd_flags;
                int i;

                /* Find the page_start addr */
                page_start = offset32 + written;
                page_start -= (page_start % bp->flash_info->page_size);
                /* Find the page_end addr */
                page_end = page_start + bp->flash_info->page_size;
                /* Find the data_start addr */
                data_start = (written == 0) ? offset32 : page_start;
                /* Find the data_end addr */
                data_end = (page_end > offset32 + len32) ?
                        (offset32 + len32) : page_end;

                /* Request access to the flash interface. */
                if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
                        goto nvram_write_end;

                /* Enable access to flash interface */
                bnx2_enable_nvram_access(bp);

                cmd_flags = BNX2_NVM_COMMAND_FIRST;
                if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
                        int j;

                        /* Read the whole page into the buffer
                         * (non-buffer flash only) */
                        for (j = 0; j < bp->flash_info->page_size; j += 4) {
                                if (j == (bp->flash_info->page_size - 4)) {
                                        cmd_flags |= BNX2_NVM_COMMAND_LAST;
                                }
                                rc = bnx2_nvram_read_dword(bp,
                                        page_start + j,
                                        &flash_buffer[j],
                                        cmd_flags);

                                if (rc)
                                        goto nvram_write_end;

                                cmd_flags = 0;
                        }
                }

                /* Enable writes to flash interface (unlock write-protect) */
                if ((rc = bnx2_enable_nvram_write(bp)) != 0)
                        goto nvram_write_end;

                /* Loop to write back the buffer data from page_start to
                 * data_start */
                i = 0;
                if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
                        /* Erase the page */
                        if ((rc = bnx2_nvram_erase_page(bp, page_start)) != 0)
                                goto nvram_write_end;

                        /* Re-enable the write again for the actual write */
                        bnx2_enable_nvram_write(bp);

                        for (addr = page_start; addr < data_start;
                                addr += 4, i += 4) {

                                rc = bnx2_nvram_write_dword(bp, addr,
                                        &flash_buffer[i], cmd_flags);

                                if (rc != 0)
                                        goto nvram_write_end;

                                cmd_flags = 0;
                        }
                }

                /* Loop to write the new data from data_start to data_end */
                for (addr = data_start; addr < data_end; addr += 4, i += 4) {
                        if ((addr == page_end - 4) ||
                                ((bp->flash_info->flags & BNX2_NV_BUFFERED) &&
                                 (addr == data_end - 4))) {

                                cmd_flags |= BNX2_NVM_COMMAND_LAST;
                        }
                        rc = bnx2_nvram_write_dword(bp, addr, buf,
                                cmd_flags);

                        if (rc != 0)
                                goto nvram_write_end;

                        cmd_flags = 0;
                        buf += 4;
                }

                /* Loop to write back the buffer data from data_end
                 * to page_end */
                if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
                        for (addr = data_end; addr < page_end;
                                addr += 4, i += 4) {

                                if (addr == page_end-4) {
                                        cmd_flags = BNX2_NVM_COMMAND_LAST;
                                }
                                rc = bnx2_nvram_write_dword(bp, addr,
                                        &flash_buffer[i], cmd_flags);

                                if (rc != 0)
                                        goto nvram_write_end;

                                cmd_flags = 0;
                        }
                }

                /* Disable writes to flash interface (lock write-protect) */
                bnx2_disable_nvram_write(bp);

                /* Disable access to flash interface */
                bnx2_disable_nvram_access(bp);
                bnx2_release_nvram_lock(bp);

                /* Increment written */
                written += data_end - data_start;
        }

nvram_write_end:
        /* kfree(NULL) is a no-op, so both buffers can be freed blindly. */
        kfree(flash_buffer);
        kfree(align_buf);
        return rc;
}
4345
4346 static void
4347 bnx2_init_fw_cap(struct bnx2 *bp)
4348 {
4349         u32 val, sig = 0;
4350
4351         bp->phy_flags &= ~BNX2_PHY_FLAG_REMOTE_PHY_CAP;
4352         bp->flags &= ~BNX2_FLAG_CAN_KEEP_VLAN;
4353
4354         if (!(bp->flags & BNX2_FLAG_ASF_ENABLE))
4355                 bp->flags |= BNX2_FLAG_CAN_KEEP_VLAN;
4356
4357         val = bnx2_shmem_rd(bp, BNX2_FW_CAP_MB);
4358         if ((val & BNX2_FW_CAP_SIGNATURE_MASK) != BNX2_FW_CAP_SIGNATURE)
4359                 return;
4360
4361         if ((val & BNX2_FW_CAP_CAN_KEEP_VLAN) == BNX2_FW_CAP_CAN_KEEP_VLAN) {
4362                 bp->flags |= BNX2_FLAG_CAN_KEEP_VLAN;
4363                 sig |= BNX2_DRV_ACK_CAP_SIGNATURE | BNX2_FW_CAP_CAN_KEEP_VLAN;
4364         }
4365
4366         if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
4367             (val & BNX2_FW_CAP_REMOTE_PHY_CAPABLE)) {
4368                 u32 link;
4369
4370                 bp->phy_flags |= BNX2_PHY_FLAG_REMOTE_PHY_CAP;
4371
4372                 link = bnx2_shmem_rd(bp, BNX2_LINK_STATUS);
4373                 if (link & BNX2_LINK_STATUS_SERDES_LINK)
4374                         bp->phy_port = PORT_FIBRE;
4375                 else
4376                         bp->phy_port = PORT_TP;
4377
4378                 sig |= BNX2_DRV_ACK_CAP_SIGNATURE |
4379                        BNX2_FW_CAP_REMOTE_PHY_CAPABLE;
4380         }
4381
4382         if (netif_running(bp->dev) && sig)
4383                 bnx2_shmem_wr(bp, BNX2_DRV_ACK_CAP_MB, sig);
4384 }
4385
/* Program the PCI GRC windows so the MSI-X vector table and pending-bit
 * array (PBA) are reachable through separate register windows. */
static void
bnx2_setup_msix_tbl(struct bnx2 *bp)
{
        /* Switch the GRC window into separate sub-window mode first. */
        REG_WR(bp, BNX2_PCI_GRC_WINDOW_ADDR, BNX2_PCI_GRC_WINDOW_ADDR_SEP_WIN);

        REG_WR(bp, BNX2_PCI_GRC_WINDOW2_ADDR, BNX2_MSIX_TABLE_ADDR);
        REG_WR(bp, BNX2_PCI_GRC_WINDOW3_ADDR, BNX2_MSIX_PBA_ADDR);
}
4394
/* Soft-reset the chip and bring it back to a known state.
 *
 * reset_code is the BNX2_DRV_MSG_CODE_* reason combined into the
 * firmware handshakes before and after the reset.  Returns 0 on
 * success, -EBUSY if the reset never completes, -ENODEV if the chip
 * comes back in the wrong endian mode, or the bnx2_fw_sync() error.
 */
static int
bnx2_reset_chip(struct bnx2 *bp, u32 reset_code)
{
        u32 val;
        int i, rc = 0;
        u8 old_port;

        /* Wait for the current PCI transaction to complete before
         * issuing a reset. */
        REG_WR(bp, BNX2_MISC_ENABLE_CLR_BITS,
               BNX2_MISC_ENABLE_CLR_BITS_TX_DMA_ENABLE |
               BNX2_MISC_ENABLE_CLR_BITS_DMA_ENGINE_ENABLE |
               BNX2_MISC_ENABLE_CLR_BITS_RX_DMA_ENABLE |
               BNX2_MISC_ENABLE_CLR_BITS_HOST_COALESCE_ENABLE);
        val = REG_RD(bp, BNX2_MISC_ENABLE_CLR_BITS);
        udelay(5);

        /* Wait for the firmware to tell us it is ok to issue a reset. */
        bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT0 | reset_code, 1, 1);

        /* Deposit a driver reset signature so the firmware knows that
         * this is a soft reset. */
        bnx2_shmem_wr(bp, BNX2_DRV_RESET_SIGNATURE,
                      BNX2_DRV_RESET_SIGNATURE_MAGIC);

        /* Do a dummy read to force the chip to complete all current transaction
         * before we issue a reset. */
        val = REG_RD(bp, BNX2_MISC_ID);

        if (CHIP_NUM(bp) == CHIP_NUM_5709) {
                /* 5709: reset via the MISC command register, then restore
                 * the register window config through PCI config space. */
                REG_WR(bp, BNX2_MISC_COMMAND, BNX2_MISC_COMMAND_SW_RESET);
                REG_RD(bp, BNX2_MISC_COMMAND);
                udelay(5);

                val = BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
                      BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;

                pci_write_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG, val);

        } else {
                /* Older chips: request a core reset through MISC_CONFIG. */
                val = BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
                      BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
                      BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;

                /* Chip reset. */
                REG_WR(bp, BNX2_PCICFG_MISC_CONFIG, val);

                /* Reading back any register after chip reset will hang the
                 * bus on 5706 A0 and A1.  The msleep below provides plenty
                 * of margin for write posting.
                 */
                if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
                    (CHIP_ID(bp) == CHIP_ID_5706_A1))
                        msleep(20);

                /* Reset takes approximate 30 usec */
                for (i = 0; i < 10; i++) {
                        val = REG_RD(bp, BNX2_PCICFG_MISC_CONFIG);
                        if ((val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
                                    BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) == 0)
                                break;
                        udelay(10);
                }

                if (val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
                           BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) {
                        printk(KERN_ERR PFX "Chip reset did not complete\n");
                        return -EBUSY;
                }
        }

        /* Make sure byte swapping is properly configured. */
        val = REG_RD(bp, BNX2_PCI_SWAP_DIAG0);
        if (val != 0x01020304) {
                printk(KERN_ERR PFX "Chip not in correct endian mode\n");
                return -ENODEV;
        }

        /* Wait for the firmware to finish its initialization. */
        rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT1 | reset_code, 1, 0);
        if (rc)
                return rc;

        /* Re-read firmware capabilities; if the remote-PHY port type
         * changed across the reset, reprogram the default remote link. */
        spin_lock_bh(&bp->phy_lock);
        old_port = bp->phy_port;
        bnx2_init_fw_cap(bp);
        if ((bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) &&
            old_port != bp->phy_port)
                bnx2_set_default_remote_link(bp);
        spin_unlock_bh(&bp->phy_lock);

        if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
                /* Adjust the voltage regular to two steps lower.  The default
                 * of this register is 0x0000000e. */
                REG_WR(bp, BNX2_MISC_VREG_CONTROL, 0x000000fa);

                /* Remove bad rbuf memory from the free pool. */
                rc = bnx2_alloc_bad_rbuf(bp);
        }

        /* The reset wiped the GRC windows; redo the MSI-X mapping. */
        if (bp->flags & BNX2_FLAG_USING_MSIX)
                bnx2_setup_msix_tbl(bp);

        return rc;
}
4500
/* Program the chip after a reset: DMA config, internal CPUs, MAC
 * address, MTU, host coalescing parameters and per-vector status block
 * configuration, then tell the firmware initialization is complete and
 * enable the remaining hardware blocks.
 *
 * Returns 0 on success or a negative errno from context/CPU init or
 * the final firmware handshake.
 */
static int
bnx2_init_chip(struct bnx2 *bp)
{
        u32 val, mtu;
        int rc, i;

        /* Make sure the interrupt is not active. */
        REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, BNX2_PCICFG_INT_ACK_CMD_MASK_INT);

        /* Build the DMA configuration: byte/word swap settings and the
         * number of read/write DMA channels. */
        val = BNX2_DMA_CONFIG_DATA_BYTE_SWAP |
              BNX2_DMA_CONFIG_DATA_WORD_SWAP |
#ifdef __BIG_ENDIAN
              BNX2_DMA_CONFIG_CNTL_BYTE_SWAP |
#endif
              BNX2_DMA_CONFIG_CNTL_WORD_SWAP |
              DMA_READ_CHANS << 12 |
              DMA_WRITE_CHANS << 16;

        val |= (0x2 << 20) | (1 << 11);

        if ((bp->flags & BNX2_FLAG_PCIX) && (bp->bus_speed_mhz == 133))
                val |= (1 << 23);

        if ((CHIP_NUM(bp) == CHIP_NUM_5706) &&
            (CHIP_ID(bp) != CHIP_ID_5706_A0) && !(bp->flags & BNX2_FLAG_PCIX))
                val |= BNX2_DMA_CONFIG_CNTL_PING_PONG_DMA;

        REG_WR(bp, BNX2_DMA_CONFIG, val);

        /* 5706 A0 workaround: restrict TDMA to one DMA at a time. */
        if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
                val = REG_RD(bp, BNX2_TDMA_CONFIG);
                val |= BNX2_TDMA_CONFIG_ONE_DMA;
                REG_WR(bp, BNX2_TDMA_CONFIG, val);
        }

        /* On PCI-X, clear the Enable Relaxed Ordering bit. */
        if (bp->flags & BNX2_FLAG_PCIX) {
                u16 val16;

                pci_read_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
                                     &val16);
                pci_write_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
                                      val16 & ~PCI_X_CMD_ERO);
        }

        REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
               BNX2_MISC_ENABLE_SET_BITS_HOST_COALESCE_ENABLE |
               BNX2_MISC_ENABLE_STATUS_BITS_RX_V2P_ENABLE |
               BNX2_MISC_ENABLE_STATUS_BITS_CONTEXT_ENABLE);

        /* Initialize context mapping and zero out the quick contexts.  The
         * context block must have already been enabled. */
        if (CHIP_NUM(bp) == CHIP_NUM_5709) {
                rc = bnx2_init_5709_context(bp);
                if (rc)
                        return rc;
        } else
                bnx2_init_context(bp);

        /* Load firmware into the on-chip CPUs. */
        if ((rc = bnx2_init_cpus(bp)) != 0)
                return rc;

        bnx2_init_nvram(bp);

        bnx2_set_mac_addr(bp, bp->dev->dev_addr, 0);

        /* Configure the mailbox queue block size; disable MQ halt on
         * 5709 A0/A1. */
        val = REG_RD(bp, BNX2_MQ_CONFIG);
        val &= ~BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE;
        val |= BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE_256;
        if (CHIP_ID(bp) == CHIP_ID_5709_A0 || CHIP_ID(bp) == CHIP_ID_5709_A1)
                val |= BNX2_MQ_CONFIG_HALT_DIS;

        REG_WR(bp, BNX2_MQ_CONFIG, val);

        val = 0x10000 + (MAX_CID_CNT * MB_KERNEL_CTX_SIZE);
        REG_WR(bp, BNX2_MQ_KNL_BYP_WIND_START, val);
        REG_WR(bp, BNX2_MQ_KNL_WIND_END, val);

        /* Tell RV2P the host page size. */
        val = (BCM_PAGE_BITS - 8) << 24;
        REG_WR(bp, BNX2_RV2P_CONFIG, val);

        /* Configure page size. */
        val = REG_RD(bp, BNX2_TBDR_CONFIG);
        val &= ~BNX2_TBDR_CONFIG_PAGE_SIZE;
        val |= (BCM_PAGE_BITS - 8) << 24 | 0x40;
        REG_WR(bp, BNX2_TBDR_CONFIG, val);

        /* Seed the backoff timer from the MAC address. */
        val = bp->mac_addr[0] +
              (bp->mac_addr[1] << 8) +
              (bp->mac_addr[2] << 16) +
              bp->mac_addr[3] +
              (bp->mac_addr[4] << 8) +
              (bp->mac_addr[5] << 16);
        REG_WR(bp, BNX2_EMAC_BACKOFF_SEED, val);

        /* Program the MTU.  Also include 4 bytes for CRC32. */
        mtu = bp->dev->mtu;
        val = mtu + ETH_HLEN + ETH_FCS_LEN;
        if (val > (MAX_ETHERNET_PACKET_SIZE + 4))
                val |= BNX2_EMAC_RX_MTU_SIZE_JUMBO_ENA;
        REG_WR(bp, BNX2_EMAC_RX_MTU_SIZE, val);

        /* RBUF thresholds are sized for at least a standard 1500 MTU. */
        if (mtu < 1500)
                mtu = 1500;

        bnx2_reg_wr_ind(bp, BNX2_RBUF_CONFIG, BNX2_RBUF_CONFIG_VAL(mtu));
        bnx2_reg_wr_ind(bp, BNX2_RBUF_CONFIG2, BNX2_RBUF_CONFIG2_VAL(mtu));
        bnx2_reg_wr_ind(bp, BNX2_RBUF_CONFIG3, BNX2_RBUF_CONFIG3_VAL(mtu));

        /* Reset the per-vector status index bookkeeping. */
        for (i = 0; i < BNX2_MAX_MSIX_VEC; i++)
                bp->bnx2_napi[i].last_status_idx = 0;

        bp->idle_chk_status_idx = 0xffff;

        bp->rx_mode = BNX2_EMAC_RX_MODE_SORT_MODE;

        /* Set up how to generate a link change interrupt. */
        REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);

        /* DMA addresses of the status and statistics blocks. */
        REG_WR(bp, BNX2_HC_STATUS_ADDR_L,
               (u64) bp->status_blk_mapping & 0xffffffff);
        REG_WR(bp, BNX2_HC_STATUS_ADDR_H, (u64) bp->status_blk_mapping >> 32);

        REG_WR(bp, BNX2_HC_STATISTICS_ADDR_L,
               (u64) bp->stats_blk_mapping & 0xffffffff);
        REG_WR(bp, BNX2_HC_STATISTICS_ADDR_H,
               (u64) bp->stats_blk_mapping >> 32);

        /* Host coalescing parameters for the default (vector 0) rings.
         * Each register packs the in-interrupt value in the high half
         * and the normal value in the low half. */
        REG_WR(bp, BNX2_HC_TX_QUICK_CONS_TRIP,
               (bp->tx_quick_cons_trip_int << 16) | bp->tx_quick_cons_trip);

        REG_WR(bp, BNX2_HC_RX_QUICK_CONS_TRIP,
               (bp->rx_quick_cons_trip_int << 16) | bp->rx_quick_cons_trip);

        REG_WR(bp, BNX2_HC_COMP_PROD_TRIP,
               (bp->comp_prod_trip_int << 16) | bp->comp_prod_trip);

        REG_WR(bp, BNX2_HC_TX_TICKS, (bp->tx_ticks_int << 16) | bp->tx_ticks);

        REG_WR(bp, BNX2_HC_RX_TICKS, (bp->rx_ticks_int << 16) | bp->rx_ticks);

        REG_WR(bp, BNX2_HC_COM_TICKS,
               (bp->com_ticks_int << 16) | bp->com_ticks);

        REG_WR(bp, BNX2_HC_CMD_TICKS,
               (bp->cmd_ticks_int << 16) | bp->cmd_ticks);

        if (CHIP_NUM(bp) == CHIP_NUM_5708)
                REG_WR(bp, BNX2_HC_STATS_TICKS, 0);
        else
                REG_WR(bp, BNX2_HC_STATS_TICKS, bp->stats_ticks);
        REG_WR(bp, BNX2_HC_STAT_COLLECT_TICKS, 0xbb8);  /* 3ms */

        /* 5706 A1 cannot use the timer modes; only collect stats. */
        if (CHIP_ID(bp) == CHIP_ID_5706_A1)
                val = BNX2_HC_CONFIG_COLLECT_STATS;
        else {
                val = BNX2_HC_CONFIG_RX_TMR_MODE | BNX2_HC_CONFIG_TX_TMR_MODE |
                      BNX2_HC_CONFIG_COLLECT_STATS;
        }

        if (bp->irq_nvecs > 1) {
                REG_WR(bp, BNX2_HC_MSIX_BIT_VECTOR,
                       BNX2_HC_MSIX_BIT_VECTOR_VAL);

                val |= BNX2_HC_CONFIG_SB_ADDR_INC_128B;
        }

        if (bp->flags & BNX2_FLAG_ONE_SHOT_MSI)
                val |= BNX2_HC_CONFIG_ONE_SHOT;

        REG_WR(bp, BNX2_HC_CONFIG, val);

        /* Per-vector status block configuration for the extra MSI-X
         * vectors (vector 0 was configured above). */
        for (i = 1; i < bp->irq_nvecs; i++) {
                u32 base = ((i - 1) * BNX2_HC_SB_CONFIG_SIZE) +
                           BNX2_HC_SB_CONFIG_1;

                REG_WR(bp, base,
                        BNX2_HC_SB_CONFIG_1_TX_TMR_MODE |
                        BNX2_HC_SB_CONFIG_1_RX_TMR_MODE |
                        BNX2_HC_SB_CONFIG_1_ONE_SHOT);

                REG_WR(bp, base + BNX2_HC_TX_QUICK_CONS_TRIP_OFF,
                        (bp->tx_quick_cons_trip_int << 16) |
                         bp->tx_quick_cons_trip);

                REG_WR(bp, base + BNX2_HC_TX_TICKS_OFF,
                        (bp->tx_ticks_int << 16) | bp->tx_ticks);

                REG_WR(bp, base + BNX2_HC_RX_QUICK_CONS_TRIP_OFF,
                       (bp->rx_quick_cons_trip_int << 16) |
                        bp->rx_quick_cons_trip);

                REG_WR(bp, base + BNX2_HC_RX_TICKS_OFF,
                        (bp->rx_ticks_int << 16) | bp->rx_ticks);
        }

        /* Clear internal stats counters. */
        REG_WR(bp, BNX2_HC_COMMAND, BNX2_HC_COMMAND_CLR_STAT_NOW);

        REG_WR(bp, BNX2_HC_ATTN_BITS_ENABLE, STATUS_ATTN_EVENTS);

        /* Initialize the receive filter. */
        bnx2_set_rx_mode(bp->dev);

        if (CHIP_NUM(bp) == CHIP_NUM_5709) {
                val = REG_RD(bp, BNX2_MISC_NEW_CORE_CTL);
                val |= BNX2_MISC_NEW_CORE_CTL_DMA_ENABLE;
                REG_WR(bp, BNX2_MISC_NEW_CORE_CTL, val);
        }
        /* Tell the firmware initialization is complete. */
        rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT2 | BNX2_DRV_MSG_CODE_RESET,
                          1, 0);

        REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS, BNX2_MISC_ENABLE_DEFAULT);
        REG_RD(bp, BNX2_MISC_ENABLE_SET_BITS);

        udelay(20);

        bp->hc_cmd = REG_RD(bp, BNX2_HC_COMMAND);

        return rc;
}
4721
4722 static void
4723 bnx2_clear_ring_states(struct bnx2 *bp)
4724 {
4725         struct bnx2_napi *bnapi;
4726         struct bnx2_tx_ring_info *txr;
4727         struct bnx2_rx_ring_info *rxr;
4728         int i;
4729
4730         for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) {
4731                 bnapi = &bp->bnx2_napi[i];
4732                 txr = &bnapi->tx_ring;
4733                 rxr = &bnapi->rx_ring;
4734
4735                 txr->tx_cons = 0;
4736                 txr->hw_tx_cons = 0;
4737                 rxr->rx_prod_bseq = 0;
4738                 rxr->rx_prod = 0;
4739                 rxr->rx_cons = 0;
4740                 rxr->rx_pg_prod = 0;
4741                 rxr->rx_pg_cons = 0;
4742         }
4743 }
4744
4745 static void
4746 bnx2_init_tx_context(struct bnx2 *bp, u32 cid, struct bnx2_tx_ring_info *txr)
4747 {
4748         u32 val, offset0, offset1, offset2, offset3;
4749         u32 cid_addr = GET_CID_ADDR(cid);
4750
4751         if (CHIP_NUM(bp) == CHIP_NUM_5709) {
4752                 offset0 = BNX2_L2CTX_TYPE_XI;
4753                 offset1 = BNX2_L2CTX_CMD_TYPE_XI;
4754                 offset2 = BNX2_L2CTX_TBDR_BHADDR_HI_XI;
4755                 offset3 = BNX2_L2CTX_TBDR_BHADDR_LO_XI;
4756         } else {
4757                 offset0 = BNX2_L2CTX_TYPE;
4758                 offset1 = BNX2_L2CTX_CMD_TYPE;
4759                 offset2 = BNX2_L2CTX_TBDR_BHADDR_HI;
4760                 offset3 = BNX2_L2CTX_TBDR_BHADDR_LO;
4761         }
4762         val = BNX2_L2CTX_TYPE_TYPE_L2 | BNX2_L2CTX_TYPE_SIZE_L2;
4763         bnx2_ctx_wr(bp, cid_addr, offset0, val);
4764
4765         val = BNX2_L2CTX_CMD_TYPE_TYPE_L2 | (8 << 16);
4766         bnx2_ctx_wr(bp, cid_addr, offset1, val);
4767
4768         val = (u64) txr->tx_desc_mapping >> 32;
4769         bnx2_ctx_wr(bp, cid_addr, offset2, val);
4770
4771         val = (u64) txr->tx_desc_mapping & 0xffffffff;
4772         bnx2_ctx_wr(bp, cid_addr, offset3, val);
4773 }
4774
4775 static void
4776 bnx2_init_tx_ring(struct bnx2 *bp, int ring_num)
4777 {
4778         struct tx_bd *txbd;
4779         u32 cid = TX_CID;
4780         struct bnx2_napi *bnapi;
4781         struct bnx2_tx_ring_info *txr;
4782
4783         bnapi = &bp->bnx2_napi[ring_num];
4784         txr = &bnapi->tx_ring;
4785
4786         if (ring_num == 0)
4787                 cid = TX_CID;
4788         else
4789                 cid = TX_TSS_CID + ring_num - 1;
4790
4791         bp->tx_wake_thresh = bp->tx_ring_size / 2;
4792
4793         txbd = &txr->tx_desc_ring[MAX_TX_DESC_CNT];
4794
4795         txbd->tx_bd_haddr_hi = (u64) txr->tx_desc_mapping >> 32;
4796         txbd->tx_bd_haddr_lo = (u64) txr->tx_desc_mapping & 0xffffffff;
4797
4798         txr->tx_prod = 0;
4799         txr->tx_prod_bseq = 0;
4800
4801         txr->tx_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BIDX;
4802         txr->tx_bseq_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BSEQ;
4803
4804         bnx2_init_tx_context(bp, cid, txr);
4805 }
4806
4807 static void
4808 bnx2_init_rxbd_rings(struct rx_bd *rx_ring[], dma_addr_t dma[], u32 buf_size,
4809                      int num_rings)
4810 {
4811         int i;
4812         struct rx_bd *rxbd;
4813
4814         for (i = 0; i < num_rings; i++) {
4815                 int j;
4816
4817                 rxbd = &rx_ring[i][0];
4818                 for (j = 0; j < MAX_RX_DESC_CNT; j++, rxbd++) {
4819                         rxbd->rx_bd_len = buf_size;
4820                         rxbd->rx_bd_flags = RX_BD_FLAGS_START | RX_BD_FLAGS_END;
4821                 }
4822                 if (i == (num_rings - 1))
4823                         j = 0;
4824                 else
4825                         j = i + 1;
4826                 rxbd->rx_bd_haddr_hi = (u64) dma[j] >> 32;
4827                 rxbd->rx_bd_haddr_lo = (u64) dma[j] & 0xffffffff;
4828         }
4829 }
4830
/* Initialize one RX ring: program the L2 RX context, chain the BD pages,
 * optionally set up the jumbo page ring, pre-fill the rings with buffers,
 * and publish the initial producer indices to the hardware.
 */
static void
bnx2_init_rx_ring(struct bnx2 *bp, int ring_num)
{
        int i;
        u16 prod, ring_prod;
        u32 cid, rx_cid_addr, val;
        struct bnx2_napi *bnapi = &bp->bnx2_napi[ring_num];
        struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;

        /* Ring 0 uses the base RX CID; additional (RSS) rings use
         * consecutive CIDs starting at RX_RSS_CID. */
        if (ring_num == 0)
                cid = RX_CID;
        else
                cid = RX_RSS_CID + ring_num - 1;

        rx_cid_addr = GET_CID_ADDR(cid);

        bnx2_init_rxbd_rings(rxr->rx_desc_ring, rxr->rx_desc_mapping,
                             bp->rx_buf_use_size, bp->rx_max_ring);

        bnx2_init_rx_context(bp, cid);

        if (CHIP_NUM(bp) == CHIP_NUM_5709) {
                val = REG_RD(bp, BNX2_MQ_MAP_L2_5);
                REG_WR(bp, BNX2_MQ_MAP_L2_5, val | BNX2_MQ_MAP_L2_5_ARM);
        }

        /* Default: no page (jumbo) ring. */
        bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_PG_BUF_SIZE, 0);
        if (bp->rx_pg_ring_size) {
                /* Set up the page ring used for jumbo frames. */
                bnx2_init_rxbd_rings(rxr->rx_pg_desc_ring,
                                     rxr->rx_pg_desc_mapping,
                                     PAGE_SIZE, bp->rx_max_pg_ring);
                val = (bp->rx_buf_use_size << 16) | PAGE_SIZE;
                bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_PG_BUF_SIZE, val);
                bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_RBDC_KEY,
                       BNX2_L2CTX_RBDC_JUMBO_KEY - ring_num);

                val = (u64) rxr->rx_pg_desc_mapping[0] >> 32;
                bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_PG_BDHADDR_HI, val);

                val = (u64) rxr->rx_pg_desc_mapping[0] & 0xffffffff;
                bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_PG_BDHADDR_LO, val);

                if (CHIP_NUM(bp) == CHIP_NUM_5709)
                        REG_WR(bp, BNX2_MQ_MAP_L2_3, BNX2_MQ_MAP_L2_3_DEFAULT);
        }

        /* Give the hardware the DMA address of the first RX BD page. */
        val = (u64) rxr->rx_desc_mapping[0] >> 32;
        bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_BDHADDR_HI, val);

        val = (u64) rxr->rx_desc_mapping[0] & 0xffffffff;
        bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_BDHADDR_LO, val);

        /* Pre-fill the page ring; stop early on allocation failure. */
        ring_prod = prod = rxr->rx_pg_prod;
        for (i = 0; i < bp->rx_pg_ring_size; i++) {
                if (bnx2_alloc_rx_page(bp, rxr, ring_prod) < 0)
                        break;
                prod = NEXT_RX_BD(prod);
                ring_prod = RX_PG_RING_IDX(prod);
        }
        rxr->rx_pg_prod = prod;

        /* Pre-fill the RX ring with skbs; stop early on failure. */
        ring_prod = prod = rxr->rx_prod;
        for (i = 0; i < bp->rx_ring_size; i++) {
                if (bnx2_alloc_rx_skb(bp, rxr, ring_prod) < 0)
                        break;
                prod = NEXT_RX_BD(prod);
                ring_prod = RX_RING_IDX(prod);
        }
        rxr->rx_prod = prod;

        /* Mailbox addresses for publishing producer indices. */
        rxr->rx_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_HOST_BDIDX;
        rxr->rx_bseq_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_HOST_BSEQ;
        rxr->rx_pg_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_HOST_PG_BDIDX;

        /* Publish the initial producer indices to the hardware. */
        REG_WR16(bp, rxr->rx_pg_bidx_addr, rxr->rx_pg_prod);
        REG_WR16(bp, rxr->rx_bidx_addr, prod);

        REG_WR(bp, rxr->rx_bseq_addr, rxr->rx_prod_bseq);
}
4910
/* Initialize all TX and RX rings and, when multiple rings are in use,
 * program the TSS CID map and the RSS indirection table. */
static void
bnx2_init_all_rings(struct bnx2 *bp)
{
        int i;
        u32 val;

        bnx2_clear_ring_states(bp);

        REG_WR(bp, BNX2_TSCH_TSS_CFG, 0);
        for (i = 0; i < bp->num_tx_rings; i++)
                bnx2_init_tx_ring(bp, i);

        /* With multiple TX rings, tell the scheduler how many TSS rings
         * exist and where their CIDs start. */
        if (bp->num_tx_rings > 1)
                REG_WR(bp, BNX2_TSCH_TSS_CFG, ((bp->num_tx_rings - 1) << 24) |
                       (TX_TSS_CID << 7));

        REG_WR(bp, BNX2_RLUP_RSS_CONFIG, 0);
        bnx2_reg_wr_ind(bp, BNX2_RXP_SCRATCH_RSS_TBL_SZ, 0);

        for (i = 0; i < bp->num_rx_rings; i++)
                bnx2_init_rx_ring(bp, i);

        if (bp->num_rx_rings > 1) {
                u32 tbl_32;
                u8 *tbl = (u8 *) &tbl_32;

                bnx2_reg_wr_ind(bp, BNX2_RXP_SCRATCH_RSS_TBL_SZ,
                                BNX2_RXP_SCRATCH_RSS_TBL_MAX_ENTRIES);

                /* Fill the RSS indirection table one byte at a time,
                 * flushing a dword to the RXP scratch area every 4
                 * entries.  The bytes are assembled through tbl_32 and
                 * written via cpu_to_be32 — presumably so the entry
                 * order seen by the RXP firmware is endian-independent;
                 * TODO confirm against the RXP firmware layout before
                 * changing. */
                for (i = 0; i < BNX2_RXP_SCRATCH_RSS_TBL_MAX_ENTRIES; i++) {
                        tbl[i % 4] = i % (bp->num_rx_rings - 1);
                        if ((i % 4) == 3)
                                bnx2_reg_wr_ind(bp,
                                                BNX2_RXP_SCRATCH_RSS_TBL + i,
                                                cpu_to_be32(tbl_32));
                }

                val = BNX2_RLUP_RSS_CONFIG_IPV4_RSS_TYPE_ALL_XI |
                      BNX2_RLUP_RSS_CONFIG_IPV6_RSS_TYPE_ALL_XI;

                REG_WR(bp, BNX2_RLUP_RSS_CONFIG, val);

        }
}
4955
4956 static u32 bnx2_find_max_ring(u32 ring_size, u32 max_size)
4957 {
4958         u32 max, num_rings = 1;
4959
4960         while (ring_size > MAX_RX_DESC_CNT) {
4961                 ring_size -= MAX_RX_DESC_CNT;
4962                 num_rings++;
4963         }
4964         /* round to next power of 2 */
4965         max = max_size;
4966         while ((max & num_rings) == 0)
4967                 max >>= 1;
4968
4969         if (num_rings != max)
4970                 max <<= 1;
4971
4972         return max;
4973 }
4974
4975 static void
4976 bnx2_set_rx_ring_size(struct bnx2 *bp, u32 size)
4977 {
4978         u32 rx_size, rx_space, jumbo_size;
4979
4980         /* 8 for CRC and VLAN */
4981         rx_size = bp->dev->mtu + ETH_HLEN + BNX2_RX_OFFSET + 8;
4982
4983         rx_space = SKB_DATA_ALIGN(rx_size + BNX2_RX_ALIGN) + NET_SKB_PAD +
4984                 sizeof(struct skb_shared_info);
4985
4986         bp->rx_copy_thresh = BNX2_RX_COPY_THRESH;
4987         bp->rx_pg_ring_size = 0;
4988         bp->rx_max_pg_ring = 0;
4989         bp->rx_max_pg_ring_idx = 0;
4990         if ((rx_space > PAGE_SIZE) && !(bp->flags & BNX2_FLAG_JUMBO_BROKEN)) {
4991                 int pages = PAGE_ALIGN(bp->dev->mtu - 40) >> PAGE_SHIFT;
4992
4993                 jumbo_size = size * pages;
4994                 if (jumbo_size > MAX_TOTAL_RX_PG_DESC_CNT)
4995                         jumbo_size = MAX_TOTAL_RX_PG_DESC_CNT;
4996
4997                 bp->rx_pg_ring_size = jumbo_size;
4998                 bp->rx_max_pg_ring = bnx2_find_max_ring(jumbo_size,
4999                                                         MAX_RX_PG_RINGS);
5000                 bp->rx_max_pg_ring_idx = (bp->rx_max_pg_ring * RX_DESC_CNT) - 1;
5001                 rx_size = BNX2_RX_COPY_THRESH + BNX2_RX_OFFSET;
5002                 bp->rx_copy_thresh = 0;
5003         }
5004
5005         bp->rx_buf_use_size = rx_size;
5006         /* hw alignment */
5007         bp->rx_buf_size = bp->rx_buf_use_size + BNX2_RX_ALIGN;
5008         bp->rx_jumbo_thresh = rx_size - BNX2_RX_OFFSET;
5009         bp->rx_ring_size = size;
5010         bp->rx_max_ring = bnx2_find_max_ring(size, MAX_RX_RINGS);
5011         bp->rx_max_ring_idx = (bp->rx_max_ring * RX_DESC_CNT) - 1;
5012 }
5013
5014 static void
5015 bnx2_free_tx_skbs(struct bnx2 *bp)
5016 {
5017         int i;
5018
5019         for (i = 0; i < bp->num_tx_rings; i++) {
5020                 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
5021                 struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
5022                 int j;
5023
5024                 if (txr->tx_buf_ring == NULL)
5025                         continue;
5026
5027                 for (j = 0; j < TX_DESC_CNT; ) {
5028                         struct sw_tx_bd *tx_buf = &txr->tx_buf_ring[j];
5029                         struct sk_buff *skb = tx_buf->skb;
5030
5031                         if (skb == NULL) {
5032                                 j++;
5033                                 continue;
5034                         }
5035
5036                         skb_dma_unmap(&bp->pdev->dev, skb, DMA_TO_DEVICE);
5037
5038                         tx_buf->skb = NULL;
5039
5040                         j += skb_shinfo(skb)->nr_frags + 1;
5041                         dev_kfree_skb(skb);
5042                 }
5043         }
5044 }
5045
5046 static void
5047 bnx2_free_rx_skbs(struct bnx2 *bp)
5048 {
5049         int i;
5050
5051         for (i = 0; i < bp->num_rx_rings; i++) {
5052                 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
5053                 struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
5054                 int j;
5055
5056                 if (rxr->rx_buf_ring == NULL)
5057                         return;
5058
5059                 for (j = 0; j < bp->rx_max_ring_idx; j++) {
5060                         struct sw_bd *rx_buf = &rxr->rx_buf_ring[j];
5061                         struct sk_buff *skb = rx_buf->skb;
5062
5063                         if (skb == NULL)
5064                                 continue;
5065
5066                         pci_unmap_single(bp->pdev,
5067                                          pci_unmap_addr(rx_buf, mapping),
5068                                          bp->rx_buf_use_size,
5069                                          PCI_DMA_FROMDEVICE);
5070
5071                         rx_buf->skb = NULL;
5072
5073                         dev_kfree_skb(skb);
5074                 }
5075                 for (j = 0; j < bp->rx_max_pg_ring_idx; j++)
5076                         bnx2_free_rx_page(bp, rxr, j);
5077         }
5078 }
5079
/* Release all driver-owned TX and RX buffers (used on reset/close). */
static void
bnx2_free_skbs(struct bnx2 *bp)
{
        bnx2_free_tx_skbs(bp);
        bnx2_free_rx_skbs(bp);
}
5086
5087 static int
5088 bnx2_reset_nic(struct bnx2 *bp, u32 reset_code)
5089 {
5090         int rc;
5091
5092         rc = bnx2_reset_chip(bp, reset_code);
5093         bnx2_free_skbs(bp);
5094         if (rc)
5095                 return rc;
5096
5097         if ((rc = bnx2_init_chip(bp)) != 0)
5098                 return rc;
5099
5100         bnx2_init_all_rings(bp);
5101         return 0;
5102 }
5103
5104 static int
5105 bnx2_init_nic(struct bnx2 *bp, int reset_phy)
5106 {
5107         int rc;
5108
5109         if ((rc = bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET)) != 0)
5110                 return rc;
5111
5112         spin_lock_bh(&bp->phy_lock);
5113         bnx2_init_phy(bp, reset_phy);
5114         bnx2_set_link(bp);
5115         if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
5116                 bnx2_remote_phy_event(bp);
5117         spin_unlock_bh(&bp->phy_lock);
5118         return 0;
5119 }
5120
5121 static int
5122 bnx2_shutdown_chip(struct bnx2 *bp)
5123 {
5124         u32 reset_code;
5125
5126         if (bp->flags & BNX2_FLAG_NO_WOL)
5127                 reset_code = BNX2_DRV_MSG_CODE_UNLOAD_LNK_DN;
5128         else if (bp->wol)
5129                 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
5130         else
5131                 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
5132
5133         return bnx2_reset_chip(bp, reset_code);
5134 }
5135
/* Ethtool register self-test.  For each register in reg_tbl, verify
 * that the read/write bits (rw_mask) can be cleared and set, and that
 * the read-only bits (ro_mask) retain their original value.  The
 * original register contents are restored after each check.  Returns 0
 * on success, -ENODEV on the first mismatch.
 */
static int
bnx2_test_registers(struct bnx2 *bp)
{
        int ret;
        int i, is_5709;
        /* {offset, flags, rw_mask, ro_mask} entries, terminated by
         * offset == 0xffff.  Entries flagged BNX2_FL_NOT_5709 are
         * skipped on 5709 chips.
         */
        static const struct {
                u16   offset;
                u16   flags;
#define BNX2_FL_NOT_5709        1
                u32   rw_mask;
                u32   ro_mask;
        } reg_tbl[] = {
                { 0x006c, 0, 0x00000000, 0x0000003f },
                { 0x0090, 0, 0xffffffff, 0x00000000 },
                { 0x0094, 0, 0x00000000, 0x00000000 },

                { 0x0404, BNX2_FL_NOT_5709, 0x00003f00, 0x00000000 },
                { 0x0418, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
                { 0x041c, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
                { 0x0420, BNX2_FL_NOT_5709, 0x00000000, 0x80ffffff },
                { 0x0424, BNX2_FL_NOT_5709, 0x00000000, 0x00000000 },
                { 0x0428, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
                { 0x0450, BNX2_FL_NOT_5709, 0x00000000, 0x0000ffff },
                { 0x0454, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
                { 0x0458, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },

                { 0x0808, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
                { 0x0854, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
                { 0x0868, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
                { 0x086c, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
                { 0x0870, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
                { 0x0874, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },

                { 0x0c00, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
                { 0x0c04, BNX2_FL_NOT_5709, 0x00000000, 0x03ff0001 },
                { 0x0c08, BNX2_FL_NOT_5709,  0x0f0ff073, 0x00000000 },

                { 0x1000, 0, 0x00000000, 0x00000001 },
                { 0x1004, BNX2_FL_NOT_5709, 0x00000000, 0x000f0001 },

                { 0x1408, 0, 0x01c00800, 0x00000000 },
                { 0x149c, 0, 0x8000ffff, 0x00000000 },
                { 0x14a8, 0, 0x00000000, 0x000001ff },
                { 0x14ac, 0, 0x0fffffff, 0x10000000 },
                { 0x14b0, 0, 0x00000002, 0x00000001 },
                { 0x14b8, 0, 0x00000000, 0x00000000 },
                { 0x14c0, 0, 0x00000000, 0x00000009 },
                { 0x14c4, 0, 0x00003fff, 0x00000000 },
                { 0x14cc, 0, 0x00000000, 0x00000001 },
                { 0x14d0, 0, 0xffffffff, 0x00000000 },

                { 0x1800, 0, 0x00000000, 0x00000001 },
                { 0x1804, 0, 0x00000000, 0x00000003 },

                { 0x2800, 0, 0x00000000, 0x00000001 },
                { 0x2804, 0, 0x00000000, 0x00003f01 },
                { 0x2808, 0, 0x0f3f3f03, 0x00000000 },
                { 0x2810, 0, 0xffff0000, 0x00000000 },
                { 0x2814, 0, 0xffff0000, 0x00000000 },
                { 0x2818, 0, 0xffff0000, 0x00000000 },
                { 0x281c, 0, 0xffff0000, 0x00000000 },
                { 0x2834, 0, 0xffffffff, 0x00000000 },
                { 0x2840, 0, 0x00000000, 0xffffffff },
                { 0x2844, 0, 0x00000000, 0xffffffff },
                { 0x2848, 0, 0xffffffff, 0x00000000 },
                { 0x284c, 0, 0xf800f800, 0x07ff07ff },

                { 0x2c00, 0, 0x00000000, 0x00000011 },
                { 0x2c04, 0, 0x00000000, 0x00030007 },

                { 0x3c00, 0, 0x00000000, 0x00000001 },
                { 0x3c04, 0, 0x00000000, 0x00070000 },
                { 0x3c08, 0, 0x00007f71, 0x07f00000 },
                { 0x3c0c, 0, 0x1f3ffffc, 0x00000000 },
                { 0x3c10, 0, 0xffffffff, 0x00000000 },
                { 0x3c14, 0, 0x00000000, 0xffffffff },
                { 0x3c18, 0, 0x00000000, 0xffffffff },
                { 0x3c1c, 0, 0xfffff000, 0x00000000 },
                { 0x3c20, 0, 0xffffff00, 0x00000000 },

                { 0x5004, 0, 0x00000000, 0x0000007f },
                { 0x5008, 0, 0x0f0007ff, 0x00000000 },

                { 0x5c00, 0, 0x00000000, 0x00000001 },
                { 0x5c04, 0, 0x00000000, 0x0003000f },
                { 0x5c08, 0, 0x00000003, 0x00000000 },
                { 0x5c0c, 0, 0x0000fff8, 0x00000000 },
                { 0x5c10, 0, 0x00000000, 0xffffffff },
                { 0x5c80, 0, 0x00000000, 0x0f7113f1 },
                { 0x5c84, 0, 0x00000000, 0x0000f333 },
                { 0x5c88, 0, 0x00000000, 0x00077373 },
                { 0x5c8c, 0, 0x00000000, 0x0007f737 },

                { 0x6808, 0, 0x0000ff7f, 0x00000000 },
                { 0x680c, 0, 0xffffffff, 0x00000000 },
                { 0x6810, 0, 0xffffffff, 0x00000000 },
                { 0x6814, 0, 0xffffffff, 0x00000000 },
                { 0x6818, 0, 0xffffffff, 0x00000000 },
                { 0x681c, 0, 0xffffffff, 0x00000000 },
                { 0x6820, 0, 0x00ff00ff, 0x00000000 },
                { 0x6824, 0, 0x00ff00ff, 0x00000000 },
                { 0x6828, 0, 0x00ff00ff, 0x00000000 },
                { 0x682c, 0, 0x03ff03ff, 0x00000000 },
                { 0x6830, 0, 0x03ff03ff, 0x00000000 },
                { 0x6834, 0, 0x03ff03ff, 0x00000000 },
                { 0x6838, 0, 0x03ff03ff, 0x00000000 },
                { 0x683c, 0, 0x0000ffff, 0x00000000 },
                { 0x6840, 0, 0x00000ff0, 0x00000000 },
                { 0x6844, 0, 0x00ffff00, 0x00000000 },
                { 0x684c, 0, 0xffffffff, 0x00000000 },
                { 0x6850, 0, 0x7f7f7f7f, 0x00000000 },
                { 0x6854, 0, 0x7f7f7f7f, 0x00000000 },
                { 0x6858, 0, 0x7f7f7f7f, 0x00000000 },
                { 0x685c, 0, 0x7f7f7f7f, 0x00000000 },
                { 0x6908, 0, 0x00000000, 0x0001ff0f },
                { 0x690c, 0, 0x00000000, 0x0ffe00f0 },

                { 0xffff, 0, 0x00000000, 0x00000000 },
        };

        ret = 0;
        is_5709 = 0;
        if (CHIP_NUM(bp) == CHIP_NUM_5709)
                is_5709 = 1;

        for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
                u32 offset, rw_mask, ro_mask, save_val, val;
                u16 flags = reg_tbl[i].flags;

                if (is_5709 && (flags & BNX2_FL_NOT_5709))
                        continue;

                offset = (u32) reg_tbl[i].offset;
                rw_mask = reg_tbl[i].rw_mask;
                ro_mask = reg_tbl[i].ro_mask;

                save_val = readl(bp->regview + offset);

                /* Write all 0s: rw bits must read back 0, ro bits must
                 * be unchanged.
                 */
                writel(0, bp->regview + offset);

                val = readl(bp->regview + offset);
                if ((val & rw_mask) != 0) {
                        goto reg_test_err;
                }

                if ((val & ro_mask) != (save_val & ro_mask)) {
                        goto reg_test_err;
                }

                /* Write all 1s: rw bits must read back 1, ro bits must
                 * be unchanged.
                 */
                writel(0xffffffff, bp->regview + offset);

                val = readl(bp->regview + offset);
                if ((val & rw_mask) != rw_mask) {
                        goto reg_test_err;
                }

                if ((val & ro_mask) != (save_val & ro_mask)) {
                        goto reg_test_err;
                }

                /* Restore the original value before moving on. */
                writel(save_val, bp->regview + offset);
                continue;

reg_test_err:
                writel(save_val, bp->regview + offset);
                ret = -ENODEV;
                break;
        }
        return ret;
}
5306
5307 static int
5308 bnx2_do_mem_test(struct bnx2 *bp, u32 start, u32 size)
5309 {
5310         static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0x55555555,
5311                 0xaaaaaaaa , 0xaa55aa55, 0x55aa55aa };
5312         int i;
5313
5314         for (i = 0; i < sizeof(test_pattern) / 4; i++) {
5315                 u32 offset;
5316
5317                 for (offset = 0; offset < size; offset += 4) {
5318
5319                         bnx2_reg_wr_ind(bp, start + offset, test_pattern[i]);
5320
5321                         if (bnx2_reg_rd_ind(bp, start + offset) !=
5322                                 test_pattern[i]) {
5323                                 return -ENODEV;
5324                         }
5325                 }
5326         }
5327         return 0;
5328 }
5329
5330 static int
5331 bnx2_test_memory(struct bnx2 *bp)
5332 {
5333         int ret = 0;
5334         int i;
5335         static struct mem_entry {
5336                 u32   offset;
5337                 u32   len;
5338         } mem_tbl_5706[] = {
5339                 { 0x60000,  0x4000 },
5340                 { 0xa0000,  0x3000 },
5341                 { 0xe0000,  0x4000 },
5342                 { 0x120000, 0x4000 },
5343                 { 0x1a0000, 0x4000 },
5344                 { 0x160000, 0x4000 },
5345                 { 0xffffffff, 0    },
5346         },
5347         mem_tbl_5709[] = {
5348                 { 0x60000,  0x4000 },
5349                 { 0xa0000,  0x3000 },
5350                 { 0xe0000,  0x4000 },
5351                 { 0x120000, 0x4000 },
5352                 { 0x1a0000, 0x4000 },
5353                 { 0xffffffff, 0    },
5354         };
5355         struct mem_entry *mem_tbl;
5356
5357         if (CHIP_NUM(bp) == CHIP_NUM_5709)
5358                 mem_tbl = mem_tbl_5709;
5359         else
5360                 mem_tbl = mem_tbl_5706;
5361
5362         for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
5363                 if ((ret = bnx2_do_mem_test(bp, mem_tbl[i].offset,
5364                         mem_tbl[i].len)) != 0) {
5365                         return ret;
5366                 }
5367         }
5368
5369         return ret;
5370 }
5371
5372 #define BNX2_MAC_LOOPBACK       0
5373 #define BNX2_PHY_LOOPBACK       1
5374
/* Send a single test packet through MAC or PHY loopback and verify it
 * arrives intact on the first rx ring.  Returns 0 on success, -EINVAL
 * for an unknown mode, -ENOMEM/-EIO on allocation/mapping failure, and
 * -ENODEV when the packet is lost or corrupted.
 */
static int
bnx2_run_loopback(struct bnx2 *bp, int loopback_mode)
{
        unsigned int pkt_size, num_pkts, i;
        struct sk_buff *skb, *rx_skb;
        unsigned char *packet;
        u16 rx_start_idx, rx_idx;
        dma_addr_t map;
        struct tx_bd *txbd;
        struct sw_bd *rx_buf;
        struct l2_fhdr *rx_hdr;
        int ret = -ENODEV;
        struct bnx2_napi *bnapi = &bp->bnx2_napi[0], *tx_napi;
        struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
        struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;

        tx_napi = bnapi;

        /* NOTE(review): txr/rxr are re-assigned to the same values as
         * their initializers above; redundant but harmless.
         */
        txr = &tx_napi->tx_ring;
        rxr = &bnapi->rx_ring;
        if (loopback_mode == BNX2_MAC_LOOPBACK) {
                bp->loopback = MAC_LOOPBACK;
                bnx2_set_mac_loopback(bp);
        }
        else if (loopback_mode == BNX2_PHY_LOOPBACK) {
                /* PHY loopback cannot be driven when the PHY is managed
                 * remotely; report success so the test is skipped.
                 */
                if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
                        return 0;

                bp->loopback = PHY_LOOPBACK;
                bnx2_set_phy_loopback(bp);
        }
        else
                return -EINVAL;

        /* Build the test frame: destination MAC = own address, zeroed
         * source/type area, then a predictable byte pattern payload.
         */
        pkt_size = min(bp->dev->mtu + ETH_HLEN, bp->rx_jumbo_thresh - 4);
        skb = netdev_alloc_skb(bp->dev, pkt_size);
        if (!skb)
                return -ENOMEM;
        packet = skb_put(skb, pkt_size);
        memcpy(packet, bp->dev->dev_addr, 6);
        memset(packet + 6, 0x0, 8);
        for (i = 14; i < pkt_size; i++)
                packet[i] = (unsigned char) (i & 0xff);

        if (skb_dma_map(&bp->pdev->dev, skb, DMA_TO_DEVICE)) {
                dev_kfree_skb(skb);
                return -EIO;
        }
        map = skb_shinfo(skb)->dma_maps[0];

        /* Force a status block update so rx_start_idx is current. */
        REG_WR(bp, BNX2_HC_COMMAND,
               bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);

        REG_RD(bp, BNX2_HC_COMMAND);

        udelay(5);
        rx_start_idx = bnx2_get_hw_rx_cons(bnapi);

        num_pkts = 0;

        /* Post one tx buffer descriptor covering the whole frame. */
        txbd = &txr->tx_desc_ring[TX_RING_IDX(txr->tx_prod)];

        txbd->tx_bd_haddr_hi = (u64) map >> 32;
        txbd->tx_bd_haddr_lo = (u64) map & 0xffffffff;
        txbd->tx_bd_mss_nbytes = pkt_size;
        txbd->tx_bd_vlan_tag_flags = TX_BD_FLAGS_START | TX_BD_FLAGS_END;

        num_pkts++;
        txr->tx_prod = NEXT_TX_BD(txr->tx_prod);
        txr->tx_prod_bseq += pkt_size;

        /* Ring the tx doorbell. */
        REG_WR16(bp, txr->tx_bidx_addr, txr->tx_prod);
        REG_WR(bp, txr->tx_bseq_addr, txr->tx_prod_bseq);

        udelay(100);

        /* Force another status block update to pick up completions. */
        REG_WR(bp, BNX2_HC_COMMAND,
               bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);

        REG_RD(bp, BNX2_HC_COMMAND);

        udelay(5);

        skb_dma_unmap(&bp->pdev->dev, skb, DMA_TO_DEVICE);
        dev_kfree_skb(skb);

        /* The tx packet must be fully consumed ... */
        if (bnx2_get_hw_tx_cons(tx_napi) != txr->tx_prod)
                goto loopback_test_done;

        /* ... and exactly num_pkts packets must have been received. */
        rx_idx = bnx2_get_hw_rx_cons(bnapi);
        if (rx_idx != rx_start_idx + num_pkts) {
                goto loopback_test_done;
        }

        rx_buf = &rxr->rx_buf_ring[rx_start_idx];
        rx_skb = rx_buf->skb;

        /* The chip places an l2_fhdr before the frame data. */
        rx_hdr = (struct l2_fhdr *) rx_skb->data;
        skb_reserve(rx_skb, BNX2_RX_OFFSET);

        pci_dma_sync_single_for_cpu(bp->pdev,
                pci_unmap_addr(rx_buf, mapping),
                bp->rx_buf_size, PCI_DMA_FROMDEVICE);

        /* Reject any frame the chip flagged as errored. */
        if (rx_hdr->l2_fhdr_status &
                (L2_FHDR_ERRORS_BAD_CRC |
                L2_FHDR_ERRORS_PHY_DECODE |
                L2_FHDR_ERRORS_ALIGNMENT |
                L2_FHDR_ERRORS_TOO_SHORT |
                L2_FHDR_ERRORS_GIANT_FRAME)) {

                goto loopback_test_done;
        }

        /* Length must match (the hw length includes the 4-byte CRC). */
        if ((rx_hdr->l2_fhdr_pkt_len - 4) != pkt_size) {
                goto loopback_test_done;
        }

        /* Verify the payload pattern byte by byte. */
        for (i = 14; i < pkt_size; i++) {
                if (*(rx_skb->data + i) != (unsigned char) (i & 0xff)) {
                        goto loopback_test_done;
                }
        }

        ret = 0;

loopback_test_done:
        bp->loopback = 0;
        return ret;
}
5505
5506 #define BNX2_MAC_LOOPBACK_FAILED        1
5507 #define BNX2_PHY_LOOPBACK_FAILED        2
5508 #define BNX2_LOOPBACK_FAILED            (BNX2_MAC_LOOPBACK_FAILED |     \
5509                                          BNX2_PHY_LOOPBACK_FAILED)
5510
5511 static int
5512 bnx2_test_loopback(struct bnx2 *bp)
5513 {
5514         int rc = 0;
5515
5516         if (!netif_running(bp->dev))
5517                 return BNX2_LOOPBACK_FAILED;
5518
5519         bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET);
5520         spin_lock_bh(&bp->phy_lock);
5521         bnx2_init_phy(bp, 1);
5522         spin_unlock_bh(&bp->phy_lock);
5523         if (bnx2_run_loopback(bp, BNX2_MAC_LOOPBACK))
5524                 rc |= BNX2_MAC_LOOPBACK_FAILED;
5525         if (bnx2_run_loopback(bp, BNX2_PHY_LOOPBACK))
5526                 rc |= BNX2_PHY_LOOPBACK_FAILED;
5527         return rc;
5528 }
5529
5530 #define NVRAM_SIZE 0x200
5531 #define CRC32_RESIDUAL 0xdebb20e3
5532
5533 static int
5534 bnx2_test_nvram(struct bnx2 *bp)
5535 {
5536         __be32 buf[NVRAM_SIZE / 4];
5537         u8 *data = (u8 *) buf;
5538         int rc = 0;
5539         u32 magic, csum;
5540
5541         if ((rc = bnx2_nvram_read(bp, 0, data, 4)) != 0)
5542                 goto test_nvram_done;
5543
5544         magic = be32_to_cpu(buf[0]);
5545         if (magic != 0x669955aa) {
5546                 rc = -ENODEV;
5547                 goto test_nvram_done;
5548         }
5549
5550         if ((rc = bnx2_nvram_read(bp, 0x100, data, NVRAM_SIZE)) != 0)
5551                 goto test_nvram_done;
5552
5553         csum = ether_crc_le(0x100, data);
5554         if (csum != CRC32_RESIDUAL) {
5555                 rc = -ENODEV;
5556                 goto test_nvram_done;
5557         }
5558
5559         csum = ether_crc_le(0x100, data + 0x100);
5560         if (csum != CRC32_RESIDUAL) {
5561                 rc = -ENODEV;
5562         }
5563
5564 test_nvram_done:
5565         return rc;
5566 }
5567
5568 static int
5569 bnx2_test_link(struct bnx2 *bp)
5570 {
5571         u32 bmsr;
5572
5573         if (!netif_running(bp->dev))
5574                 return -ENODEV;
5575
5576         if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
5577                 if (bp->link_up)
5578                         return 0;
5579                 return -ENODEV;
5580         }
5581         spin_lock_bh(&bp->phy_lock);
5582         bnx2_enable_bmsr1(bp);
5583         bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
5584         bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
5585         bnx2_disable_bmsr1(bp);
5586         spin_unlock_bh(&bp->phy_lock);
5587
5588         if (bmsr & BMSR_LSTATUS) {
5589                 return 0;
5590         }
5591         return -ENODEV;
5592 }
5593
5594 static int
5595 bnx2_test_intr(struct bnx2 *bp)
5596 {
5597         int i;
5598         u16 status_idx;
5599
5600         if (!netif_running(bp->dev))
5601                 return -ENODEV;
5602
5603         status_idx = REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff;
5604
5605         /* This register is not touched during run-time. */
5606         REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
5607         REG_RD(bp, BNX2_HC_COMMAND);
5608
5609         for (i = 0; i < 10; i++) {
5610                 if ((REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff) !=
5611                         status_idx) {
5612
5613                         break;
5614                 }
5615
5616                 msleep_interruptible(10);
5617         }
5618         if (i < 10)
5619                 return 0;
5620
5621         return -ENODEV;
5622 }
5623
/* Determining link for parallel detection. */
static int
bnx2_5706_serdes_has_link(struct bnx2 *bp)
{
        u32 mode_ctl, an_dbg, exp;

        if (bp->phy_flags & BNX2_PHY_FLAG_NO_PARALLEL)
                return 0;

        /* No link without signal detect. */
        bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_MODE_CTL);
        bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &mode_ctl);

        if (!(mode_ctl & MISC_SHDW_MODE_CTL_SIG_DET))
                return 0;

        /* Read twice -- presumably the AN debug bits latch, so the
         * second read reflects current state.
         */
        bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
        bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);
        bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);

        if (an_dbg & (MISC_SHDW_AN_DBG_NOSYNC | MISC_SHDW_AN_DBG_RUDI_INVALID))
                return 0;

        /* Double read again for the expansion register. */
        bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS, MII_EXPAND_REG1);
        bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &exp);
        bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &exp);

        if (exp & MII_EXPAND_REG1_RUDI_C)       /* receiving CONFIG */
                return 0;

        return 1;
}
5655
/* Periodic SerDes maintenance for 5706: implements parallel detection
 * (forcing 1G full duplex when the partner does not autonegotiate) and
 * falls back to autoneg when the partner later does.  Runs from
 * bnx2_timer() under the PHY spinlock.
 */
static void
bnx2_5706_serdes_timer(struct bnx2 *bp)
{
        int check_link = 1;

        spin_lock(&bp->phy_lock);
        if (bp->serdes_an_pending) {
                /* An autoneg restart is in flight; count it down and
                 * skip the link check this tick.
                 */
                bp->serdes_an_pending--;
                check_link = 0;
        } else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
                u32 bmcr;

                bp->current_interval = BNX2_TIMER_INTERVAL;

                bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

                /* Link is down with autoneg enabled: if the partner has
                 * signal but no autoneg, force 1G full duplex
                 * (parallel detect).
                 */
                if (bmcr & BMCR_ANENABLE) {
                        if (bnx2_5706_serdes_has_link(bp)) {
                                bmcr &= ~BMCR_ANENABLE;
                                bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
                                bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
                                bp->phy_flags |= BNX2_PHY_FLAG_PARALLEL_DETECT;
                        }
                }
        }
        else if ((bp->link_up) && (bp->autoneg & AUTONEG_SPEED) &&
                 (bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT)) {
                u32 phy2;

                /* Link is up via parallel detect: if the partner now
                 * autonegotiates (bit 0x20 in shadow reg 0x15 after
                 * selecting 0x0f01 via reg 0x17), re-enable autoneg.
                 */
                bnx2_write_phy(bp, 0x17, 0x0f01);
                bnx2_read_phy(bp, 0x15, &phy2);
                if (phy2 & 0x20) {
                        u32 bmcr;

                        bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
                        bmcr |= BMCR_ANENABLE;
                        bnx2_write_phy(bp, bp->mii_bmcr, bmcr);

                        bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;
                }
        } else
                bp->current_interval = BNX2_TIMER_INTERVAL;

        if (check_link) {
                u32 val;

                /* Double read of the latched AN debug status, then keep
                 * software link state in sync with sync status, forcing
                 * the link down once before re-evaluating.
                 */
                bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
                bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &val);
                bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &val);

                if (bp->link_up && (val & MISC_SHDW_AN_DBG_NOSYNC)) {
                        if (!(bp->phy_flags & BNX2_PHY_FLAG_FORCED_DOWN)) {
                                bnx2_5706s_force_link_dn(bp, 1);
                                bp->phy_flags |= BNX2_PHY_FLAG_FORCED_DOWN;
                        } else
                                bnx2_set_link(bp);
                } else if (!bp->link_up && !(val & MISC_SHDW_AN_DBG_NOSYNC))
                        bnx2_set_link(bp);
        }
        spin_unlock(&bp->phy_lock);
}
5717
/* Periodic SerDes maintenance for 5708: when link stays down with
 * autoneg enabled, alternate between forced 2.5G and autoneg to find a
 * partner that only supports one of the two.  Runs from bnx2_timer().
 */
static void
bnx2_5708_serdes_timer(struct bnx2 *bp)
{
        /* Remotely managed PHYs handle this in firmware. */
        if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
                return;

        if ((bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) == 0) {
                bp->serdes_an_pending = 0;
                return;
        }

        spin_lock(&bp->phy_lock);
        if (bp->serdes_an_pending)
                bp->serdes_an_pending--;
        else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
                u32 bmcr;

                /* Toggle between forced 2.5G and autoneg; after
                 * re-enabling autoneg, wait two timer ticks
                 * (serdes_an_pending) before toggling again.
                 */
                bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
                if (bmcr & BMCR_ANENABLE) {
                        bnx2_enable_forced_2g5(bp);
                        bp->current_interval = BNX2_SERDES_FORCED_TIMEOUT;
                } else {
                        bnx2_disable_forced_2g5(bp);
                        bp->serdes_an_pending = 2;
                        bp->current_interval = BNX2_TIMER_INTERVAL;
                }

        } else
                bp->current_interval = BNX2_TIMER_INTERVAL;

        spin_unlock(&bp->phy_lock);
}
5750
/* Periodic driver timer: firmware heartbeat, statistics workarounds,
 * missed-MSI detection, and SerDes link maintenance.  Re-arms itself
 * with the (possibly updated) current_interval.
 */
static void
bnx2_timer(unsigned long data)
{
        struct bnx2 *bp = (struct bnx2 *) data;

        if (!netif_running(bp->dev))
                return;

        /* Interrupts disabled (e.g. during reset): just re-arm. */
        if (atomic_read(&bp->intr_sem) != 0)
                goto bnx2_restart_timer;

        /* Plain (non one-shot) MSI can be lost; check for it. */
        if ((bp->flags & (BNX2_FLAG_USING_MSI | BNX2_FLAG_ONE_SHOT_MSI)) ==
             BNX2_FLAG_USING_MSI)
                bnx2_chk_missed_msi(bp);

        bnx2_send_heart_beat(bp);

        /* Firmware-maintained drop counter lives in indirect space. */
        bp->stats_blk->stat_FwRxDrop =
                bnx2_reg_rd_ind(bp, BNX2_FW_RX_DROP_COUNT);

        /* workaround occasional corrupted counters */
        if (CHIP_NUM(bp) == CHIP_NUM_5708 && bp->stats_ticks)
                REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd |
                                            BNX2_HC_COMMAND_STATS_NOW);

        if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
                if (CHIP_NUM(bp) == CHIP_NUM_5706)
                        bnx2_5706_serdes_timer(bp);
                else
                        bnx2_5708_serdes_timer(bp);
        }

bnx2_restart_timer:
        mod_timer(&bp->timer, jiffies + bp->current_interval);
}
5786
5787 static int
5788 bnx2_request_irq(struct bnx2 *bp)
5789 {
5790         unsigned long flags;
5791         struct bnx2_irq *irq;
5792         int rc = 0, i;
5793
5794         if (bp->flags & BNX2_FLAG_USING_MSI_OR_MSIX)
5795                 flags = 0;
5796         else
5797                 flags = IRQF_SHARED;
5798
5799         for (i = 0; i < bp->irq_nvecs; i++) {
5800                 irq = &bp->irq_tbl[i];
5801                 rc = request_irq(irq->vector, irq->handler, flags, irq->name,
5802                                  &bp->bnx2_napi[i]);
5803                 if (rc)
5804                         break;
5805                 irq->requested = 1;
5806         }
5807         return rc;
5808 }
5809
5810 static void
5811 bnx2_free_irq(struct bnx2 *bp)
5812 {
5813         struct bnx2_irq *irq;
5814         int i;
5815
5816         for (i = 0; i < bp->irq_nvecs; i++) {
5817                 irq = &bp->irq_tbl[i];
5818                 if (irq->requested)
5819                         free_irq(irq->vector, &bp->bnx2_napi[i]);
5820                 irq->requested = 0;
5821         }
5822         if (bp->flags & BNX2_FLAG_USING_MSI)
5823                 pci_disable_msi(bp->pdev);
5824         else if (bp->flags & BNX2_FLAG_USING_MSIX)
5825                 pci_disable_msix(bp->pdev);
5826
5827         bp->flags &= ~(BNX2_FLAG_USING_MSI_OR_MSIX | BNX2_FLAG_ONE_SHOT_MSI);
5828 }
5829
/* Try to switch the device into MSI-X mode with @msix_vecs vectors.
 * On any failure this returns silently: bp->flags and bp->irq_nvecs are
 * only updated after pci_enable_msix() succeeds, so the caller simply
 * stays in INTx/MSI mode.
 */
static void
bnx2_enable_msix(struct bnx2 *bp, int msix_vecs)
{
	int i, rc;
	struct msix_entry msix_ent[BNX2_MAX_MSIX_VEC];
	struct net_device *dev = bp->dev;
	const int len = sizeof(bp->irq_tbl[0].name);

	/* Point the chip's MSI-X table and PBA at the GRC windows before
	 * enabling the capability.
	 */
	bnx2_setup_msix_tbl(bp);
	REG_WR(bp, BNX2_PCI_MSIX_CONTROL, BNX2_MAX_MSIX_HW_VEC - 1);
	REG_WR(bp, BNX2_PCI_MSIX_TBL_OFF_BIR, BNX2_PCI_GRC_WINDOW2_BASE);
	REG_WR(bp, BNX2_PCI_MSIX_PBA_OFF_BIT, BNX2_PCI_GRC_WINDOW3_BASE);

	/* All hardware vectors are requested even if fewer (@msix_vecs)
	 * will actually be used; names are "<ifname>-<index>".
	 */
	for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) {
		msix_ent[i].entry = i;
		msix_ent[i].vector = 0;

		snprintf(bp->irq_tbl[i].name, len, "%s-%d", dev->name, i);
		bp->irq_tbl[i].handler = bnx2_msi_1shot;
	}

	rc = pci_enable_msix(bp->pdev, msix_ent, BNX2_MAX_MSIX_VEC);
	if (rc != 0)
		return;

	bp->irq_nvecs = msix_vecs;
	bp->flags |= BNX2_FLAG_USING_MSIX | BNX2_FLAG_ONE_SHOT_MSI;
	for (i = 0; i < BNX2_MAX_MSIX_VEC; i++)
		bp->irq_tbl[i].vector = msix_ent[i].vector;
}
5860
/* Select the interrupt mode (MSI-X, MSI, or legacy INTx) and size the
 * tx/rx ring counts from the number of vectors obtained.  @dis_msi
 * forces INTx; it is set from the "disable_msi" module parameter and by
 * the MSI self-test fallback in bnx2_open().
 */
static void
bnx2_setup_int_mode(struct bnx2 *bp, int dis_msi)
{
	int cpus = num_online_cpus();
	int msix_vecs = min(cpus + 1, RX_MAX_RINGS);

	/* Default: a single INTx vector. */
	bp->irq_tbl[0].handler = bnx2_interrupt;
	strcpy(bp->irq_tbl[0].name, bp->dev->name);
	bp->irq_nvecs = 1;
	bp->irq_tbl[0].vector = bp->pdev->irq;

	/* MSI-X is only attempted with more than one online CPU. */
	if ((bp->flags & BNX2_FLAG_MSIX_CAP) && !dis_msi && cpus > 1)
		bnx2_enable_msix(bp, msix_vecs);

	/* Fall back to single-vector MSI if MSI-X was not enabled. */
	if ((bp->flags & BNX2_FLAG_MSI_CAP) && !dis_msi &&
	    !(bp->flags & BNX2_FLAG_USING_MSIX)) {
		if (pci_enable_msi(bp->pdev) == 0) {
			bp->flags |= BNX2_FLAG_USING_MSI;
			if (CHIP_NUM(bp) == CHIP_NUM_5709) {
				/* 5709 gets the one-shot MSI handler. */
				bp->flags |= BNX2_FLAG_ONE_SHOT_MSI;
				bp->irq_tbl[0].handler = bnx2_msi_1shot;
			} else
				bp->irq_tbl[0].handler = bnx2_msi;

			bp->irq_tbl[0].vector = bp->pdev->irq;
		}
	}

	/* Tx ring count is rounded down to a power of two; rx rings match
	 * the vector count exactly.
	 */
	bp->num_tx_rings = rounddown_pow_of_two(bp->irq_nvecs);
	bp->dev->real_num_tx_queues = bp->num_tx_rings;

	bp->num_rx_rings = bp->irq_nvecs;
}
5894
/* Called with rtnl_lock.
 *
 * Bring the interface up: power the chip, choose an interrupt mode,
 * allocate rings, request IRQs and initialize the NIC.  If MSI is in
 * use, it is self-tested (some chipsets deliver no MSI at all); on test
 * failure the device is torn down and reopened in INTx mode.  Any error
 * unwinds through open_err.  Returns 0 on success or a negative errno.
 */
static int
bnx2_open(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	int rc;

	netif_carrier_off(dev);

	bnx2_set_power_state(bp, PCI_D0);
	bnx2_disable_int(bp);

	bnx2_setup_int_mode(bp, disable_msi);
	bnx2_napi_enable(bp);
	rc = bnx2_alloc_mem(bp);
	if (rc)
		goto open_err;

	rc = bnx2_request_irq(bp);
	if (rc)
		goto open_err;

	rc = bnx2_init_nic(bp, 1);
	if (rc)
		goto open_err;

	mod_timer(&bp->timer, jiffies + bp->current_interval);

	atomic_set(&bp->intr_sem, 0);

	bnx2_enable_int(bp);

	if (bp->flags & BNX2_FLAG_USING_MSI) {
		/* Test MSI to make sure it is working
		 * If MSI test fails, go back to INTx mode
		 */
		if (bnx2_test_intr(bp) != 0) {
			printk(KERN_WARNING PFX "%s: No interrupt was generated"
			       " using MSI, switching to INTx mode. Please"
			       " report this failure to the PCI maintainer"
			       " and include system chipset information.\n",
			       bp->dev->name);

			bnx2_disable_int(bp);
			bnx2_free_irq(bp);

			/* dis_msi=1 forces INTx on the retry. */
			bnx2_setup_int_mode(bp, 1);

			rc = bnx2_init_nic(bp, 0);

			if (!rc)
				rc = bnx2_request_irq(bp);

			if (rc) {
				/* Timer was armed above; stop it before
				 * unwinding.
				 */
				del_timer_sync(&bp->timer);
				goto open_err;
			}
			bnx2_enable_int(bp);
		}
	}
	if (bp->flags & BNX2_FLAG_USING_MSI)
		printk(KERN_INFO PFX "%s: using MSI\n", dev->name);
	else if (bp->flags & BNX2_FLAG_USING_MSIX)
		printk(KERN_INFO PFX "%s: using MSIX\n", dev->name);

	netif_tx_start_all_queues(dev);

	return 0;

open_err:
	bnx2_napi_disable(bp);
	bnx2_free_skbs(bp);
	bnx2_free_irq(bp);
	bnx2_free_mem(bp);
	return rc;
}
5971
/* Workqueue handler scheduled by bnx2_tx_timeout(): re-initialize the
 * NIC with traffic stopped.
 */
static void
bnx2_reset_task(struct work_struct *work)
{
	struct bnx2 *bp = container_of(work, struct bnx2, reset_task);

	if (!netif_running(bp->dev))
		return;

	bnx2_netif_stop(bp);

	bnx2_init_nic(bp, 1);

	/* NOTE(review): intr_sem is raised here to gate interrupt handling;
	 * presumably bnx2_netif_start() clears it when re-enabling
	 * interrupts — confirm against bnx2_enable_int().
	 */
	atomic_set(&bp->intr_sem, 1);
	bnx2_netif_start(bp);
}
5987
/* netdev tx-timeout hook: defer the chip reset to process context via
 * the reset_task workqueue item (we may be called in softirq context).
 */
static void
bnx2_tx_timeout(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);

	/* This allows the netif to be shutdown gracefully before resetting */
	schedule_work(&bp->reset_task);
}
5996
#ifdef BCM_VLAN
/* Called with rtnl_lock.
 *
 * Install (or clear, @vlgrp == NULL) the VLAN group and reprogram the
 * rx filters around a netif stop/start.  If the firmware can keep VLAN
 * tags, notify it so tags are preserved across resets.
 */
static void
bnx2_vlan_rx_register(struct net_device *dev, struct vlan_group *vlgrp)
{
	struct bnx2 *bp = netdev_priv(dev);

	bnx2_netif_stop(bp);

	bp->vlgrp = vlgrp;
	bnx2_set_rx_mode(dev);
	if (bp->flags & BNX2_FLAG_CAN_KEEP_VLAN)
		bnx2_fw_sync(bp, BNX2_DRV_MSG_CODE_KEEP_VLAN_UPDATE, 0, 1);

	bnx2_netif_start(bp);
}
#endif
6014
/* Called with netif_tx_lock.
 * bnx2_tx_int() runs without netif_tx_lock unless it needs to call
 * netif_wake_queue().
 *
 * Hard-start transmit: map the skb (linear part plus page fragments),
 * fill one tx BD per segment on the queue-mapped ring, and ring the
 * doorbell.  Checksum-offload, VLAN-tag and LSO flags are folded into
 * the first BD.  Returns NETDEV_TX_OK, or NETDEV_TX_BUSY only in the
 * "ring full while queue awake" error case.
 */
static int
bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	dma_addr_t mapping;
	struct tx_bd *txbd;
	struct sw_tx_bd *tx_buf;
	u32 len, vlan_tag_flags, last_frag, mss;
	u16 prod, ring_prod;
	int i;
	struct bnx2_napi *bnapi;
	struct bnx2_tx_ring_info *txr;
	struct netdev_queue *txq;
	struct skb_shared_info *sp;

	/*  Determine which tx ring we will be placed on */
	i = skb_get_queue_mapping(skb);
	bnapi = &bp->bnx2_napi[i];
	txr = &bnapi->tx_ring;
	txq = netdev_get_tx_queue(dev, i);

	/* The stack should have stopped the queue before the ring could
	 * fill; hitting this indicates a flow-control bug.
	 */
	if (unlikely(bnx2_tx_avail(bp, txr) <
	    (skb_shinfo(skb)->nr_frags + 1))) {
		netif_tx_stop_queue(txq);
		printk(KERN_ERR PFX "%s: BUG! Tx ring full when queue awake!\n",
			dev->name);

		return NETDEV_TX_BUSY;
	}
	len = skb_headlen(skb);
	prod = txr->tx_prod;
	ring_prod = TX_RING_IDX(prod);

	vlan_tag_flags = 0;
	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		vlan_tag_flags |= TX_BD_FLAGS_TCP_UDP_CKSUM;
	}

#ifdef BCM_VLAN
	/* VLAN tag rides in the upper 16 bits of the flags word. */
	if (bp->vlgrp && vlan_tx_tag_present(skb)) {
		vlan_tag_flags |=
			(TX_BD_FLAGS_VLAN_TAG | (vlan_tx_tag_get(skb) << 16));
	}
#endif
	/* LSO: encode MSS plus IP/TCP header-length hints for the chip. */
	if ((mss = skb_shinfo(skb)->gso_size)) {
		u32 tcp_opt_len;
		struct iphdr *iph;

		vlan_tag_flags |= TX_BD_FLAGS_SW_LSO;

		tcp_opt_len = tcp_optlen(skb);

		if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) {
			/* IPv6 LSO additionally encodes any extension-header
			 * offset, split across several flag/mss bit fields.
			 */
			u32 tcp_off = skb_transport_offset(skb) -
				      sizeof(struct ipv6hdr) - ETH_HLEN;

			vlan_tag_flags |= ((tcp_opt_len >> 2) << 8) |
					  TX_BD_FLAGS_SW_FLAGS;
			if (likely(tcp_off == 0))
				vlan_tag_flags &= ~TX_BD_FLAGS_TCP6_OFF0_MSK;
			else {
				tcp_off >>= 3;
				vlan_tag_flags |= ((tcp_off & 0x3) <<
						   TX_BD_FLAGS_TCP6_OFF0_SHL) |
						  ((tcp_off & 0x10) <<
						   TX_BD_FLAGS_TCP6_OFF4_SHL);
				mss |= (tcp_off & 0xc) << TX_BD_TCP6_OFF2_SHL;
			}
		} else {
			iph = ip_hdr(skb);
			if (tcp_opt_len || (iph->ihl > 5)) {
				vlan_tag_flags |= ((iph->ihl - 5) +
						   (tcp_opt_len >> 2)) << 8;
			}
		}
	} else
		mss = 0;

	/* Map the whole skb (head + frags) for DMA; on failure the packet
	 * is silently dropped, which is the expected contract here.
	 */
	if (skb_dma_map(&bp->pdev->dev, skb, DMA_TO_DEVICE)) {
		dev_kfree_skb(skb);
		return NETDEV_TX_OK;
	}

	sp = skb_shinfo(skb);
	mapping = sp->dma_maps[0];

	tx_buf = &txr->tx_buf_ring[ring_prod];
	tx_buf->skb = skb;

	/* First BD: linear part, carries the START flag and all offload
	 * flags computed above.
	 */
	txbd = &txr->tx_desc_ring[ring_prod];

	txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
	txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
	txbd->tx_bd_mss_nbytes = len | (mss << 16);
	txbd->tx_bd_vlan_tag_flags = vlan_tag_flags | TX_BD_FLAGS_START;

	last_frag = skb_shinfo(skb)->nr_frags;

	/* One BD per page fragment, sharing the same mss/flags word. */
	for (i = 0; i < last_frag; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		prod = NEXT_TX_BD(prod);
		ring_prod = TX_RING_IDX(prod);
		txbd = &txr->tx_desc_ring[ring_prod];

		len = frag->size;
		mapping = sp->dma_maps[i + 1];

		txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
		txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
		txbd->tx_bd_mss_nbytes = len | (mss << 16);
		txbd->tx_bd_vlan_tag_flags = vlan_tag_flags;

	}
	/* Mark the last BD of the packet. */
	txbd->tx_bd_vlan_tag_flags |= TX_BD_FLAGS_END;

	prod = NEXT_TX_BD(prod);
	txr->tx_prod_bseq += skb->len;

	/* Ring the doorbell: producer index, then byte-sequence count. */
	REG_WR16(bp, txr->tx_bidx_addr, prod);
	REG_WR(bp, txr->tx_bseq_addr, txr->tx_prod_bseq);

	mmiowb();

	txr->tx_prod = prod;
	dev->trans_start = jiffies;

	/* Stop the queue when nearly full; re-wake immediately if the
	 * completion path freed enough space in the meantime.
	 */
	if (unlikely(bnx2_tx_avail(bp, txr) <= MAX_SKB_FRAGS)) {
		netif_tx_stop_queue(txq);
		if (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh)
			netif_tx_wake_queue(txq);
	}

	return NETDEV_TX_OK;
}
6154
/* Called with rtnl_lock.
 *
 * Bring the interface down.  Ordering matters: the pending reset work
 * is cancelled first so it cannot race the teardown, interrupts and
 * NAPI are quiesced before the chip is shut down, and only then are the
 * IRQs, skbs and ring memory released.  Finally the chip is put in
 * D3hot to save power.
 */
static int
bnx2_close(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);

	cancel_work_sync(&bp->reset_task);

	bnx2_disable_int_sync(bp);
	bnx2_napi_disable(bp);
	del_timer_sync(&bp->timer);
	bnx2_shutdown_chip(bp);
	bnx2_free_irq(bp);
	bnx2_free_skbs(bp);
	bnx2_free_mem(bp);
	bp->link_up = 0;
	netif_carrier_off(bp->dev);
	bnx2_set_power_state(bp, PCI_D3hot);
	return 0;
}
6175
/* Read a 64-bit hardware statistics counter exposed as a _hi/_lo pair.
 * Only meaningful where "unsigned long" is 64 bits wide (the shift by
 * 32 would otherwise be undefined), which the BITS_PER_LONG check below
 * guarantees.  The expansion is fully parenthesized so the macro acts
 * as a single expression in any arithmetic context (the previous form
 * left the '+' exposed, so e.g. "2 * GET_NET_STATS64(x)" would only
 * have scaled the first term).
 */
#define GET_NET_STATS64(ctr)					\
	((unsigned long) ((unsigned long) (ctr##_hi) << 32) +	\
	 (unsigned long) (ctr##_lo))

/* 32-bit hosts can only report the low half of the counter. */
#define GET_NET_STATS32(ctr)		\
	(ctr##_lo)

#if (BITS_PER_LONG == 64)
#define GET_NET_STATS	GET_NET_STATS64
#else
#define GET_NET_STATS	GET_NET_STATS32
#endif
6188
/* netdev get_stats hook: translate the chip's hardware statistics block
 * into struct net_device_stats.  64-bit counters are read through
 * GET_NET_STATS (truncated to 32 bits on 32-bit hosts).  If the stats
 * block has not been allocated yet, the (zeroed) dev->stats is returned
 * unchanged.
 */
static struct net_device_stats *
bnx2_get_stats(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	struct statistics_block *stats_blk = bp->stats_blk;
	struct net_device_stats *net_stats = &dev->stats;

	if (bp->stats_blk == NULL) {
		return net_stats;
	}
	net_stats->rx_packets =
		GET_NET_STATS(stats_blk->stat_IfHCInUcastPkts) +
		GET_NET_STATS(stats_blk->stat_IfHCInMulticastPkts) +
		GET_NET_STATS(stats_blk->stat_IfHCInBroadcastPkts);

	net_stats->tx_packets =
		GET_NET_STATS(stats_blk->stat_IfHCOutUcastPkts) +
		GET_NET_STATS(stats_blk->stat_IfHCOutMulticastPkts) +
		GET_NET_STATS(stats_blk->stat_IfHCOutBroadcastPkts);

	net_stats->rx_bytes =
		GET_NET_STATS(stats_blk->stat_IfHCInOctets);

	net_stats->tx_bytes =
		GET_NET_STATS(stats_blk->stat_IfHCOutOctets);

	net_stats->multicast =
		GET_NET_STATS(stats_blk->stat_IfHCOutMulticastPkts);

	net_stats->collisions =
		(unsigned long) stats_blk->stat_EtherStatsCollisions;

	net_stats->rx_length_errors =
		(unsigned long) (stats_blk->stat_EtherStatsUndersizePkts +
		stats_blk->stat_EtherStatsOverrsizePkts);

	net_stats->rx_over_errors =
		(unsigned long) stats_blk->stat_IfInMBUFDiscards;

	net_stats->rx_frame_errors =
		(unsigned long) stats_blk->stat_Dot3StatsAlignmentErrors;

	net_stats->rx_crc_errors =
		(unsigned long) stats_blk->stat_Dot3StatsFCSErrors;

	/* rx_errors is an aggregate of the individual rx error counters. */
	net_stats->rx_errors = net_stats->rx_length_errors +
		net_stats->rx_over_errors + net_stats->rx_frame_errors +
		net_stats->rx_crc_errors;

	net_stats->tx_aborted_errors =
		(unsigned long) (stats_blk->stat_Dot3StatsExcessiveCollisions +
		stats_blk->stat_Dot3StatsLateCollisions);

	/* 5706 and 5708 A0 do not report a usable carrier-sense counter. */
	if ((CHIP_NUM(bp) == CHIP_NUM_5706) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_A0))
		net_stats->tx_carrier_errors = 0;
	else {
		net_stats->tx_carrier_errors =
			(unsigned long)
			stats_blk->stat_Dot3StatsCarrierSenseErrors;
	}

	net_stats->tx_errors =
		(unsigned long)
		stats_blk->stat_emac_tx_stat_dot3statsinternalmactransmiterrors
		+
		net_stats->tx_aborted_errors +
		net_stats->tx_carrier_errors;

	/* stat_FwRxDrop is refreshed periodically by the driver timer. */
	net_stats->rx_missed_errors =
		(unsigned long) (stats_blk->stat_IfInMBUFDiscards +
		stats_blk->stat_FwRxDrop);

	return net_stats;
}
6264
6265 /* All ethtool functions called with rtnl_lock */
6266
6267 static int
6268 bnx2_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
6269 {
6270         struct bnx2 *bp = netdev_priv(dev);
6271         int support_serdes = 0, support_copper = 0;
6272
6273         cmd->supported = SUPPORTED_Autoneg;
6274         if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
6275                 support_serdes = 1;
6276                 support_copper = 1;
6277         } else if (bp->phy_port == PORT_FIBRE)
6278                 support_serdes = 1;
6279         else
6280                 support_copper = 1;
6281
6282         if (support_serdes) {
6283                 cmd->supported |= SUPPORTED_1000baseT_Full |
6284                         SUPPORTED_FIBRE;
6285                 if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE)
6286                         cmd->supported |= SUPPORTED_2500baseX_Full;
6287
6288         }
6289         if (support_copper) {
6290                 cmd->supported |= SUPPORTED_10baseT_Half |
6291                         SUPPORTED_10baseT_Full |
6292                         SUPPORTED_100baseT_Half |
6293                         SUPPORTED_100baseT_Full |
6294                         SUPPORTED_1000baseT_Full |
6295                         SUPPORTED_TP;
6296
6297         }
6298
6299         spin_lock_bh(&bp->phy_lock);
6300         cmd->port = bp->phy_port;
6301         cmd->advertising = bp->advertising;
6302
6303         if (bp->autoneg & AUTONEG_SPEED) {
6304                 cmd->autoneg = AUTONEG_ENABLE;
6305         }
6306         else {
6307                 cmd->autoneg = AUTONEG_DISABLE;
6308         }
6309
6310         if (netif_carrier_ok(dev)) {
6311                 cmd->speed = bp->line_speed;
6312                 cmd->duplex = bp->duplex;
6313         }
6314         else {
6315                 cmd->speed = -1;
6316                 cmd->duplex = -1;
6317         }
6318         spin_unlock_bh(&bp->phy_lock);
6319
6320         cmd->transceiver = XCVR_INTERNAL;
6321         cmd->phy_address = bp->phy_addr;
6322
6323         return 0;
6324 }
6325
/* ethtool set_settings: validate and store new link parameters, then
 * reprogram the PHY if the device is running.  All validation happens
 * under phy_lock; any rejected combination exits through
 * err_out_unlock with -EINVAL.
 */
static int
bnx2_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct bnx2 *bp = netdev_priv(dev);
	u8 autoneg = bp->autoneg;
	u8 req_duplex = bp->req_duplex;
	u16 req_line_speed = bp->req_line_speed;
	u32 advertising = bp->advertising;
	int err = -EINVAL;

	spin_lock_bh(&bp->phy_lock);

	if (cmd->port != PORT_TP && cmd->port != PORT_FIBRE)
		goto err_out_unlock;

	/* Switching ports is only possible with a remote-PHY device. */
	if (cmd->port != bp->phy_port &&
	    !(bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP))
		goto err_out_unlock;

	/* If device is down, we can store the settings only if the user
	 * is setting the currently active port.
	 */
	if (!netif_running(dev) && cmd->port != bp->phy_port)
		goto err_out_unlock;

	if (cmd->autoneg == AUTONEG_ENABLE) {
		autoneg |= AUTONEG_SPEED;

		cmd->advertising &= ETHTOOL_ALL_COPPER_SPEED;

		/* allow advertising 1 speed */
		if ((cmd->advertising == ADVERTISED_10baseT_Half) ||
			(cmd->advertising == ADVERTISED_10baseT_Full) ||
			(cmd->advertising == ADVERTISED_100baseT_Half) ||
			(cmd->advertising == ADVERTISED_100baseT_Full)) {

			/* 10/100 speeds are copper-only. */
			if (cmd->port == PORT_FIBRE)
				goto err_out_unlock;

			advertising = cmd->advertising;

		} else if (cmd->advertising == ADVERTISED_2500baseX_Full) {
			/* 2.5G is fibre-only and needs hardware support. */
			if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) ||
			    (cmd->port == PORT_TP))
				goto err_out_unlock;
		} else if (cmd->advertising == ADVERTISED_1000baseT_Full)
			advertising = cmd->advertising;
		else if (cmd->advertising == ADVERTISED_1000baseT_Half)
			goto err_out_unlock;
		else {
			/* Anything else: advertise everything the chosen
			 * medium supports.
			 */
			if (cmd->port == PORT_FIBRE)
				advertising = ETHTOOL_ALL_FIBRE_SPEED;
			else
				advertising = ETHTOOL_ALL_COPPER_SPEED;
		}
		advertising |= ADVERTISED_Autoneg;
	}
	else {
		/* Forced speed/duplex. */
		if (cmd->port == PORT_FIBRE) {
			/* Fibre only runs 1G/2.5G full duplex. */
			if ((cmd->speed != SPEED_1000 &&
			     cmd->speed != SPEED_2500) ||
			    (cmd->duplex != DUPLEX_FULL))
				goto err_out_unlock;

			if (cmd->speed == SPEED_2500 &&
			    !(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
				goto err_out_unlock;
		}
		else if (cmd->speed == SPEED_1000 || cmd->speed == SPEED_2500)
			goto err_out_unlock;

		autoneg &= ~AUTONEG_SPEED;
		req_line_speed = cmd->speed;
		req_duplex = cmd->duplex;
		advertising = 0;
	}

	bp->autoneg = autoneg;
	bp->advertising = advertising;
	bp->req_line_speed = req_line_speed;
	bp->req_duplex = req_duplex;

	err = 0;
	/* If device is down, the new settings will be picked up when it is
	 * brought up.
	 */
	if (netif_running(dev))
		err = bnx2_setup_phy(bp, cmd->port);

err_out_unlock:
	spin_unlock_bh(&bp->phy_lock);

	return err;
}
6420
6421 static void
6422 bnx2_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
6423 {
6424         struct bnx2 *bp = netdev_priv(dev);
6425
6426         strcpy(info->driver, DRV_MODULE_NAME);
6427         strcpy(info->version, DRV_MODULE_VERSION);
6428         strcpy(info->bus_info, pci_name(bp->pdev));
6429         strcpy(info->fw_version, bp->fw_version);
6430 }
6431
/* Size of the register dump returned by bnx2_get_regs(). */
#define BNX2_REGDUMP_LEN		(32 * 1024)

/* ethtool get_regs_len hook: fixed-size dump, see bnx2_get_regs(). */
static int
bnx2_get_regs_len(struct net_device *dev)
{
	return BNX2_REGDUMP_LEN;
}
6439
6440 static void
6441 bnx2_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *_p)
6442 {
6443         u32 *p = _p, i, offset;
6444         u8 *orig_p = _p;
6445         struct bnx2 *bp = netdev_priv(dev);
6446         u32 reg_boundaries[] = { 0x0000, 0x0098, 0x0400, 0x045c,
6447                                  0x0800, 0x0880, 0x0c00, 0x0c10,
6448                                  0x0c30, 0x0d08, 0x1000, 0x101c,
6449                                  0x1040, 0x1048, 0x1080, 0x10a4,
6450                                  0x1400, 0x1490, 0x1498, 0x14f0,
6451                                  0x1500, 0x155c, 0x1580, 0x15dc,
6452                                  0x1600, 0x1658, 0x1680, 0x16d8,
6453                                  0x1800, 0x1820, 0x1840, 0x1854,
6454                                  0x1880, 0x1894, 0x1900, 0x1984,
6455                                  0x1c00, 0x1c0c, 0x1c40, 0x1c54,
6456                                  0x1c80, 0x1c94, 0x1d00, 0x1d84,
6457                                  0x2000, 0x2030, 0x23c0, 0x2400,
6458                                  0x2800, 0x2820, 0x2830, 0x2850,
6459                                  0x2b40, 0x2c10, 0x2fc0, 0x3058,
6460                                  0x3c00, 0x3c94, 0x4000, 0x4010,
6461                                  0x4080, 0x4090, 0x43c0, 0x4458,
6462                                  0x4c00, 0x4c18, 0x4c40, 0x4c54,
6463                                  0x4fc0, 0x5010, 0x53c0, 0x5444,
6464                                  0x5c00, 0x5c18, 0x5c80, 0x5c90,
6465                                  0x5fc0, 0x6000, 0x6400, 0x6428,
6466                                  0x6800, 0x6848, 0x684c, 0x6860,
6467                                  0x6888, 0x6910, 0x8000 };
6468
6469         regs->version = 0;
6470
6471         memset(p, 0, BNX2_REGDUMP_LEN);
6472
6473         if (!netif_running(bp->dev))
6474                 return;
6475
6476         i = 0;
6477         offset = reg_boundaries[0];
6478         p += offset;
6479         while (offset < BNX2_REGDUMP_LEN) {
6480                 *p++ = REG_RD(bp, offset);
6481                 offset += 4;
6482                 if (offset == reg_boundaries[i + 1]) {
6483                         offset = reg_boundaries[i + 2];
6484                         p = (u32 *) (orig_p + offset);
6485                         i += 2;
6486                 }
6487         }
6488 }
6489
6490 static void
6491 bnx2_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
6492 {
6493         struct bnx2 *bp = netdev_priv(dev);
6494
6495         if (bp->flags & BNX2_FLAG_NO_WOL) {
6496                 wol->supported = 0;
6497                 wol->wolopts = 0;
6498         }
6499         else {
6500                 wol->supported = WAKE_MAGIC;
6501                 if (bp->wol)
6502                         wol->wolopts = WAKE_MAGIC;
6503                 else
6504                         wol->wolopts = 0;
6505         }
6506         memset(&wol->sopass, 0, sizeof(wol->sopass));
6507 }
6508
6509 static int
6510 bnx2_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
6511 {
6512         struct bnx2 *bp = netdev_priv(dev);
6513
6514         if (wol->wolopts & ~WAKE_MAGIC)
6515                 return -EINVAL;
6516
6517         if (wol->wolopts & WAKE_MAGIC) {
6518                 if (bp->flags & BNX2_FLAG_NO_WOL)
6519                         return -EINVAL;
6520
6521                 bp->wol = 1;
6522         }
6523         else {
6524                 bp->wol = 0;
6525         }
6526         return 0;
6527 }
6528
/* ethtool nway_reset: restart autonegotiation.  Only valid while the
 * device is up and autoneg is enabled.  Remote-PHY devices delegate to
 * the firmware; serdes devices first force the link down briefly (via
 * loopback) so the peer notices the renegotiation.
 */
static int
bnx2_nway_reset(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	u32 bmcr;

	if (!netif_running(dev))
		return -EAGAIN;

	if (!(bp->autoneg & AUTONEG_SPEED)) {
		return -EINVAL;
	}

	spin_lock_bh(&bp->phy_lock);

	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
		int rc;

		rc = bnx2_setup_remote_phy(bp, bp->phy_port);
		spin_unlock_bh(&bp->phy_lock);
		return rc;
	}

	/* Force a link down visible on the other side */
	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
		bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
		/* phy_lock is dropped around the sleep; the serdes timer
		 * state is updated after reacquiring it.
		 */
		spin_unlock_bh(&bp->phy_lock);

		msleep(20);

		spin_lock_bh(&bp->phy_lock);

		bp->current_interval = BNX2_SERDES_AN_TIMEOUT;
		bp->serdes_an_pending = 1;
		mod_timer(&bp->timer, jiffies + bp->current_interval);
	}

	/* Clear loopback and kick off a fresh autonegotiation. */
	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
	bmcr &= ~BMCR_LOOPBACK;
	bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART | BMCR_ANENABLE);

	spin_unlock_bh(&bp->phy_lock);

	return 0;
}
6574
6575 static int
6576 bnx2_get_eeprom_len(struct net_device *dev)
6577 {
6578         struct bnx2 *bp = netdev_priv(dev);
6579
6580         if (bp->flash_info == NULL)
6581                 return 0;
6582
6583         return (int) bp->flash_size;
6584 }
6585
6586 static int
6587 bnx2_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
6588                 u8 *eebuf)
6589 {
6590         struct bnx2 *bp = netdev_priv(dev);
6591         int rc;
6592
6593         if (!netif_running(dev))
6594                 return -EAGAIN;
6595
6596         /* parameters already validated in ethtool_get_eeprom */
6597
6598         rc = bnx2_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
6599
6600         return rc;
6601 }
6602
6603 static int
6604 bnx2_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
6605                 u8 *eebuf)
6606 {
6607         struct bnx2 *bp = netdev_priv(dev);
6608         int rc;
6609
6610         if (!netif_running(dev))
6611                 return -EAGAIN;
6612
6613         /* parameters already validated in ethtool_set_eeprom */
6614
6615         rc = bnx2_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
6616
6617         return rc;
6618 }
6619
6620 static int
6621 bnx2_get_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
6622 {
6623         struct bnx2 *bp = netdev_priv(dev);
6624
6625         memset(coal, 0, sizeof(struct ethtool_coalesce));
6626
6627         coal->rx_coalesce_usecs = bp->rx_ticks;
6628         coal->rx_max_coalesced_frames = bp->rx_quick_cons_trip;
6629         coal->rx_coalesce_usecs_irq = bp->rx_ticks_int;
6630         coal->rx_max_coalesced_frames_irq = bp->rx_quick_cons_trip_int;
6631
6632         coal->tx_coalesce_usecs = bp->tx_ticks;
6633         coal->tx_max_coalesced_frames = bp->tx_quick_cons_trip;
6634         coal->tx_coalesce_usecs_irq = bp->tx_ticks_int;
6635         coal->tx_max_coalesced_frames_irq = bp->tx_quick_cons_trip_int;
6636
6637         coal->stats_block_coalesce_usecs = bp->stats_ticks;
6638
6639         return 0;
6640 }
6641
6642 static int
6643 bnx2_set_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
6644 {
6645         struct bnx2 *bp = netdev_priv(dev);
6646
6647         bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
6648         if (bp->rx_ticks > 0x3ff) bp->rx_ticks = 0x3ff;
6649
6650         bp->rx_quick_cons_trip = (u16) coal->rx_max_coalesced_frames;
6651         if (bp->rx_quick_cons_trip > 0xff) bp->rx_quick_cons_trip = 0xff;
6652
6653         bp->rx_ticks_int = (u16) coal->rx_coalesce_usecs_irq;
6654         if (bp->rx_ticks_int > 0x3ff) bp->rx_ticks_int = 0x3ff;
6655
6656         bp->rx_quick_cons_trip_int = (u16) coal->rx_max_coalesced_frames_irq;
6657         if (bp->rx_quick_cons_trip_int > 0xff)
6658                 bp->rx_quick_cons_trip_int = 0xff;
6659
6660         bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
6661         if (bp->tx_ticks > 0x3ff) bp->tx_ticks = 0x3ff;
6662
6663         bp->tx_quick_cons_trip = (u16) coal->tx_max_coalesced_frames;
6664         if (bp->tx_quick_cons_trip > 0xff) bp->tx_quick_cons_trip = 0xff;
6665
6666         bp->tx_ticks_int = (u16) coal->tx_coalesce_usecs_irq;
6667         if (bp->tx_ticks_int > 0x3ff) bp->tx_ticks_int = 0x3ff;
6668
6669         bp->tx_quick_cons_trip_int = (u16) coal->tx_max_coalesced_frames_irq;
6670         if (bp->tx_quick_cons_trip_int > 0xff) bp->tx_quick_cons_trip_int =
6671                 0xff;
6672
6673         bp->stats_ticks = coal->stats_block_coalesce_usecs;
6674         if (CHIP_NUM(bp) == CHIP_NUM_5708) {
6675                 if (bp->stats_ticks != 0 && bp->stats_ticks != USEC_PER_SEC)
6676                         bp->stats_ticks = USEC_PER_SEC;
6677         }
6678         if (bp->stats_ticks > BNX2_HC_STATS_TICKS_HC_STAT_TICKS)
6679                 bp->stats_ticks = BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
6680         bp->stats_ticks &= BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
6681
6682         if (netif_running(bp->dev)) {
6683                 bnx2_netif_stop(bp);
6684                 bnx2_init_nic(bp, 0);
6685                 bnx2_netif_start(bp);
6686         }
6687
6688         return 0;
6689 }
6690
6691 static void
6692 bnx2_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
6693 {
6694         struct bnx2 *bp = netdev_priv(dev);
6695
6696         ering->rx_max_pending = MAX_TOTAL_RX_DESC_CNT;
6697         ering->rx_mini_max_pending = 0;
6698         ering->rx_jumbo_max_pending = MAX_TOTAL_RX_PG_DESC_CNT;
6699
6700         ering->rx_pending = bp->rx_ring_size;
6701         ering->rx_mini_pending = 0;
6702         ering->rx_jumbo_pending = bp->rx_pg_ring_size;
6703
6704         ering->tx_max_pending = MAX_TX_DESC_CNT;
6705         ering->tx_pending = bp->tx_ring_size;
6706 }
6707
6708 static int
6709 bnx2_change_ring_size(struct bnx2 *bp, u32 rx, u32 tx)
6710 {
6711         if (netif_running(bp->dev)) {
6712                 bnx2_netif_stop(bp);
6713                 bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
6714                 bnx2_free_skbs(bp);
6715                 bnx2_free_mem(bp);
6716         }
6717
6718         bnx2_set_rx_ring_size(bp, rx);
6719         bp->tx_ring_size = tx;
6720
6721         if (netif_running(bp->dev)) {
6722                 int rc;
6723
6724                 rc = bnx2_alloc_mem(bp);
6725                 if (rc)
6726                         return rc;
6727                 bnx2_init_nic(bp, 0);
6728                 bnx2_netif_start(bp);
6729         }
6730         return 0;
6731 }
6732
6733 static int
6734 bnx2_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
6735 {
6736         struct bnx2 *bp = netdev_priv(dev);
6737         int rc;
6738
6739         if ((ering->rx_pending > MAX_TOTAL_RX_DESC_CNT) ||
6740                 (ering->tx_pending > MAX_TX_DESC_CNT) ||
6741                 (ering->tx_pending <= MAX_SKB_FRAGS)) {
6742
6743                 return -EINVAL;
6744         }
6745         rc = bnx2_change_ring_size(bp, ering->rx_pending, ering->tx_pending);
6746         return rc;
6747 }
6748
6749 static void
6750 bnx2_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
6751 {
6752         struct bnx2 *bp = netdev_priv(dev);
6753
6754         epause->autoneg = ((bp->autoneg & AUTONEG_FLOW_CTRL) != 0);
6755         epause->rx_pause = ((bp->flow_ctrl & FLOW_CTRL_RX) != 0);
6756         epause->tx_pause = ((bp->flow_ctrl & FLOW_CTRL_TX) != 0);
6757 }
6758
6759 static int
6760 bnx2_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
6761 {
6762         struct bnx2 *bp = netdev_priv(dev);
6763
6764         bp->req_flow_ctrl = 0;
6765         if (epause->rx_pause)
6766                 bp->req_flow_ctrl |= FLOW_CTRL_RX;
6767         if (epause->tx_pause)
6768                 bp->req_flow_ctrl |= FLOW_CTRL_TX;
6769
6770         if (epause->autoneg) {
6771                 bp->autoneg |= AUTONEG_FLOW_CTRL;
6772         }
6773         else {
6774                 bp->autoneg &= ~AUTONEG_FLOW_CTRL;
6775         }
6776
6777         if (netif_running(dev)) {
6778                 spin_lock_bh(&bp->phy_lock);
6779                 bnx2_setup_phy(bp, bp->phy_port);
6780                 spin_unlock_bh(&bp->phy_lock);
6781         }
6782
6783         return 0;
6784 }
6785
6786 static u32
6787 bnx2_get_rx_csum(struct net_device *dev)
6788 {
6789         struct bnx2 *bp = netdev_priv(dev);
6790
6791         return bp->rx_csum;
6792 }
6793
6794 static int
6795 bnx2_set_rx_csum(struct net_device *dev, u32 data)
6796 {
6797         struct bnx2 *bp = netdev_priv(dev);
6798
6799         bp->rx_csum = data;
6800         return 0;
6801 }
6802
6803 static int
6804 bnx2_set_tso(struct net_device *dev, u32 data)
6805 {
6806         struct bnx2 *bp = netdev_priv(dev);
6807
6808         if (data) {
6809                 dev->features |= NETIF_F_TSO | NETIF_F_TSO_ECN;
6810                 if (CHIP_NUM(bp) == CHIP_NUM_5709)
6811                         dev->features |= NETIF_F_TSO6;
6812         } else
6813                 dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6 |
6814                                    NETIF_F_TSO_ECN);
6815         return 0;
6816 }
6817
#define BNX2_NUM_STATS 46

/* ethtool statistics names.  The order here must match
 * bnx2_stats_offset_arr and the per-chip length arrays below.
 */
static struct {
	char string[ETH_GSTRING_LEN];
} bnx2_stats_str_arr[BNX2_NUM_STATS] = {
	{ "rx_bytes" },
	{ "rx_error_bytes" },
	{ "tx_bytes" },
	{ "tx_error_bytes" },
	{ "rx_ucast_packets" },
	{ "rx_mcast_packets" },
	{ "rx_bcast_packets" },
	{ "tx_ucast_packets" },
	{ "tx_mcast_packets" },
	{ "tx_bcast_packets" },
	{ "tx_mac_errors" },
	{ "tx_carrier_errors" },
	{ "rx_crc_errors" },
	{ "rx_align_errors" },
	{ "tx_single_collisions" },
	{ "tx_multi_collisions" },
	{ "tx_deferred" },
	{ "tx_excess_collisions" },
	{ "tx_late_collisions" },
	{ "tx_total_collisions" },
	{ "rx_fragments" },
	{ "rx_jabbers" },
	{ "rx_undersize_packets" },
	{ "rx_oversize_packets" },
	{ "rx_64_byte_packets" },
	{ "rx_65_to_127_byte_packets" },
	{ "rx_128_to_255_byte_packets" },
	{ "rx_256_to_511_byte_packets" },
	{ "rx_512_to_1023_byte_packets" },
	{ "rx_1024_to_1522_byte_packets" },
	{ "rx_1523_to_9022_byte_packets" },
	{ "tx_64_byte_packets" },
	{ "tx_65_to_127_byte_packets" },
	{ "tx_128_to_255_byte_packets" },
	{ "tx_256_to_511_byte_packets" },
	{ "tx_512_to_1023_byte_packets" },
	{ "tx_1024_to_1522_byte_packets" },
	{ "tx_1523_to_9022_byte_packets" },
	{ "rx_xon_frames" },
	{ "rx_xoff_frames" },
	{ "tx_xon_frames" },
	{ "tx_xoff_frames" },
	{ "rx_mac_ctrl_frames" },
	{ "rx_filtered_packets" },
	{ "rx_discards" },
	{ "rx_fw_discards" },
};
6870
/* Convert a statistics_block field offset (bytes) to a u32 index. */
#define STATS_OFFSET32(offset_name) (offsetof(struct statistics_block, offset_name) / 4)

/* u32 index into the hardware statistics block for each counter in
 * bnx2_stats_str_arr.  64-bit counters point at their _hi word; the
 * _lo word follows at index + 1 (see bnx2_get_ethtool_stats).
 */
static const unsigned long bnx2_stats_offset_arr[BNX2_NUM_STATS] = {
    STATS_OFFSET32(stat_IfHCInOctets_hi),
    STATS_OFFSET32(stat_IfHCInBadOctets_hi),
    STATS_OFFSET32(stat_IfHCOutOctets_hi),
    STATS_OFFSET32(stat_IfHCOutBadOctets_hi),
    STATS_OFFSET32(stat_IfHCInUcastPkts_hi),
    STATS_OFFSET32(stat_IfHCInMulticastPkts_hi),
    STATS_OFFSET32(stat_IfHCInBroadcastPkts_hi),
    STATS_OFFSET32(stat_IfHCOutUcastPkts_hi),
    STATS_OFFSET32(stat_IfHCOutMulticastPkts_hi),
    STATS_OFFSET32(stat_IfHCOutBroadcastPkts_hi),
    STATS_OFFSET32(stat_emac_tx_stat_dot3statsinternalmactransmiterrors),
    STATS_OFFSET32(stat_Dot3StatsCarrierSenseErrors),
    STATS_OFFSET32(stat_Dot3StatsFCSErrors),
    STATS_OFFSET32(stat_Dot3StatsAlignmentErrors),
    STATS_OFFSET32(stat_Dot3StatsSingleCollisionFrames),
    STATS_OFFSET32(stat_Dot3StatsMultipleCollisionFrames),
    STATS_OFFSET32(stat_Dot3StatsDeferredTransmissions),
    STATS_OFFSET32(stat_Dot3StatsExcessiveCollisions),
    STATS_OFFSET32(stat_Dot3StatsLateCollisions),
    STATS_OFFSET32(stat_EtherStatsCollisions),
    STATS_OFFSET32(stat_EtherStatsFragments),
    STATS_OFFSET32(stat_EtherStatsJabbers),
    STATS_OFFSET32(stat_EtherStatsUndersizePkts),
    STATS_OFFSET32(stat_EtherStatsOverrsizePkts),
    STATS_OFFSET32(stat_EtherStatsPktsRx64Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx65Octetsto127Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx128Octetsto255Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx256Octetsto511Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx512Octetsto1023Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx1024Octetsto1522Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx1523Octetsto9022Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx64Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx65Octetsto127Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx128Octetsto255Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx256Octetsto511Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx512Octetsto1023Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx1024Octetsto1522Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx1523Octetsto9022Octets),
    STATS_OFFSET32(stat_XonPauseFramesReceived),
    STATS_OFFSET32(stat_XoffPauseFramesReceived),
    STATS_OFFSET32(stat_OutXonSent),
    STATS_OFFSET32(stat_OutXoffSent),
    STATS_OFFSET32(stat_MacControlFramesReceived),
    STATS_OFFSET32(stat_IfInFramesL2FilterDiscards),
    STATS_OFFSET32(stat_IfInMBUFDiscards),
    STATS_OFFSET32(stat_FwRxDrop),
};
6921
/* stat_IfHCInBadOctets and stat_Dot3StatsCarrierSenseErrors are
 * skipped because of errata.
 */
/* Per-counter widths in bytes: 8 = 64-bit, 4 = 32-bit, 0 = counter is
 * not reported on this chip (forced to zero in bnx2_get_ethtool_stats).
 */
static u8 bnx2_5706_stats_len_arr[BNX2_NUM_STATS] = {
	8,0,8,8,8,8,8,8,8,8,
	4,0,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,
};

static u8 bnx2_5708_stats_len_arr[BNX2_NUM_STATS] = {
	8,0,8,8,8,8,8,8,8,8,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,
};
6940
#define BNX2_NUM_TESTS 6

/* ethtool self-test names; indices match the buf[] slots filled in
 * by bnx2_self_test.
 */
static struct {
	char string[ETH_GSTRING_LEN];
} bnx2_tests_str_arr[BNX2_NUM_TESTS] = {
	{ "register_test (offline)" },
	{ "memory_test (offline)" },
	{ "loopback_test (offline)" },
	{ "nvram_test (online)" },
	{ "interrupt_test (online)" },
	{ "link_test (online)" },
};
6953
6954 static int
6955 bnx2_get_sset_count(struct net_device *dev, int sset)
6956 {
6957         switch (sset) {
6958         case ETH_SS_TEST:
6959                 return BNX2_NUM_TESTS;
6960         case ETH_SS_STATS:
6961                 return BNX2_NUM_STATS;
6962         default:
6963                 return -EOPNOTSUPP;
6964         }
6965 }
6966
/* Ethtool self-test handler.  The offline tests (registers, memory,
 * loopback) reset the chip and are only run when ETH_TEST_FL_OFFLINE is
 * set; NVRAM, interrupt and link tests always run.  A nonzero buf[i]
 * marks test i failed and ETH_TEST_FL_FAILED is set in etest->flags.
 */
static void
bnx2_self_test(struct net_device *dev, struct ethtool_test *etest, u64 *buf)
{
	struct bnx2 *bp = netdev_priv(dev);

	bnx2_set_power_state(bp, PCI_D0);

	memset(buf, 0, sizeof(u64) * BNX2_NUM_TESTS);
	if (etest->flags & ETH_TEST_FL_OFFLINE) {
		int i;

		/* Take traffic down and put the chip in diagnostic mode. */
		bnx2_netif_stop(bp);
		bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_DIAG);
		bnx2_free_skbs(bp);

		if (bnx2_test_registers(bp) != 0) {
			buf[0] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}
		if (bnx2_test_memory(bp) != 0) {
			buf[1] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}
		/* buf[2] holds the loopback test's nonzero failure code. */
		if ((buf[2] = bnx2_test_loopback(bp)) != 0)
			etest->flags |= ETH_TEST_FL_FAILED;

		/* Restore normal operation if the interface was up. */
		if (!netif_running(bp->dev))
			bnx2_shutdown_chip(bp);
		else {
			bnx2_init_nic(bp, 1);
			bnx2_netif_start(bp);
		}

		/* wait for link up */
		for (i = 0; i < 7; i++) {
			if (bp->link_up)
				break;
			msleep_interruptible(1000);
		}
	}

	if (bnx2_test_nvram(bp) != 0) {
		buf[3] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}
	if (bnx2_test_intr(bp) != 0) {
		buf[4] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}

	if (bnx2_test_link(bp) != 0) {
		buf[5] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;

	}
	/* Return to low power if the interface was down on entry. */
	if (!netif_running(bp->dev))
		bnx2_set_power_state(bp, PCI_D3hot);
}
7025
7026 static void
7027 bnx2_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
7028 {
7029         switch (stringset) {
7030         case ETH_SS_STATS:
7031                 memcpy(buf, bnx2_stats_str_arr,
7032                         sizeof(bnx2_stats_str_arr));
7033                 break;
7034         case ETH_SS_TEST:
7035                 memcpy(buf, bnx2_tests_str_arr,
7036                         sizeof(bnx2_tests_str_arr));
7037                 break;
7038         }
7039 }
7040
7041 static void
7042 bnx2_get_ethtool_stats(struct net_device *dev,
7043                 struct ethtool_stats *stats, u64 *buf)
7044 {
7045         struct bnx2 *bp = netdev_priv(dev);
7046         int i;
7047         u32 *hw_stats = (u32 *) bp->stats_blk;
7048         u8 *stats_len_arr = NULL;
7049
7050         if (hw_stats == NULL) {
7051                 memset(buf, 0, sizeof(u64) * BNX2_NUM_STATS);
7052                 return;
7053         }
7054
7055         if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
7056             (CHIP_ID(bp) == CHIP_ID_5706_A1) ||
7057             (CHIP_ID(bp) == CHIP_ID_5706_A2) ||
7058             (CHIP_ID(bp) == CHIP_ID_5708_A0))
7059                 stats_len_arr = bnx2_5706_stats_len_arr;
7060         else
7061                 stats_len_arr = bnx2_5708_stats_len_arr;
7062
7063         for (i = 0; i < BNX2_NUM_STATS; i++) {
7064                 if (stats_len_arr[i] == 0) {
7065                         /* skip this counter */
7066                         buf[i] = 0;
7067                         continue;
7068                 }
7069                 if (stats_len_arr[i] == 4) {
7070                         /* 4-byte counter */
7071                         buf[i] = (u64)
7072                                 *(hw_stats + bnx2_stats_offset_arr[i]);
7073                         continue;
7074                 }
7075                 /* 8-byte counter */
7076                 buf[i] = (((u64) *(hw_stats +
7077                                         bnx2_stats_offset_arr[i])) << 32) +
7078                                 *(hw_stats + bnx2_stats_offset_arr[i] + 1);
7079         }
7080 }
7081
/* Ethtool LED identify handler: blink the port LED for @data seconds
 * (0 selects the 2-second default) by toggling the LED override bits
 * every 500 ms, then restore the saved LED configuration.
 */
static int
bnx2_phys_id(struct net_device *dev, u32 data)
{
	struct bnx2 *bp = netdev_priv(dev);
	int i;
	u32 save;	/* original BNX2_MISC_CFG, restored before return */

	bnx2_set_power_state(bp, PCI_D0);

	if (data == 0)
		data = 2;

	save = REG_RD(bp, BNX2_MISC_CFG);
	REG_WR(bp, BNX2_MISC_CFG, BNX2_MISC_CFG_LEDMODE_MAC);

	/* 2 * data half-periods of 500 ms each = data seconds total. */
	for (i = 0; i < (data * 2); i++) {
		if ((i % 2) == 0) {
			REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE);
		}
		else {
			REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE |
				BNX2_EMAC_LED_1000MB_OVERRIDE |
				BNX2_EMAC_LED_100MB_OVERRIDE |
				BNX2_EMAC_LED_10MB_OVERRIDE |
				BNX2_EMAC_LED_TRAFFIC_OVERRIDE |
				BNX2_EMAC_LED_TRAFFIC);
		}
		msleep_interruptible(500);
		/* Stop blinking early if the user interrupts us. */
		if (signal_pending(current))
			break;
	}
	/* Drop the overrides and restore the original LED mode. */
	REG_WR(bp, BNX2_EMAC_LED, 0);
	REG_WR(bp, BNX2_MISC_CFG, save);

	if (!netif_running(dev))
		bnx2_set_power_state(bp, PCI_D3hot);

	return 0;
}
7121
7122 static int
7123 bnx2_set_tx_csum(struct net_device *dev, u32 data)
7124 {
7125         struct bnx2 *bp = netdev_priv(dev);
7126
7127         if (CHIP_NUM(bp) == CHIP_NUM_5709)
7128                 return (ethtool_op_set_tx_ipv6_csum(dev, data));
7129         else
7130                 return (ethtool_op_set_tx_csum(dev, data));
7131 }
7132
/* ethtool entry points registered for bnx2 devices. */
static const struct ethtool_ops bnx2_ethtool_ops = {
	.get_settings		= bnx2_get_settings,
	.set_settings		= bnx2_set_settings,
	.get_drvinfo		= bnx2_get_drvinfo,
	.get_regs_len		= bnx2_get_regs_len,
	.get_regs		= bnx2_get_regs,
	.get_wol		= bnx2_get_wol,
	.set_wol		= bnx2_set_wol,
	.nway_reset		= bnx2_nway_reset,
	.get_link		= ethtool_op_get_link,
	.get_eeprom_len		= bnx2_get_eeprom_len,
	.get_eeprom		= bnx2_get_eeprom,
	.set_eeprom		= bnx2_set_eeprom,
	.get_coalesce		= bnx2_get_coalesce,
	.set_coalesce		= bnx2_set_coalesce,
	.get_ringparam		= bnx2_get_ringparam,
	.set_ringparam		= bnx2_set_ringparam,
	.get_pauseparam		= bnx2_get_pauseparam,
	.set_pauseparam		= bnx2_set_pauseparam,
	.get_rx_csum		= bnx2_get_rx_csum,
	.set_rx_csum		= bnx2_set_rx_csum,
	.set_tx_csum		= bnx2_set_tx_csum,
	.set_sg			= ethtool_op_set_sg,
	.set_tso		= bnx2_set_tso,
	.self_test		= bnx2_self_test,
	.get_strings		= bnx2_get_strings,
	.phys_id		= bnx2_phys_id,
	.get_ethtool_stats	= bnx2_get_ethtool_stats,
	.get_sset_count		= bnx2_get_sset_count,
};
7163
/* Called with rtnl_lock */
/* MII ioctl handler: SIOCGMIIPHY returns the PHY address (and falls
 * through to read register 0), SIOCGMIIREG reads a PHY register,
 * SIOCSMIIREG writes one.  Direct MDIO access is refused when the PHY
 * is remote (BNX2_PHY_FLAG_REMOTE_PHY_CAP) or the interface is down.
 */
static int
bnx2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct mii_ioctl_data *data = if_mii(ifr);
	struct bnx2 *bp = netdev_priv(dev);
	int err;

	switch(cmd) {
	case SIOCGMIIPHY:
		data->phy_id = bp->phy_addr;

		/* fallthru */
	case SIOCGMIIREG: {
		u32 mii_regval;

		if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
			return -EOPNOTSUPP;

		if (!netif_running(dev))
			return -EAGAIN;

		/* Serialize against other PHY accesses. */
		spin_lock_bh(&bp->phy_lock);
		err = bnx2_read_phy(bp, data->reg_num & 0x1f, &mii_regval);
		spin_unlock_bh(&bp->phy_lock);

		data->val_out = mii_regval;

		return err;
	}

	case SIOCSMIIREG:
		/* PHY register writes require admin capability. */
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;

		if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
			return -EOPNOTSUPP;

		if (!netif_running(dev))
			return -EAGAIN;

		spin_lock_bh(&bp->phy_lock);
		err = bnx2_write_phy(bp, data->reg_num & 0x1f, data->val_in);
		spin_unlock_bh(&bp->phy_lock);

		return err;

	default:
		/* do nothing */
		break;
	}
	return -EOPNOTSUPP;
}
7217
7218 /* Called with rtnl_lock */
7219 static int
7220 bnx2_change_mac_addr(struct net_device *dev, void *p)
7221 {
7222         struct sockaddr *addr = p;
7223         struct bnx2 *bp = netdev_priv(dev);
7224
7225         if (!is_valid_ether_addr(addr->sa_data))
7226                 return -EINVAL;
7227
7228         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
7229         if (netif_running(dev))
7230                 bnx2_set_mac_addr(bp, bp->dev->dev_addr, 0);
7231
7232         return 0;
7233 }
7234
7235 /* Called with rtnl_lock */
7236 static int
7237 bnx2_change_mtu(struct net_device *dev, int new_mtu)
7238 {
7239         struct bnx2 *bp = netdev_priv(dev);
7240
7241         if (((new_mtu + ETH_HLEN) > MAX_ETHERNET_JUMBO_PACKET_SIZE) ||
7242                 ((new_mtu + ETH_HLEN) < MIN_ETHERNET_PACKET_SIZE))
7243                 return -EINVAL;
7244
7245         dev->mtu = new_mtu;
7246         return (bnx2_change_ring_size(bp, bp->rx_ring_size, bp->tx_ring_size));
7247 }
7248
#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
/* Netpoll hook: run each vector's interrupt handler with that IRQ
 * disabled so it can be serviced safely from polling context.
 */
static void
poll_bnx2(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	int i;

	for (i = 0; i < bp->irq_nvecs; i++) {
		disable_irq(bp->irq_tbl[i].vector);
		bnx2_interrupt(bp->irq_tbl[i].vector, &bp->bnx2_napi[i]);
		enable_irq(bp->irq_tbl[i].vector);
	}
}
#endif
7263
7264 static void __devinit
7265 bnx2_get_5709_media(struct bnx2 *bp)
7266 {
7267         u32 val = REG_RD(bp, BNX2_MISC_DUAL_MEDIA_CTRL);
7268         u32 bond_id = val & BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID;
7269         u32 strap;
7270
7271         if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_C)
7272                 return;
7273         else if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_S) {
7274                 bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
7275                 return;
7276         }
7277
7278         if (val & BNX2_MISC_DUAL_MEDIA_CTRL_STRAP_OVERRIDE)
7279                 strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL) >> 21;
7280         else
7281                 strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL_STRAP) >> 8;
7282
7283         if (PCI_FUNC(bp->pdev->devfn) == 0) {
7284                 switch (strap) {
7285                 case 0x4:
7286                 case 0x5:
7287                 case 0x6:
7288                         bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
7289                         return;
7290                 }
7291         } else {
7292                 switch (strap) {
7293                 case 0x1:
7294                 case 0x2:
7295                 case 0x4:
7296                         bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
7297                         return;
7298                 }
7299         }
7300 }
7301
7302 static void __devinit
7303 bnx2_get_pci_speed(struct bnx2 *bp)
7304 {
7305         u32 reg;
7306
7307         reg = REG_RD(bp, BNX2_PCICFG_MISC_STATUS);
7308         if (reg & BNX2_PCICFG_MISC_STATUS_PCIX_DET) {
7309                 u32 clkreg;
7310
7311                 bp->flags |= BNX2_FLAG_PCIX;
7312
7313                 clkreg = REG_RD(bp, BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS);
7314
7315                 clkreg &= BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET;
7316                 switch (clkreg) {
7317                 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_133MHZ:
7318                         bp->bus_speed_mhz = 133;
7319                         break;
7320
7321                 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_95MHZ:
7322                         bp->bus_speed_mhz = 100;
7323                         break;
7324
7325                 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_66MHZ:
7326                 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_80MHZ:
7327                         bp->bus_speed_mhz = 66;
7328                         break;
7329
7330                 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_48MHZ:
7331                 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_55MHZ:
7332                         bp->bus_speed_mhz = 50;
7333                         break;
7334
7335                 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_LOW:
7336                 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_32MHZ:
7337                 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_38MHZ:
7338                         bp->bus_speed_mhz = 33;
7339                         break;
7340                 }
7341         }
7342         else {
7343                 if (reg & BNX2_PCICFG_MISC_STATUS_M66EN)
7344                         bp->bus_speed_mhz = 66;
7345                 else
7346                         bp->bus_speed_mhz = 33;
7347         }
7348
7349         if (reg & BNX2_PCICFG_MISC_STATUS_32BIT_DET)
7350                 bp->flags |= BNX2_FLAG_PCI_32BIT;
7351
7352 }
7353
7354 static int __devinit
7355 bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
7356 {
7357         struct bnx2 *bp;
7358         unsigned long mem_len;
7359         int rc, i, j;
7360         u32 reg;
7361         u64 dma_mask, persist_dma_mask;
7362
7363         SET_NETDEV_DEV(dev, &pdev->dev);
7364         bp = netdev_priv(dev);
7365
7366         bp->flags = 0;
7367         bp->phy_flags = 0;
7368
7369         /* enable device (incl. PCI PM wakeup), and bus-mastering */
7370         rc = pci_enable_device(pdev);
7371         if (rc) {
7372                 dev_err(&pdev->dev, "Cannot enable PCI device, aborting.\n");
7373                 goto err_out;
7374         }
7375
7376         if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
7377                 dev_err(&pdev->dev,
7378                         "Cannot find PCI device base address, aborting.\n");
7379                 rc = -ENODEV;
7380                 goto err_out_disable;
7381         }
7382
7383         rc = pci_request_regions(pdev, DRV_MODULE_NAME);
7384         if (rc) {
7385                 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting.\n");
7386                 goto err_out_disable;
7387         }
7388
7389         pci_set_master(pdev);
7390         pci_save_state(pdev);
7391
7392         bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
7393         if (bp->pm_cap == 0) {
7394                 dev_err(&pdev->dev,
7395                         "Cannot find power management capability, aborting.\n");
7396                 rc = -EIO;
7397                 goto err_out_release;
7398         }
7399
7400         bp->dev = dev;
7401         bp->pdev = pdev;
7402
7403         spin_lock_init(&bp->phy_lock);
7404         spin_lock_init(&bp->indirect_lock);
7405         INIT_WORK(&bp->reset_task, bnx2_reset_task);
7406
7407         dev->base_addr = dev->mem_start = pci_resource_start(pdev, 0);
7408         mem_len = MB_GET_CID_ADDR(TX_TSS_CID + TX_MAX_TSS_RINGS);
7409         dev->mem_end = dev->mem_start + mem_len;
7410         dev->irq = pdev->irq;
7411
7412         bp->regview = ioremap_nocache(dev->base_addr, mem_len);
7413
7414         if (!bp->regview) {
7415                 dev_err(&pdev->dev, "Cannot map register space, aborting.\n");
7416                 rc = -ENOMEM;
7417                 goto err_out_release;
7418         }
7419
7420         /* Configure byte swap and enable write to the reg_window registers.
7421          * Rely on CPU to do target byte swapping on big endian systems
7422          * The chip's target access swapping will not swap all accesses
7423          */
7424         pci_write_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG,
7425                                BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
7426                                BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP);
7427
7428         bnx2_set_power_state(bp, PCI_D0);
7429
7430         bp->chip_id = REG_RD(bp, BNX2_MISC_ID);
7431
7432         if (CHIP_NUM(bp) == CHIP_NUM_5709) {
7433                 if (pci_find_capability(pdev, PCI_CAP_ID_EXP) == 0) {
7434                         dev_err(&pdev->dev,
7435                                 "Cannot find PCIE capability, aborting.\n");
7436                         rc = -EIO;
7437                         goto err_out_unmap;
7438                 }
7439                 bp->flags |= BNX2_FLAG_PCIE;
7440                 if (CHIP_REV(bp) == CHIP_REV_Ax)
7441                         bp->flags |= BNX2_FLAG_JUMBO_BROKEN;
7442         } else {
7443                 bp->pcix_cap = pci_find_capability(pdev, PCI_CAP_ID_PCIX);
7444                 if (bp->pcix_cap == 0) {
7445                         dev_err(&pdev->dev,
7446                                 "Cannot find PCIX capability, aborting.\n");
7447                         rc = -EIO;
7448                         goto err_out_unmap;
7449                 }
7450         }
7451
7452         if (CHIP_NUM(bp) == CHIP_NUM_5709 && CHIP_REV(bp) != CHIP_REV_Ax) {
7453                 if (pci_find_capability(pdev, PCI_CAP_ID_MSIX))
7454                         bp->flags |= BNX2_FLAG_MSIX_CAP;
7455         }
7456
7457         if (CHIP_ID(bp) != CHIP_ID_5706_A0 && CHIP_ID(bp) != CHIP_ID_5706_A1) {
7458                 if (pci_find_capability(pdev, PCI_CAP_ID_MSI))
7459                         bp->flags |= BNX2_FLAG_MSI_CAP;
7460         }
7461
7462         /* 5708 cannot support DMA addresses > 40-bit.  */
7463         if (CHIP_NUM(bp) == CHIP_NUM_5708)
7464                 persist_dma_mask = dma_mask = DMA_40BIT_MASK;
7465         else
7466                 persist_dma_mask = dma_mask = DMA_64BIT_MASK;
7467
7468         /* Configure DMA attributes. */
7469         if (pci_set_dma_mask(pdev, dma_mask) == 0) {
7470                 dev->features |= NETIF_F_HIGHDMA;
7471                 rc = pci_set_consistent_dma_mask(pdev, persist_dma_mask);
7472                 if (rc) {
7473                         dev_err(&pdev->dev,
7474                                 "pci_set_consistent_dma_mask failed, aborting.\n");
7475                         goto err_out_unmap;
7476                 }
7477         } else if ((rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK)) != 0) {
7478                 dev_err(&pdev->dev, "System does not support DMA, aborting.\n");
7479                 goto err_out_unmap;
7480         }
7481
7482         if (!(bp->flags & BNX2_FLAG_PCIE))
7483                 bnx2_get_pci_speed(bp);
7484
7485         /* 5706A0 may falsely detect SERR and PERR. */
7486         if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
7487                 reg = REG_RD(bp, PCI_COMMAND);
7488                 reg &= ~(PCI_COMMAND_SERR | PCI_COMMAND_PARITY);
7489                 REG_WR(bp, PCI_COMMAND, reg);
7490         }
7491         else if ((CHIP_ID(bp) == CHIP_ID_5706_A1) &&
7492                 !(bp->flags & BNX2_FLAG_PCIX)) {
7493
7494                 dev_err(&pdev->dev,
7495                         "5706 A1 can only be used in a PCIX bus, aborting.\n");
7496                 goto err_out_unmap;
7497         }
7498
7499         bnx2_init_nvram(bp);
7500
7501         reg = bnx2_reg_rd_ind(bp, BNX2_SHM_HDR_SIGNATURE);
7502
7503         if ((reg & BNX2_SHM_HDR_SIGNATURE_SIG_MASK) ==
7504             BNX2_SHM_HDR_SIGNATURE_SIG) {
7505                 u32 off = PCI_FUNC(pdev->devfn) << 2;
7506
7507                 bp->shmem_base = bnx2_reg_rd_ind(bp, BNX2_SHM_HDR_ADDR_0 + off);
7508         } else
7509                 bp->shmem_base = HOST_VIEW_SHMEM_BASE;
7510
7511         /* Get the permanent MAC address.  First we need to make sure the
7512          * firmware is actually running.
7513          */
7514         reg = bnx2_shmem_rd(bp, BNX2_DEV_INFO_SIGNATURE);
7515
7516         if ((reg & BNX2_DEV_INFO_SIGNATURE_MAGIC_MASK) !=
7517             BNX2_DEV_INFO_SIGNATURE_MAGIC) {
7518                 dev_err(&pdev->dev, "Firmware not running, aborting.\n");
7519                 rc = -ENODEV;
7520                 goto err_out_unmap;
7521         }
7522
7523         reg = bnx2_shmem_rd(bp, BNX2_DEV_INFO_BC_REV);
7524         for (i = 0, j = 0; i < 3; i++) {
7525                 u8 num, k, skip0;
7526
7527                 num = (u8) (reg >> (24 - (i * 8)));
7528                 for (k = 100, skip0 = 1; k >= 1; num %= k, k /= 10) {
7529                         if (num >= k || !skip0 || k == 1) {
7530                                 bp->fw_version[j++] = (num / k) + '0';
7531                                 skip0 = 0;
7532                         }
7533                 }
7534                 if (i != 2)
7535                         bp->fw_version[j++] = '.';
7536         }
7537         reg = bnx2_shmem_rd(bp, BNX2_PORT_FEATURE);
7538         if (reg & BNX2_PORT_FEATURE_WOL_ENABLED)
7539                 bp->wol = 1;
7540
7541         if (reg & BNX2_PORT_FEATURE_ASF_ENABLED) {
7542                 bp->flags |= BNX2_FLAG_ASF_ENABLE;
7543
7544                 for (i = 0; i < 30; i++) {
7545                         reg = bnx2_shmem_rd(bp, BNX2_BC_STATE_CONDITION);
7546                         if (reg & BNX2_CONDITION_MFW_RUN_MASK)
7547                                 break;
7548                         msleep(10);
7549                 }
7550         }
7551         reg = bnx2_shmem_rd(bp, BNX2_BC_STATE_CONDITION);
7552         reg &= BNX2_CONDITION_MFW_RUN_MASK;
7553         if (reg != BNX2_CONDITION_MFW_RUN_UNKNOWN &&
7554             reg != BNX2_CONDITION_MFW_RUN_NONE) {
7555                 u32 addr = bnx2_shmem_rd(bp, BNX2_MFW_VER_PTR);
7556
7557                 bp->fw_version[j++] = ' ';
7558                 for (i = 0; i < 3; i++) {
7559                         reg = bnx2_reg_rd_ind(bp, addr + i * 4);
7560                         reg = swab32(reg);
7561                         memcpy(&bp->fw_version[j], &reg, 4);
7562                         j += 4;
7563                 }
7564         }
7565
7566         reg = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_MAC_UPPER);
7567         bp->mac_addr[0] = (u8) (reg >> 8);
7568         bp->mac_addr[1] = (u8) reg;
7569
7570         reg = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_MAC_LOWER);
7571         bp->mac_addr[2] = (u8) (reg >> 24);
7572         bp->mac_addr[3] = (u8) (reg >> 16);
7573         bp->mac_addr[4] = (u8) (reg >> 8);
7574         bp->mac_addr[5] = (u8) reg;
7575
7576         bp->tx_ring_size = MAX_TX_DESC_CNT;
7577         bnx2_set_rx_ring_size(bp, 255);
7578
7579         bp->rx_csum = 1;
7580
7581         bp->tx_quick_cons_trip_int = 20;
7582         bp->tx_quick_cons_trip = 20;
7583         bp->tx_ticks_int = 80;
7584         bp->tx_ticks = 80;
7585
7586         bp->rx_quick_cons_trip_int = 6;
7587         bp->rx_quick_cons_trip = 6;
7588         bp->rx_ticks_int = 18;
7589         bp->rx_ticks = 18;
7590
7591         bp->stats_ticks = USEC_PER_SEC & BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
7592
7593         bp->current_interval = BNX2_TIMER_INTERVAL;
7594
7595         bp->phy_addr = 1;
7596
7597         /* Disable WOL support if we are running on a SERDES chip. */
7598         if (CHIP_NUM(bp) == CHIP_NUM_5709)
7599                 bnx2_get_5709_media(bp);
7600         else if (CHIP_BOND_ID(bp) & CHIP_BOND_ID_SERDES_BIT)
7601                 bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
7602
7603         bp->phy_port = PORT_TP;
7604         if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
7605                 bp->phy_port = PORT_FIBRE;
7606                 reg = bnx2_shmem_rd(bp, BNX2_SHARED_HW_CFG_CONFIG);
7607                 if (!(reg & BNX2_SHARED_HW_CFG_GIG_LINK_ON_VAUX)) {
7608                         bp->flags |= BNX2_FLAG_NO_WOL;
7609                         bp->wol = 0;
7610                 }
7611                 if (CHIP_NUM(bp) == CHIP_NUM_5706) {
7612                         /* Don't do parallel detect on this board because of
7613                          * some board problems.  The link will not go down
7614                          * if we do parallel detect.
7615                          */
7616                         if (pdev->subsystem_vendor == PCI_VENDOR_ID_HP &&
7617                             pdev->subsystem_device == 0x310c)
7618                                 bp->phy_flags |= BNX2_PHY_FLAG_NO_PARALLEL;
7619                 } else {
7620                         bp->phy_addr = 2;
7621                         if (reg & BNX2_SHARED_HW_CFG_PHY_2_5G)
7622                                 bp->phy_flags |= BNX2_PHY_FLAG_2_5G_CAPABLE;
7623                 }
7624         } else if (CHIP_NUM(bp) == CHIP_NUM_5706 ||
7625                    CHIP_NUM(bp) == CHIP_NUM_5708)
7626                 bp->phy_flags |= BNX2_PHY_FLAG_CRC_FIX;
7627         else if (CHIP_NUM(bp) == CHIP_NUM_5709 &&
7628                  (CHIP_REV(bp) == CHIP_REV_Ax ||
7629                   CHIP_REV(bp) == CHIP_REV_Bx))
7630                 bp->phy_flags |= BNX2_PHY_FLAG_DIS_EARLY_DAC;
7631
7632         bnx2_init_fw_cap(bp);
7633
7634         if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
7635             (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
7636             (CHIP_ID(bp) == CHIP_ID_5708_B1) ||
7637             !(REG_RD(bp, BNX2_PCI_CONFIG_3) & BNX2_PCI_CONFIG_3_VAUX_PRESET)) {
7638                 bp->flags |= BNX2_FLAG_NO_WOL;
7639                 bp->wol = 0;
7640         }
7641
7642         if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
7643                 bp->tx_quick_cons_trip_int =
7644                         bp->tx_quick_cons_trip;
7645                 bp->tx_ticks_int = bp->tx_ticks;
7646                 bp->rx_quick_cons_trip_int =
7647                         bp->rx_quick_cons_trip;
7648                 bp->rx_ticks_int = bp->rx_ticks;
7649                 bp->comp_prod_trip_int = bp->comp_prod_trip;
7650                 bp->com_ticks_int = bp->com_ticks;
7651                 bp->cmd_ticks_int = bp->cmd_ticks;
7652         }
7653
7654         /* Disable MSI on 5706 if AMD 8132 bridge is found.
7655          *
7656          * MSI is defined to be 32-bit write.  The 5706 does 64-bit MSI writes
7657          * with byte enables disabled on the unused 32-bit word.  This is legal
7658          * but causes problems on the AMD 8132 which will eventually stop
7659          * responding after a while.
7660          *
7661          * AMD believes this incompatibility is unique to the 5706, and
7662          * prefers to locally disable MSI rather than globally disabling it.
7663          */
7664         if (CHIP_NUM(bp) == CHIP_NUM_5706 && disable_msi == 0) {
7665                 struct pci_dev *amd_8132 = NULL;
7666
7667                 while ((amd_8132 = pci_get_device(PCI_VENDOR_ID_AMD,
7668                                                   PCI_DEVICE_ID_AMD_8132_BRIDGE,
7669                                                   amd_8132))) {
7670
7671                         if (amd_8132->revision >= 0x10 &&
7672                             amd_8132->revision <= 0x13) {
7673                                 disable_msi = 1;
7674                                 pci_dev_put(amd_8132);
7675                                 break;
7676                         }
7677                 }
7678         }
7679
7680         bnx2_set_default_link(bp);
7681         bp->req_flow_ctrl = FLOW_CTRL_RX | FLOW_CTRL_TX;
7682
7683         init_timer(&bp->timer);
7684         bp->timer.expires = RUN_AT(BNX2_TIMER_INTERVAL);
7685         bp->timer.data = (unsigned long) bp;
7686         bp->timer.function = bnx2_timer;
7687
7688         return 0;
7689
7690 err_out_unmap:
7691         if (bp->regview) {
7692                 iounmap(bp->regview);
7693                 bp->regview = NULL;
7694         }
7695
7696 err_out_release:
7697         pci_release_regions(pdev);
7698
7699 err_out_disable:
7700         pci_disable_device(pdev);
7701         pci_set_drvdata(pdev, NULL);
7702
7703 err_out:
7704         return rc;
7705 }
7706
7707 static char * __devinit
7708 bnx2_bus_string(struct bnx2 *bp, char *str)
7709 {
7710         char *s = str;
7711
7712         if (bp->flags & BNX2_FLAG_PCIE) {
7713                 s += sprintf(s, "PCI Express");
7714         } else {
7715                 s += sprintf(s, "PCI");
7716                 if (bp->flags & BNX2_FLAG_PCIX)
7717                         s += sprintf(s, "-X");
7718                 if (bp->flags & BNX2_FLAG_PCI_32BIT)
7719                         s += sprintf(s, " 32-bit");
7720                 else
7721                         s += sprintf(s, " 64-bit");
7722                 s += sprintf(s, " %dMHz", bp->bus_speed_mhz);
7723         }
7724         return str;
7725 }
7726
7727 static void __devinit
7728 bnx2_init_napi(struct bnx2 *bp)
7729 {
7730         int i;
7731
7732         for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) {
7733                 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
7734                 int (*poll)(struct napi_struct *, int);
7735
7736                 if (i == 0)
7737                         poll = bnx2_poll;
7738                 else
7739                         poll = bnx2_poll_msix;
7740
7741                 netif_napi_add(bp->dev, &bp->bnx2_napi[i].napi, poll, 64);
7742                 bnapi->bp = bp;
7743         }
7744 }
7745
/* net_device callback table; VLAN and netpoll entries are compiled in
 * only when the corresponding kernel support is configured.
 */
static const struct net_device_ops bnx2_netdev_ops = {
        .ndo_open               = bnx2_open,
        .ndo_start_xmit         = bnx2_start_xmit,
        .ndo_stop               = bnx2_close,
        .ndo_get_stats          = bnx2_get_stats,
        .ndo_set_rx_mode        = bnx2_set_rx_mode,
        .ndo_do_ioctl           = bnx2_ioctl,
        .ndo_validate_addr      = eth_validate_addr,
        .ndo_set_mac_address    = bnx2_change_mac_addr,
        .ndo_change_mtu         = bnx2_change_mtu,
        .ndo_tx_timeout         = bnx2_tx_timeout,
#ifdef BCM_VLAN
        .ndo_vlan_rx_register   = bnx2_vlan_rx_register,
#endif
#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
        .ndo_poll_controller    = poll_bnx2,
#endif
};
7764
7765 static int __devinit
7766 bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
7767 {
7768         static int version_printed = 0;
7769         struct net_device *dev = NULL;
7770         struct bnx2 *bp;
7771         int rc;
7772         char str[40];
7773
7774         if (version_printed++ == 0)
7775                 printk(KERN_INFO "%s", version);
7776
7777         /* dev zeroed in init_etherdev */
7778         dev = alloc_etherdev_mq(sizeof(*bp), TX_MAX_RINGS);
7779
7780         if (!dev)
7781                 return -ENOMEM;
7782
7783         rc = bnx2_init_board(pdev, dev);
7784         if (rc < 0) {
7785                 free_netdev(dev);
7786                 return rc;
7787         }
7788
7789         dev->netdev_ops = &bnx2_netdev_ops;
7790         dev->watchdog_timeo = TX_TIMEOUT;
7791         dev->ethtool_ops = &bnx2_ethtool_ops;
7792
7793         bp = netdev_priv(dev);
7794         bnx2_init_napi(bp);
7795
7796         pci_set_drvdata(pdev, dev);
7797
7798         memcpy(dev->dev_addr, bp->mac_addr, 6);
7799         memcpy(dev->perm_addr, bp->mac_addr, 6);
7800
7801         dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
7802         if (CHIP_NUM(bp) == CHIP_NUM_5709)
7803                 dev->features |= NETIF_F_IPV6_CSUM;
7804
7805 #ifdef BCM_VLAN
7806         dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
7807 #endif
7808         dev->features |= NETIF_F_TSO | NETIF_F_TSO_ECN;
7809         if (CHIP_NUM(bp) == CHIP_NUM_5709)
7810                 dev->features |= NETIF_F_TSO6;
7811
7812         if ((rc = register_netdev(dev))) {
7813                 dev_err(&pdev->dev, "Cannot register net device\n");
7814                 if (bp->regview)
7815                         iounmap(bp->regview);
7816                 pci_release_regions(pdev);
7817                 pci_disable_device(pdev);
7818                 pci_set_drvdata(pdev, NULL);
7819                 free_netdev(dev);
7820                 return rc;
7821         }
7822
7823         printk(KERN_INFO "%s: %s (%c%d) %s found at mem %lx, "
7824                 "IRQ %d, node addr %pM\n",
7825                 dev->name,
7826                 board_info[ent->driver_data].name,
7827                 ((CHIP_ID(bp) & 0xf000) >> 12) + 'A',
7828                 ((CHIP_ID(bp) & 0x0ff0) >> 4),
7829                 bnx2_bus_string(bp, str),
7830                 dev->base_addr,
7831                 bp->pdev->irq, dev->dev_addr);
7832
7833         return 0;
7834 }
7835
7836 static void __devexit
7837 bnx2_remove_one(struct pci_dev *pdev)
7838 {
7839         struct net_device *dev = pci_get_drvdata(pdev);
7840         struct bnx2 *bp = netdev_priv(dev);
7841
7842         flush_scheduled_work();
7843
7844         unregister_netdev(dev);
7845
7846         if (bp->regview)
7847                 iounmap(bp->regview);
7848
7849         free_netdev(dev);
7850         pci_release_regions(pdev);
7851         pci_disable_device(pdev);
7852         pci_set_drvdata(pdev, NULL);
7853 }
7854
/* PM suspend callback: save PCI config state, quiesce the chip and
 * drop into the requested low-power state.  Ordering here is
 * deliberate and must not change.
 */
static int
bnx2_suspend(struct pci_dev *pdev, pm_message_t state)
{
        struct net_device *dev = pci_get_drvdata(pdev);
        struct bnx2 *bp = netdev_priv(dev);

        /* PCI register 4 needs to be saved whether netif_running() or not.
         * MSI address and data need to be saved if using MSI and
         * netif_running().
         */
        pci_save_state(pdev);
        if (!netif_running(dev))
                return 0;

        /* Drain deferred work before touching the hardware. */
        flush_scheduled_work();
        bnx2_netif_stop(bp);
        netif_device_detach(dev);
        del_timer_sync(&bp->timer);
        bnx2_shutdown_chip(bp);
        bnx2_free_skbs(bp);
        bnx2_set_power_state(bp, pci_choose_state(pdev, state));
        return 0;
}
7878
/* PM resume callback: restore PCI config state and, if the interface
 * was up at suspend time, bring the chip back to D0 and reinitialize.
 */
static int
bnx2_resume(struct pci_dev *pdev)
{
        struct net_device *dev = pci_get_drvdata(pdev);
        struct bnx2 *bp = netdev_priv(dev);

        pci_restore_state(pdev);
        if (!netif_running(dev))
                return 0;

        bnx2_set_power_state(bp, PCI_D0);
        netif_device_attach(dev);
        bnx2_init_nic(bp, 1);      /* full reset + reinit of rings/firmware */
        bnx2_netif_start(bp);
        return 0;
}
7895
/**
 * bnx2_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t bnx2_io_error_detected(struct pci_dev *pdev,
                                               pci_channel_state_t state)
{
        struct net_device *dev = pci_get_drvdata(pdev);
        struct bnx2 *bp = netdev_priv(dev);

        /* rtnl_lock serializes against open/close and other config paths. */
        rtnl_lock();
        netif_device_detach(dev);

        if (netif_running(dev)) {
                bnx2_netif_stop(bp);
                del_timer_sync(&bp->timer);
                bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET);
        }

        pci_disable_device(pdev);
        rtnl_unlock();

        /* Request a slot reset. */
        return PCI_ERS_RESULT_NEED_RESET;
}
7925
/**
 * bnx2_io_slot_reset - called after the pci bus has been reset.
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot.
 */
static pci_ers_result_t bnx2_io_slot_reset(struct pci_dev *pdev)
{
        struct net_device *dev = pci_get_drvdata(pdev);
        struct bnx2 *bp = netdev_priv(dev);

        rtnl_lock();
        if (pci_enable_device(pdev)) {
                dev_err(&pdev->dev,
                        "Cannot re-enable PCI device after reset.\n");
                rtnl_unlock();
                return PCI_ERS_RESULT_DISCONNECT;
        }
        pci_set_master(pdev);
        /* Config space was saved at suspend/probe time; bring it back. */
        pci_restore_state(pdev);

        if (netif_running(dev)) {
                bnx2_set_power_state(bp, PCI_D0);
                bnx2_init_nic(bp, 1);
        }

        rtnl_unlock();
        return PCI_ERS_RESULT_RECOVERED;
}
7955
/**
 * bnx2_io_resume - called when traffic can start flowing again.
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us that
 * its OK to resume normal operation.
 */
static void bnx2_io_resume(struct pci_dev *pdev)
{
        struct net_device *dev = pci_get_drvdata(pdev);
        struct bnx2 *bp = netdev_priv(dev);

        rtnl_lock();
        if (netif_running(dev))
                bnx2_netif_start(bp);

        netif_device_attach(dev);
        rtnl_unlock();
}
7975
/* PCI AER (advanced error recovery) callbacks. */
static struct pci_error_handlers bnx2_err_handler = {
        .error_detected = bnx2_io_error_detected,
        .slot_reset     = bnx2_io_slot_reset,
        .resume         = bnx2_io_resume,
};
7981
/* Driver registration record: probe/remove, power management and
 * error-recovery entry points for all supported NX2 device IDs.
 */
static struct pci_driver bnx2_pci_driver = {
        .name           = DRV_MODULE_NAME,
        .id_table       = bnx2_pci_tbl,
        .probe          = bnx2_init_one,
        .remove         = __devexit_p(bnx2_remove_one),
        .suspend        = bnx2_suspend,
        .resume         = bnx2_resume,
        .err_handler    = &bnx2_err_handler,
};
7991
/* Module entry point: register the PCI driver. */
static int __init bnx2_init(void)
{
        return pci_register_driver(&bnx2_pci_driver);
}
7996
/* Module exit point: unregister the PCI driver (removes all devices). */
static void __exit bnx2_cleanup(void)
{
        pci_unregister_driver(&bnx2_pci_driver);
}
8001
/* Hook the init/exit routines into the module loader. */
module_init(bnx2_init);
module_exit(bnx2_cleanup);
8004
8005
8006