bnx2 annotations
[sfrench/cifs-2.6.git] / drivers / net / bnx2.c
1 /* bnx2.c: Broadcom NX2 network driver.
2  *
3  * Copyright (c) 2004-2008 Broadcom Corporation
4  *
5  * This program is free software; you can redistribute it and/or modify
6  * it under the terms of the GNU General Public License as published by
7  * the Free Software Foundation.
8  *
9  * Written by: Michael Chan  (mchan@broadcom.com)
10  */
11
12
13 #include <linux/module.h>
14 #include <linux/moduleparam.h>
15
16 #include <linux/kernel.h>
17 #include <linux/timer.h>
18 #include <linux/errno.h>
19 #include <linux/ioport.h>
20 #include <linux/slab.h>
21 #include <linux/vmalloc.h>
22 #include <linux/interrupt.h>
23 #include <linux/pci.h>
24 #include <linux/init.h>
25 #include <linux/netdevice.h>
26 #include <linux/etherdevice.h>
27 #include <linux/skbuff.h>
28 #include <linux/dma-mapping.h>
29 #include <linux/bitops.h>
30 #include <asm/io.h>
31 #include <asm/irq.h>
32 #include <linux/delay.h>
33 #include <asm/byteorder.h>
34 #include <asm/page.h>
35 #include <linux/time.h>
36 #include <linux/ethtool.h>
37 #include <linux/mii.h>
38 #ifdef NETIF_F_HW_VLAN_TX
39 #include <linux/if_vlan.h>
40 #define BCM_VLAN 1
41 #endif
42 #include <net/ip.h>
43 #include <net/tcp.h>
44 #include <net/checksum.h>
45 #include <linux/workqueue.h>
46 #include <linux/crc32.h>
47 #include <linux/prefetch.h>
48 #include <linux/cache.h>
49 #include <linux/zlib.h>
50
51 #include "bnx2.h"
52 #include "bnx2_fw.h"
53 #include "bnx2_fw2.h"
54
/* Size of the buffer used when handling compressed firmware images.
 * NOTE(review): the consumers are outside this chunk — confirm against
 * the firmware-load path.
 */
#define FW_BUF_SIZE             0x10000

#define DRV_MODULE_NAME         "bnx2"
#define PFX DRV_MODULE_NAME     ": "    /* prefix for printk messages */
#define DRV_MODULE_VERSION      "1.7.2"
#define DRV_MODULE_RELDATE      "January 21, 2008"

/* Convert a relative delay into an absolute jiffies timestamp. */
#define RUN_AT(x) (jiffies + (x))

/* Time in jiffies before concluding the transmitter is hung. */
#define TX_TIMEOUT  (5*HZ)
/* One-line banner; __devinitdata so it can be discarded after init. */
static const char version[] __devinitdata =
        "Broadcom NetXtreme II Gigabit Ethernet Driver " DRV_MODULE_NAME " v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Michael Chan <mchan@broadcom.com>");
MODULE_DESCRIPTION("Broadcom NetXtreme II BCM5706/5708 Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);

/* Module parameter: a non-zero value disables MSI (see PARM_DESC). */
static int disable_msi = 0;

module_param(disable_msi, int, 0);
MODULE_PARM_DESC(disable_msi, "Disable Message Signaled Interrupt (MSI)");
79
/* Supported board types.  These values are stored in the driver_data
 * field of bnx2_pci_tbl[] entries and used as the index into
 * board_info[] below, so the two must stay in sync.
 */
typedef enum {
        BCM5706 = 0,
        NC370T,
        NC370I,
        BCM5706S,
        NC370F,
        BCM5708,
        BCM5708S,
        BCM5709,
        BCM5709S,
} board_t;
91
/* indexed by board_t, above */
/* Display names; entry [i] must correspond to board_t value i. */
static const struct {
        char *name;
} board_info[] __devinitdata = {
        { "Broadcom NetXtreme II BCM5706 1000Base-T" },
        { "HP NC370T Multifunction Gigabit Server Adapter" },
        { "HP NC370i Multifunction Gigabit Server Adapter" },
        { "Broadcom NetXtreme II BCM5706 1000Base-SX" },
        { "HP NC370F Multifunction Gigabit Server Adapter" },
        { "Broadcom NetXtreme II BCM5708 1000Base-T" },
        { "Broadcom NetXtreme II BCM5708 1000Base-SX" },
        { "Broadcom NetXtreme II BCM5709 1000Base-T" },
        { "Broadcom NetXtreme II BCM5709 1000Base-SX" },
        };
106
/* PCI match table.  HP OEM variants are listed before the generic
 * PCI_ANY_ID entries for the same device ID so they match first by
 * subsystem vendor/device.  The final field (driver_data) is a board_t
 * used to look up the product name in board_info[].
 */
static struct pci_device_id bnx2_pci_tbl[] = {
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
          PCI_VENDOR_ID_HP, 0x3101, 0, 0, NC370T },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
          PCI_VENDOR_ID_HP, 0x3106, 0, 0, NC370I },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706 },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708 },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
          PCI_VENDOR_ID_HP, 0x3102, 0, 0, NC370F },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706S },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708S,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708S },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709 },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709S,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709S },
        { 0, }
};
128
/* NVRAM descriptor table for pre-5709 chips.  Each entry carries the
 * hardware strapping/config register values that identify a particular
 * flash or EEPROM part, its access flags, page geometry, byte-address
 * mask, total size, and a human-readable name.
 * NOTE(review): the hex register values are hardware-derived magic
 * numbers; do not edit them without the chip documentation.
 */
static struct flash_spec flash_table[] =
{
#define BUFFERED_FLAGS          (BNX2_NV_BUFFERED | BNX2_NV_TRANSLATE)
#define NONBUFFERED_FLAGS       (BNX2_NV_WREN)
        /* Slow EEPROM */
        {0x00000000, 0x40830380, 0x009f0081, 0xa184a053, 0xaf000400,
         BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
         SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
         "EEPROM - slow"},
        /* Expansion entry 0001 */
        {0x08000002, 0x4b808201, 0x00050081, 0x03840253, 0xaf020406,
         NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
         SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
         "Entry 0001"},
        /* Saifun SA25F010 (non-buffered flash) */
        /* strap, cfg1, & write1 need updates */
        {0x04000001, 0x47808201, 0x00050081, 0x03840253, 0xaf020406,
         NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
         SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*2,
         "Non-buffered flash (128kB)"},
        /* Saifun SA25F020 (non-buffered flash) */
        /* strap, cfg1, & write1 need updates */
        {0x0c000003, 0x4f808201, 0x00050081, 0x03840253, 0xaf020406,
         NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
         SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*4,
         "Non-buffered flash (256kB)"},
        /* Expansion entry 0100 */
        {0x11000000, 0x53808201, 0x00050081, 0x03840253, 0xaf020406,
         NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
         SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
         "Entry 0100"},
        /* Entry 0101: ST M45PE10 (non-buffered flash, TetonII B0) */
        {0x19000002, 0x5b808201, 0x000500db, 0x03840253, 0xaf020406,
         NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
         ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*2,
         "Entry 0101: ST M45PE10 (128kB non-bufferred)"},
        /* Entry 0110: ST M45PE20 (non-buffered flash)*/
        {0x15000001, 0x57808201, 0x000500db, 0x03840253, 0xaf020406,
         NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
         ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*4,
         "Entry 0110: ST M45PE20 (256kB non-bufferred)"},
        /* Saifun SA25F005 (non-buffered flash) */
        /* strap, cfg1, & write1 need updates */
        {0x1d000003, 0x5f808201, 0x00050081, 0x03840253, 0xaf020406,
         NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
         SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE,
         "Non-buffered flash (64kB)"},
        /* Fast EEPROM */
        {0x22000000, 0x62808380, 0x009f0081, 0xa184a053, 0xaf000400,
         BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
         SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
         "EEPROM - fast"},
        /* Expansion entry 1001 */
        {0x2a000002, 0x6b808201, 0x00050081, 0x03840253, 0xaf020406,
         NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
         SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
         "Entry 1001"},
        /* Expansion entry 1010 */
        {0x26000001, 0x67808201, 0x00050081, 0x03840253, 0xaf020406,
         NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
         SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
         "Entry 1010"},
        /* ATMEL AT45DB011B (buffered flash) */
        {0x2e000003, 0x6e808273, 0x00570081, 0x68848353, 0xaf000400,
         BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
         BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE,
         "Buffered flash (128kB)"},
        /* Expansion entry 1100 */
        {0x33000000, 0x73808201, 0x00050081, 0x03840253, 0xaf020406,
         NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
         SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
         "Entry 1100"},
        /* Expansion entry 1101 */
        {0x3b000002, 0x7b808201, 0x00050081, 0x03840253, 0xaf020406,
         NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
         SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
         "Entry 1101"},
        /* Ateml Expansion entry 1110 */
        {0x37000001, 0x76808273, 0x00570081, 0x68848353, 0xaf000400,
         BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
         BUFFERED_FLASH_BYTE_ADDR_MASK, 0,
         "Entry 1110 (Atmel)"},
        /* ATMEL AT45DB021B (buffered flash) */
        {0x3f000003, 0x7e808273, 0x00570081, 0x68848353, 0xaf000400,
         BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
         BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE*2,
         "Buffered flash (256kB)"},
};
217
/* Single NVRAM descriptor used for the 5709 chip (designated
 * initializers; the fields mirror the flash_table[] columns above).
 */
static struct flash_spec flash_5709 = {
        .flags          = BNX2_NV_BUFFERED,
        .page_bits      = BCM5709_FLASH_PAGE_BITS,
        .page_size      = BCM5709_FLASH_PAGE_SIZE,
        .addr_mask      = BCM5709_FLASH_BYTE_ADDR_MASK,
        .total_size     = BUFFERED_FLASH_TOTAL_SIZE*2,
        .name           = "5709 Buffered flash (256kB)",
};

MODULE_DEVICE_TABLE(pci, bnx2_pci_tbl);
228
229 static inline u32 bnx2_tx_avail(struct bnx2 *bp, struct bnx2_napi *bnapi)
230 {
231         u32 diff;
232
233         smp_mb();
234
235         /* The ring uses 256 indices for 255 entries, one of them
236          * needs to be skipped.
237          */
238         diff = bp->tx_prod - bnapi->tx_cons;
239         if (unlikely(diff >= TX_DESC_CNT)) {
240                 diff &= 0xffff;
241                 if (diff == TX_DESC_CNT)
242                         diff = MAX_TX_DESC_CNT;
243         }
244         return (bp->tx_ring_size - diff);
245 }
246
247 static u32
248 bnx2_reg_rd_ind(struct bnx2 *bp, u32 offset)
249 {
250         u32 val;
251
252         spin_lock_bh(&bp->indirect_lock);
253         REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
254         val = REG_RD(bp, BNX2_PCICFG_REG_WINDOW);
255         spin_unlock_bh(&bp->indirect_lock);
256         return val;
257 }
258
/* Indirect register write: program the PCICFG window address, then the
 * data.  The two writes must stay in this order; indirect_lock keeps
 * the address/data pair atomic with respect to other indirect accesses.
 */
static void
bnx2_reg_wr_ind(struct bnx2 *bp, u32 offset, u32 val)
{
        spin_lock_bh(&bp->indirect_lock);
        REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
        REG_WR(bp, BNX2_PCICFG_REG_WINDOW, val);
        spin_unlock_bh(&bp->indirect_lock);
}
267
268 static void
269 bnx2_ctx_wr(struct bnx2 *bp, u32 cid_addr, u32 offset, u32 val)
270 {
271         offset += cid_addr;
272         spin_lock_bh(&bp->indirect_lock);
273         if (CHIP_NUM(bp) == CHIP_NUM_5709) {
274                 int i;
275
276                 REG_WR(bp, BNX2_CTX_CTX_DATA, val);
277                 REG_WR(bp, BNX2_CTX_CTX_CTRL,
278                        offset | BNX2_CTX_CTX_CTRL_WRITE_REQ);
279                 for (i = 0; i < 5; i++) {
280                         u32 val;
281                         val = REG_RD(bp, BNX2_CTX_CTX_CTRL);
282                         if ((val & BNX2_CTX_CTX_CTRL_WRITE_REQ) == 0)
283                                 break;
284                         udelay(5);
285                 }
286         } else {
287                 REG_WR(bp, BNX2_CTX_DATA_ADR, offset);
288                 REG_WR(bp, BNX2_CTX_DATA, val);
289         }
290         spin_unlock_bh(&bp->indirect_lock);
291 }
292
/* Read PHY register 'reg' over the EMAC MDIO interface into *val.
 *
 * If the chip is auto-polling the PHY, polling is suspended around the
 * transaction and re-enabled afterwards; the 40us delays give the MDIO
 * state machine time to settle after each mode change.
 *
 * Returns 0 on success, or -EBUSY (with *val zeroed) if the transaction
 * does not complete within the ~500us polling window.
 */
static int
bnx2_read_phy(struct bnx2 *bp, u32 reg, u32 *val)
{
        u32 val1;
        int i, ret;

        if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
                val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
                val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;

                REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
                REG_RD(bp, BNX2_EMAC_MDIO_MODE);        /* flush write */

                udelay(40);
        }

        /* Compose the command: PHY address, register number, read
         * opcode, and the START_BUSY trigger bit.
         */
        val1 = (bp->phy_addr << 21) | (reg << 16) |
                BNX2_EMAC_MDIO_COMM_COMMAND_READ | BNX2_EMAC_MDIO_COMM_DISEXT |
                BNX2_EMAC_MDIO_COMM_START_BUSY;
        REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);

        /* Poll for START_BUSY to clear, then latch the data bits. */
        for (i = 0; i < 50; i++) {
                udelay(10);

                val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
                if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
                        udelay(5);

                        val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
                        val1 &= BNX2_EMAC_MDIO_COMM_DATA;

                        break;
                }
        }

        if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY) {
                *val = 0x0;
                ret = -EBUSY;
        }
        else {
                *val = val1;
                ret = 0;
        }

        /* Restore hardware auto-polling if it was enabled on entry. */
        if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
                val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
                val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;

                REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
                REG_RD(bp, BNX2_EMAC_MDIO_MODE);

                udelay(40);
        }

        return ret;
}
349
/* Write 'val' to PHY register 'reg' over the EMAC MDIO interface.
 *
 * Mirrors bnx2_read_phy(): auto-polling is suspended around the
 * transaction and restored afterwards.  Returns 0 on success or
 * -EBUSY if the transaction does not complete within ~500us.
 */
static int
bnx2_write_phy(struct bnx2 *bp, u32 reg, u32 val)
{
        u32 val1;
        int i, ret;

        if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
                val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
                val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;

                REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
                REG_RD(bp, BNX2_EMAC_MDIO_MODE);        /* flush write */

                udelay(40);
        }

        /* PHY address, register number, write data, write opcode and
         * the START_BUSY trigger, all in one command word.
         */
        val1 = (bp->phy_addr << 21) | (reg << 16) | val |
                BNX2_EMAC_MDIO_COMM_COMMAND_WRITE |
                BNX2_EMAC_MDIO_COMM_START_BUSY | BNX2_EMAC_MDIO_COMM_DISEXT;
        REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);

        /* Poll for the hardware to clear START_BUSY. */
        for (i = 0; i < 50; i++) {
                udelay(10);

                val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
                if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
                        udelay(5);
                        break;
                }
        }

        if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)
                ret = -EBUSY;
        else
                ret = 0;

        /* Restore hardware auto-polling if it was enabled on entry. */
        if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
                val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
                val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;

                REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
                REG_RD(bp, BNX2_EMAC_MDIO_MODE);

                udelay(40);
        }

        return ret;
}
398
399 static void
400 bnx2_disable_int(struct bnx2 *bp)
401 {
402         int i;
403         struct bnx2_napi *bnapi;
404
405         for (i = 0; i < bp->irq_nvecs; i++) {
406                 bnapi = &bp->bnx2_napi[i];
407                 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
408                        BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
409         }
410         REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD);
411 }
412
/* Unmask interrupts on every configured vector and kick the host
 * coalescing block so any pending events generate an interrupt.
 */
static void
bnx2_enable_int(struct bnx2 *bp)
{
        int i;
        struct bnx2_napi *bnapi;

        for (i = 0; i < bp->irq_nvecs; i++) {
                bnapi = &bp->bnx2_napi[i];

                /* First write acks up to last_status_idx with the mask
                 * bit still set, the second clears the mask.
                 * NOTE(review): presumably this two-step avoids a
                 * spurious interrupt window — confirm with chip docs.
                 */
                REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
                       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
                       BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
                       bnapi->last_status_idx);

                REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
                       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
                       bnapi->last_status_idx);
        }
        REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
}
433
434 static void
435 bnx2_disable_int_sync(struct bnx2 *bp)
436 {
437         int i;
438
439         atomic_inc(&bp->intr_sem);
440         bnx2_disable_int(bp);
441         for (i = 0; i < bp->irq_nvecs; i++)
442                 synchronize_irq(bp->irq_tbl[i].vector);
443 }
444
445 static void
446 bnx2_napi_disable(struct bnx2 *bp)
447 {
448         int i;
449
450         for (i = 0; i < bp->irq_nvecs; i++)
451                 napi_disable(&bp->bnx2_napi[i].napi);
452 }
453
454 static void
455 bnx2_napi_enable(struct bnx2 *bp)
456 {
457         int i;
458
459         for (i = 0; i < bp->irq_nvecs; i++)
460                 napi_enable(&bp->bnx2_napi[i].napi);
461 }
462
463 static void
464 bnx2_netif_stop(struct bnx2 *bp)
465 {
466         bnx2_disable_int_sync(bp);
467         if (netif_running(bp->dev)) {
468                 bnx2_napi_disable(bp);
469                 netif_tx_disable(bp->dev);
470                 bp->dev->trans_start = jiffies; /* prevent tx timeout */
471         }
472 }
473
474 static void
475 bnx2_netif_start(struct bnx2 *bp)
476 {
477         if (atomic_dec_and_test(&bp->intr_sem)) {
478                 if (netif_running(bp->dev)) {
479                         netif_wake_queue(bp->dev);
480                         bnx2_napi_enable(bp);
481                         bnx2_enable_int(bp);
482                 }
483         }
484 }
485
486 static void
487 bnx2_free_mem(struct bnx2 *bp)
488 {
489         int i;
490
491         for (i = 0; i < bp->ctx_pages; i++) {
492                 if (bp->ctx_blk[i]) {
493                         pci_free_consistent(bp->pdev, BCM_PAGE_SIZE,
494                                             bp->ctx_blk[i],
495                                             bp->ctx_blk_mapping[i]);
496                         bp->ctx_blk[i] = NULL;
497                 }
498         }
499         if (bp->status_blk) {
500                 pci_free_consistent(bp->pdev, bp->status_stats_size,
501                                     bp->status_blk, bp->status_blk_mapping);
502                 bp->status_blk = NULL;
503                 bp->stats_blk = NULL;
504         }
505         if (bp->tx_desc_ring) {
506                 pci_free_consistent(bp->pdev, TXBD_RING_SIZE,
507                                     bp->tx_desc_ring, bp->tx_desc_mapping);
508                 bp->tx_desc_ring = NULL;
509         }
510         kfree(bp->tx_buf_ring);
511         bp->tx_buf_ring = NULL;
512         for (i = 0; i < bp->rx_max_ring; i++) {
513                 if (bp->rx_desc_ring[i])
514                         pci_free_consistent(bp->pdev, RXBD_RING_SIZE,
515                                             bp->rx_desc_ring[i],
516                                             bp->rx_desc_mapping[i]);
517                 bp->rx_desc_ring[i] = NULL;
518         }
519         vfree(bp->rx_buf_ring);
520         bp->rx_buf_ring = NULL;
521         for (i = 0; i < bp->rx_max_pg_ring; i++) {
522                 if (bp->rx_pg_desc_ring[i])
523                         pci_free_consistent(bp->pdev, RXBD_RING_SIZE,
524                                             bp->rx_pg_desc_ring[i],
525                                             bp->rx_pg_desc_mapping[i]);
526                 bp->rx_pg_desc_ring[i] = NULL;
527         }
528         if (bp->rx_pg_ring)
529                 vfree(bp->rx_pg_ring);
530         bp->rx_pg_ring = NULL;
531 }
532
/* Allocate every ring plus the combined status/statistics block.
 * On any failure, everything already allocated is released through
 * bnx2_free_mem() and -ENOMEM is returned.
 */
static int
bnx2_alloc_mem(struct bnx2 *bp)
{
        int i, status_blk_size;

        bp->tx_buf_ring = kzalloc(SW_TXBD_RING_SIZE, GFP_KERNEL);
        if (bp->tx_buf_ring == NULL)
                return -ENOMEM;

        bp->tx_desc_ring = pci_alloc_consistent(bp->pdev, TXBD_RING_SIZE,
                                                &bp->tx_desc_mapping);
        if (bp->tx_desc_ring == NULL)
                goto alloc_mem_err;

        /* Software RX ring can be large, so it is vmalloc'ed (and
         * zeroed by hand — vmalloc does not clear).
         */
        bp->rx_buf_ring = vmalloc(SW_RXBD_RING_SIZE * bp->rx_max_ring);
        if (bp->rx_buf_ring == NULL)
                goto alloc_mem_err;

        memset(bp->rx_buf_ring, 0, SW_RXBD_RING_SIZE * bp->rx_max_ring);

        for (i = 0; i < bp->rx_max_ring; i++) {
                bp->rx_desc_ring[i] =
                        pci_alloc_consistent(bp->pdev, RXBD_RING_SIZE,
                                             &bp->rx_desc_mapping[i]);
                if (bp->rx_desc_ring[i] == NULL)
                        goto alloc_mem_err;

        }

        /* The page ring is only allocated when rx_pg_ring_size is
         * non-zero — NOTE(review): presumably the jumbo/page-buffer
         * RX path; confirm against the rx code.
         */
        if (bp->rx_pg_ring_size) {
                bp->rx_pg_ring = vmalloc(SW_RXPG_RING_SIZE *
                                         bp->rx_max_pg_ring);
                if (bp->rx_pg_ring == NULL)
                        goto alloc_mem_err;

                memset(bp->rx_pg_ring, 0, SW_RXPG_RING_SIZE *
                       bp->rx_max_pg_ring);
        }

        for (i = 0; i < bp->rx_max_pg_ring; i++) {
                bp->rx_pg_desc_ring[i] =
                        pci_alloc_consistent(bp->pdev, RXBD_RING_SIZE,
                                             &bp->rx_pg_desc_mapping[i]);
                if (bp->rx_pg_desc_ring[i] == NULL)
                        goto alloc_mem_err;

        }

        /* Combine status and statistics blocks into one allocation. */
        status_blk_size = L1_CACHE_ALIGN(sizeof(struct status_block));
        if (bp->flags & BNX2_FLAG_MSIX_CAP)
                status_blk_size = L1_CACHE_ALIGN(BNX2_MAX_MSIX_HW_VEC *
                                                 BNX2_SBLK_MSIX_ALIGN_SIZE);
        bp->status_stats_size = status_blk_size +
                                sizeof(struct statistics_block);

        bp->status_blk = pci_alloc_consistent(bp->pdev, bp->status_stats_size,
                                              &bp->status_blk_mapping);
        if (bp->status_blk == NULL)
                goto alloc_mem_err;

        memset(bp->status_blk, 0, bp->status_stats_size);

        bp->bnx2_napi[0].status_blk = bp->status_blk;
        if (bp->flags & BNX2_FLAG_MSIX_CAP) {
                /* Each extra MSI-X vector gets its own aligned slice of
                 * the status block; int_num carries the vector id in
                 * bits 31:24 (ORed into BNX2_PCICFG_INT_ACK_CMD later).
                 */
                for (i = 1; i < BNX2_MAX_MSIX_VEC; i++) {
                        struct bnx2_napi *bnapi = &bp->bnx2_napi[i];

                        bnapi->status_blk_msix = (void *)
                                ((unsigned long) bp->status_blk +
                                 BNX2_SBLK_MSIX_ALIGN_SIZE * i);
                        bnapi->int_num = i << 24;
                }
        }

        /* Statistics block sits right after the status block(s). */
        bp->stats_blk = (void *) ((unsigned long) bp->status_blk +
                                  status_blk_size);

        bp->stats_blk_mapping = bp->status_blk_mapping + status_blk_size;

        if (CHIP_NUM(bp) == CHIP_NUM_5709) {
                /* 5709 context memory is kept in host pages. */
                bp->ctx_pages = 0x2000 / BCM_PAGE_SIZE;
                if (bp->ctx_pages == 0)
                        bp->ctx_pages = 1;
                for (i = 0; i < bp->ctx_pages; i++) {
                        bp->ctx_blk[i] = pci_alloc_consistent(bp->pdev,
                                                BCM_PAGE_SIZE,
                                                &bp->ctx_blk_mapping[i]);
                        if (bp->ctx_blk[i] == NULL)
                                goto alloc_mem_err;
                }
        }
        return 0;

alloc_mem_err:
        bnx2_free_mem(bp);
        return -ENOMEM;
}
631
/* Report the current link state (speed/duplex/autoneg bits) to the
 * bootcode through the shared memory BNX2_LINK_STATUS word.  Skipped
 * entirely when the PHY is managed remotely.
 */
static void
bnx2_report_fw_link(struct bnx2 *bp)
{
        u32 fw_link_status = 0;

        if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
                return;

        if (bp->link_up) {
                u32 bmsr;

                switch (bp->line_speed) {
                case SPEED_10:
                        if (bp->duplex == DUPLEX_HALF)
                                fw_link_status = BNX2_LINK_STATUS_10HALF;
                        else
                                fw_link_status = BNX2_LINK_STATUS_10FULL;
                        break;
                case SPEED_100:
                        if (bp->duplex == DUPLEX_HALF)
                                fw_link_status = BNX2_LINK_STATUS_100HALF;
                        else
                                fw_link_status = BNX2_LINK_STATUS_100FULL;
                        break;
                case SPEED_1000:
                        if (bp->duplex == DUPLEX_HALF)
                                fw_link_status = BNX2_LINK_STATUS_1000HALF;
                        else
                                fw_link_status = BNX2_LINK_STATUS_1000FULL;
                        break;
                case SPEED_2500:
                        if (bp->duplex == DUPLEX_HALF)
                                fw_link_status = BNX2_LINK_STATUS_2500HALF;
                        else
                                fw_link_status = BNX2_LINK_STATUS_2500FULL;
                        break;
                }

                fw_link_status |= BNX2_LINK_STATUS_LINK_UP;

                if (bp->autoneg) {
                        fw_link_status |= BNX2_LINK_STATUS_AN_ENABLED;

                        /* BMSR is read twice — NOTE(review): some BMSR
                         * bits are latched, so the second read returns
                         * the current state; confirm against MII spec.
                         */
                        bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
                        bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);

                        if (!(bmsr & BMSR_ANEGCOMPLETE) ||
                            bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT)
                                fw_link_status |= BNX2_LINK_STATUS_PARALLEL_DET;
                        else
                                fw_link_status |= BNX2_LINK_STATUS_AN_COMPLETE;
                }
        }
        else
                fw_link_status = BNX2_LINK_STATUS_LINK_DOWN;

        REG_WR_IND(bp, bp->shmem_base + BNX2_LINK_STATUS, fw_link_status);
}
690
691 static char *
692 bnx2_xceiver_str(struct bnx2 *bp)
693 {
694         return ((bp->phy_port == PORT_FIBRE) ? "SerDes" :
695                 ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) ? "Remote Copper" :
696                  "Copper"));
697 }
698
/* Log the link state, toggle the carrier flag accordingly, and forward
 * the state to the bootcode via bnx2_report_fw_link().  The message is
 * built from several consecutive printk() calls; only the first one
 * carries the log level.
 */
static void
bnx2_report_link(struct bnx2 *bp)
{
        if (bp->link_up) {
                netif_carrier_on(bp->dev);
                printk(KERN_INFO PFX "%s NIC %s Link is Up, ", bp->dev->name,
                       bnx2_xceiver_str(bp));

                printk("%d Mbps ", bp->line_speed);

                if (bp->duplex == DUPLEX_FULL)
                        printk("full duplex");
                else
                        printk("half duplex");

                if (bp->flow_ctrl) {
                        if (bp->flow_ctrl & FLOW_CTRL_RX) {
                                printk(", receive ");
                                if (bp->flow_ctrl & FLOW_CTRL_TX)
                                        printk("& transmit ");
                        }
                        else {
                                printk(", transmit ");
                        }
                        printk("flow control ON");
                }
                printk("\n");
        }
        else {
                netif_carrier_off(bp->dev);
                printk(KERN_ERR PFX "%s NIC %s Link is Down\n", bp->dev->name,
                       bnx2_xceiver_str(bp));
        }

        bnx2_report_fw_link(bp);
}
735
/* Compute bp->flow_ctrl (FLOW_CTRL_TX/RX) for the established link.
 *
 * If speed or flow control is not autonegotiated, the requested setting
 * is applied directly (full duplex only).  Otherwise the result is
 * resolved from the local and partner pause advertisements per the
 * 802.3 priority rules; the 5708 SerDes exposes the resolved result in
 * a status register instead.
 */
static void
bnx2_resolve_flow_ctrl(struct bnx2 *bp)
{
        u32 local_adv, remote_adv;

        bp->flow_ctrl = 0;
        if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
                (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {

                if (bp->duplex == DUPLEX_FULL) {
                        bp->flow_ctrl = bp->req_flow_ctrl;
                }
                return;
        }

        /* Pause is only meaningful on full-duplex links. */
        if (bp->duplex != DUPLEX_FULL) {
                return;
        }

        /* 5708 SerDes: the chip reports the resolved pause state
         * directly, no need to inspect the advertisements.
         */
        if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
            (CHIP_NUM(bp) == CHIP_NUM_5708)) {
                u32 val;

                bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
                if (val & BCM5708S_1000X_STAT1_TX_PAUSE)
                        bp->flow_ctrl |= FLOW_CTRL_TX;
                if (val & BCM5708S_1000X_STAT1_RX_PAUSE)
                        bp->flow_ctrl |= FLOW_CTRL_RX;
                return;
        }

        bnx2_read_phy(bp, bp->mii_adv, &local_adv);
        bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);

        /* SerDes uses the 1000Base-X pause bit layout; translate both
         * sides to the copper-style CAP/ASYM encoding so one resolution
         * table below handles both media.
         */
        if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
                u32 new_local_adv = 0;
                u32 new_remote_adv = 0;

                if (local_adv & ADVERTISE_1000XPAUSE)
                        new_local_adv |= ADVERTISE_PAUSE_CAP;
                if (local_adv & ADVERTISE_1000XPSE_ASYM)
                        new_local_adv |= ADVERTISE_PAUSE_ASYM;
                if (remote_adv & ADVERTISE_1000XPAUSE)
                        new_remote_adv |= ADVERTISE_PAUSE_CAP;
                if (remote_adv & ADVERTISE_1000XPSE_ASYM)
                        new_remote_adv |= ADVERTISE_PAUSE_ASYM;

                local_adv = new_local_adv;
                remote_adv = new_remote_adv;
        }

        /* See Table 28B-3 of 802.3ab-1999 spec. */
        if (local_adv & ADVERTISE_PAUSE_CAP) {
                if(local_adv & ADVERTISE_PAUSE_ASYM) {
                        if (remote_adv & ADVERTISE_PAUSE_CAP) {
                                bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
                        }
                        else if (remote_adv & ADVERTISE_PAUSE_ASYM) {
                                bp->flow_ctrl = FLOW_CTRL_RX;
                        }
                }
                else {
                        if (remote_adv & ADVERTISE_PAUSE_CAP) {
                                bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
                        }
                }
        }
        else if (local_adv & ADVERTISE_PAUSE_ASYM) {
                if ((remote_adv & ADVERTISE_PAUSE_CAP) &&
                        (remote_adv & ADVERTISE_PAUSE_ASYM)) {

                        bp->flow_ctrl = FLOW_CTRL_TX;
                }
        }
}
811
/* Record speed/duplex for a 5709 SerDes link that just came up.
 * Reads the GP_STATUS block (switching the PHY block address around the
 * read), then decodes speed and duplex from it.  When speed autoneg is
 * off, the requested settings are used instead.  Always returns 0.
 */
static int
bnx2_5709s_linkup(struct bnx2 *bp)
{
        u32 val, speed;

        bp->link_up = 1;

        bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_GP_STATUS);
        bnx2_read_phy(bp, MII_BNX2_GP_TOP_AN_STATUS1, &val);
        bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);

        if ((bp->autoneg & AUTONEG_SPEED) == 0) {
                bp->line_speed = bp->req_line_speed;
                bp->duplex = bp->req_duplex;
                return 0;
        }
        speed = val & MII_BNX2_GP_TOP_AN_SPEED_MSK;
        switch (speed) {
                case MII_BNX2_GP_TOP_AN_SPEED_10:
                        bp->line_speed = SPEED_10;
                        break;
                case MII_BNX2_GP_TOP_AN_SPEED_100:
                        bp->line_speed = SPEED_100;
                        break;
                case MII_BNX2_GP_TOP_AN_SPEED_1G:
                case MII_BNX2_GP_TOP_AN_SPEED_1GKV:
                        bp->line_speed = SPEED_1000;
                        break;
                case MII_BNX2_GP_TOP_AN_SPEED_2_5G:
                        bp->line_speed = SPEED_2500;
                        break;
        }
        if (val & MII_BNX2_GP_TOP_AN_FD)
                bp->duplex = DUPLEX_FULL;
        else
                bp->duplex = DUPLEX_HALF;
        return 0;
}
850
851 static int
852 bnx2_5708s_linkup(struct bnx2 *bp)
853 {
854         u32 val;
855
856         bp->link_up = 1;
857         bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
858         switch (val & BCM5708S_1000X_STAT1_SPEED_MASK) {
859                 case BCM5708S_1000X_STAT1_SPEED_10:
860                         bp->line_speed = SPEED_10;
861                         break;
862                 case BCM5708S_1000X_STAT1_SPEED_100:
863                         bp->line_speed = SPEED_100;
864                         break;
865                 case BCM5708S_1000X_STAT1_SPEED_1G:
866                         bp->line_speed = SPEED_1000;
867                         break;
868                 case BCM5708S_1000X_STAT1_SPEED_2G5:
869                         bp->line_speed = SPEED_2500;
870                         break;
871         }
872         if (val & BCM5708S_1000X_STAT1_FD)
873                 bp->duplex = DUPLEX_FULL;
874         else
875                 bp->duplex = DUPLEX_HALF;
876
877         return 0;
878 }
879
880 static int
881 bnx2_5706s_linkup(struct bnx2 *bp)
882 {
883         u32 bmcr, local_adv, remote_adv, common;
884
885         bp->link_up = 1;
886         bp->line_speed = SPEED_1000;
887
888         bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
889         if (bmcr & BMCR_FULLDPLX) {
890                 bp->duplex = DUPLEX_FULL;
891         }
892         else {
893                 bp->duplex = DUPLEX_HALF;
894         }
895
896         if (!(bmcr & BMCR_ANENABLE)) {
897                 return 0;
898         }
899
900         bnx2_read_phy(bp, bp->mii_adv, &local_adv);
901         bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);
902
903         common = local_adv & remote_adv;
904         if (common & (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL)) {
905
906                 if (common & ADVERTISE_1000XFULL) {
907                         bp->duplex = DUPLEX_FULL;
908                 }
909                 else {
910                         bp->duplex = DUPLEX_HALF;
911                 }
912         }
913
914         return 0;
915 }
916
917 static int
918 bnx2_copper_linkup(struct bnx2 *bp)
919 {
920         u32 bmcr;
921
922         bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
923         if (bmcr & BMCR_ANENABLE) {
924                 u32 local_adv, remote_adv, common;
925
926                 bnx2_read_phy(bp, MII_CTRL1000, &local_adv);
927                 bnx2_read_phy(bp, MII_STAT1000, &remote_adv);
928
929                 common = local_adv & (remote_adv >> 2);
930                 if (common & ADVERTISE_1000FULL) {
931                         bp->line_speed = SPEED_1000;
932                         bp->duplex = DUPLEX_FULL;
933                 }
934                 else if (common & ADVERTISE_1000HALF) {
935                         bp->line_speed = SPEED_1000;
936                         bp->duplex = DUPLEX_HALF;
937                 }
938                 else {
939                         bnx2_read_phy(bp, bp->mii_adv, &local_adv);
940                         bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);
941
942                         common = local_adv & remote_adv;
943                         if (common & ADVERTISE_100FULL) {
944                                 bp->line_speed = SPEED_100;
945                                 bp->duplex = DUPLEX_FULL;
946                         }
947                         else if (common & ADVERTISE_100HALF) {
948                                 bp->line_speed = SPEED_100;
949                                 bp->duplex = DUPLEX_HALF;
950                         }
951                         else if (common & ADVERTISE_10FULL) {
952                                 bp->line_speed = SPEED_10;
953                                 bp->duplex = DUPLEX_FULL;
954                         }
955                         else if (common & ADVERTISE_10HALF) {
956                                 bp->line_speed = SPEED_10;
957                                 bp->duplex = DUPLEX_HALF;
958                         }
959                         else {
960                                 bp->line_speed = 0;
961                                 bp->link_up = 0;
962                         }
963                 }
964         }
965         else {
966                 if (bmcr & BMCR_SPEED100) {
967                         bp->line_speed = SPEED_100;
968                 }
969                 else {
970                         bp->line_speed = SPEED_10;
971                 }
972                 if (bmcr & BMCR_FULLDPLX) {
973                         bp->duplex = DUPLEX_FULL;
974                 }
975                 else {
976                         bp->duplex = DUPLEX_HALF;
977                 }
978         }
979
980         return 0;
981 }
982
/* Program the EMAC to match the current software link state: port
 * mode (MII / MII-10M / GMII / 2.5G), duplex, TX lengths for the
 * 1000-half case, and RX/TX pause enables from bp->flow_ctrl.
 * Finally acknowledges the EMAC link-change interrupt.
 * Always returns 0.
 */
static int
bnx2_set_mac_link(struct bnx2 *bp)
{
	u32 val;

	/* NOTE(review): 0x2620 / 0x26ff are Broadcom-supplied TX-lengths
	 * values (the larger one only for 1000 Mbps half duplex); exact
	 * field meanings are not documented here.
	 */
	REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x2620);
	if (bp->link_up && (bp->line_speed == SPEED_1000) &&
		(bp->duplex == DUPLEX_HALF)) {
		REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x26ff);
	}

	/* Configure the EMAC mode register. */
	val = REG_RD(bp, BNX2_EMAC_MODE);

	val &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
		BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
		BNX2_EMAC_MODE_25G_MODE);

	if (bp->link_up) {
		switch (bp->line_speed) {
			case SPEED_10:
				/* Only chips newer than the 5706 have a
				 * dedicated 10M MII port mode.
				 */
				if (CHIP_NUM(bp) != CHIP_NUM_5706) {
					val |= BNX2_EMAC_MODE_PORT_MII_10M;
					break;
				}
				/* fall through */
			case SPEED_100:
				val |= BNX2_EMAC_MODE_PORT_MII;
				break;
			case SPEED_2500:
				/* 2.5G is GMII port mode plus the 25G flag. */
				val |= BNX2_EMAC_MODE_25G_MODE;
				/* fall through */
			case SPEED_1000:
				val |= BNX2_EMAC_MODE_PORT_GMII;
				break;
		}
	}
	else {
		/* Link down: leave the port in GMII mode. */
		val |= BNX2_EMAC_MODE_PORT_GMII;
	}

	/* Set the MAC to operate in the appropriate duplex mode. */
	if (bp->duplex == DUPLEX_HALF)
		val |= BNX2_EMAC_MODE_HALF_DUPLEX;
	REG_WR(bp, BNX2_EMAC_MODE, val);

	/* Enable/disable rx PAUSE. */
	bp->rx_mode &= ~BNX2_EMAC_RX_MODE_FLOW_EN;

	if (bp->flow_ctrl & FLOW_CTRL_RX)
		bp->rx_mode |= BNX2_EMAC_RX_MODE_FLOW_EN;
	REG_WR(bp, BNX2_EMAC_RX_MODE, bp->rx_mode);

	/* Enable/disable tx PAUSE. */
	val = REG_RD(bp, BNX2_EMAC_TX_MODE);
	val &= ~BNX2_EMAC_TX_MODE_FLOW_EN;

	if (bp->flow_ctrl & FLOW_CTRL_TX)
		val |= BNX2_EMAC_TX_MODE_FLOW_EN;
	REG_WR(bp, BNX2_EMAC_TX_MODE, val);

	/* Acknowledge the interrupt. */
	REG_WR(bp, BNX2_EMAC_STATUS, BNX2_EMAC_STATUS_LINK_CHANGE);

	return 0;
}
1049
1050 static void
1051 bnx2_enable_bmsr1(struct bnx2 *bp)
1052 {
1053         if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
1054             (CHIP_NUM(bp) == CHIP_NUM_5709))
1055                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1056                                MII_BNX2_BLK_ADDR_GP_STATUS);
1057 }
1058
1059 static void
1060 bnx2_disable_bmsr1(struct bnx2 *bp)
1061 {
1062         if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
1063             (CHIP_NUM(bp) == CHIP_NUM_5709))
1064                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1065                                MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1066 }
1067
1068 static int
1069 bnx2_test_and_enable_2g5(struct bnx2 *bp)
1070 {
1071         u32 up1;
1072         int ret = 1;
1073
1074         if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
1075                 return 0;
1076
1077         if (bp->autoneg & AUTONEG_SPEED)
1078                 bp->advertising |= ADVERTISED_2500baseX_Full;
1079
1080         if (CHIP_NUM(bp) == CHIP_NUM_5709)
1081                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
1082
1083         bnx2_read_phy(bp, bp->mii_up1, &up1);
1084         if (!(up1 & BCM5708S_UP1_2G5)) {
1085                 up1 |= BCM5708S_UP1_2G5;
1086                 bnx2_write_phy(bp, bp->mii_up1, up1);
1087                 ret = 0;
1088         }
1089
1090         if (CHIP_NUM(bp) == CHIP_NUM_5709)
1091                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1092                                MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1093
1094         return ret;
1095 }
1096
1097 static int
1098 bnx2_test_and_disable_2g5(struct bnx2 *bp)
1099 {
1100         u32 up1;
1101         int ret = 0;
1102
1103         if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
1104                 return 0;
1105
1106         if (CHIP_NUM(bp) == CHIP_NUM_5709)
1107                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
1108
1109         bnx2_read_phy(bp, bp->mii_up1, &up1);
1110         if (up1 & BCM5708S_UP1_2G5) {
1111                 up1 &= ~BCM5708S_UP1_2G5;
1112                 bnx2_write_phy(bp, bp->mii_up1, up1);
1113                 ret = 1;
1114         }
1115
1116         if (CHIP_NUM(bp) == CHIP_NUM_5709)
1117                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1118                                MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1119
1120         return ret;
1121 }
1122
1123 static void
1124 bnx2_enable_forced_2g5(struct bnx2 *bp)
1125 {
1126         u32 bmcr;
1127
1128         if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
1129                 return;
1130
1131         if (CHIP_NUM(bp) == CHIP_NUM_5709) {
1132                 u32 val;
1133
1134                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1135                                MII_BNX2_BLK_ADDR_SERDES_DIG);
1136                 bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val);
1137                 val &= ~MII_BNX2_SD_MISC1_FORCE_MSK;
1138                 val |= MII_BNX2_SD_MISC1_FORCE | MII_BNX2_SD_MISC1_FORCE_2_5G;
1139                 bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);
1140
1141                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1142                                MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1143                 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1144
1145         } else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
1146                 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1147                 bmcr |= BCM5708S_BMCR_FORCE_2500;
1148         }
1149
1150         if (bp->autoneg & AUTONEG_SPEED) {
1151                 bmcr &= ~BMCR_ANENABLE;
1152                 if (bp->req_duplex == DUPLEX_FULL)
1153                         bmcr |= BMCR_FULLDPLX;
1154         }
1155         bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
1156 }
1157
1158 static void
1159 bnx2_disable_forced_2g5(struct bnx2 *bp)
1160 {
1161         u32 bmcr;
1162
1163         if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
1164                 return;
1165
1166         if (CHIP_NUM(bp) == CHIP_NUM_5709) {
1167                 u32 val;
1168
1169                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1170                                MII_BNX2_BLK_ADDR_SERDES_DIG);
1171                 bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val);
1172                 val &= ~MII_BNX2_SD_MISC1_FORCE;
1173                 bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);
1174
1175                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1176                                MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1177                 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1178
1179         } else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
1180                 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1181                 bmcr &= ~BCM5708S_BMCR_FORCE_2500;
1182         }
1183
1184         if (bp->autoneg & AUTONEG_SPEED)
1185                 bmcr |= BMCR_SPEED1000 | BMCR_ANENABLE | BMCR_ANRESTART;
1186         bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
1187 }
1188
1189 static void
1190 bnx2_5706s_force_link_dn(struct bnx2 *bp, int start)
1191 {
1192         u32 val;
1193
1194         bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS, MII_EXPAND_SERDES_CTL);
1195         bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &val);
1196         if (start)
1197                 bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val & 0xff0f);
1198         else
1199                 bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val | 0xc0);
1200 }
1201
/* Poll the PHY and bring the driver's software link state in sync,
 * reporting changes and reprogramming the MAC to match.
 *
 * In MAC/PHY loopback the link is simply declared up; on boards with
 * a firmware-managed remote PHY this function does nothing (link
 * state arrives through firmware events instead).
 *
 * NOTE(review): presumably called with bp->phy_lock held, like the
 * other PHY routines here — verify at call sites.  Always returns 0.
 */
static int
bnx2_set_link(struct bnx2 *bp)
{
	u32 bmsr;
	u8 link_up;

	if (bp->loopback == MAC_LOOPBACK || bp->loopback == PHY_LOOPBACK) {
		bp->link_up = 1;
		return 0;
	}

	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
		return 0;

	/* Remember the previous state so we only report changes. */
	link_up = bp->link_up;

	/* Read BMSR twice: the MII link-status bit is latched low, so
	 * the first read may still show a stale link-down event.
	 */
	bnx2_enable_bmsr1(bp);
	bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
	bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
	bnx2_disable_bmsr1(bp);

	if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
	    (CHIP_NUM(bp) == CHIP_NUM_5706)) {
		u32 val;

		/* Release any forced-down state before sampling. */
		if (bp->phy_flags & BNX2_PHY_FLAG_FORCED_DOWN) {
			bnx2_5706s_force_link_dn(bp, 0);
			bp->phy_flags &= ~BNX2_PHY_FLAG_FORCED_DOWN;
		}
		/* On 5706 SerDes, take link state from the EMAC status
		 * register rather than the PHY's BMSR bit.
		 */
		val = REG_RD(bp, BNX2_EMAC_STATUS);
		if (val & BNX2_EMAC_STATUS_LINK)
			bmsr |= BMSR_LSTATUS;
		else
			bmsr &= ~BMSR_LSTATUS;
	}

	if (bmsr & BMSR_LSTATUS) {
		bp->link_up = 1;

		/* Decode speed/duplex with the chip-specific helper. */
		if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
			if (CHIP_NUM(bp) == CHIP_NUM_5706)
				bnx2_5706s_linkup(bp);
			else if (CHIP_NUM(bp) == CHIP_NUM_5708)
				bnx2_5708s_linkup(bp);
			else if (CHIP_NUM(bp) == CHIP_NUM_5709)
				bnx2_5709s_linkup(bp);
		}
		else {
			bnx2_copper_linkup(bp);
		}
		bnx2_resolve_flow_ctrl(bp);
	}
	else {
		/* Link lost: back out of forced 2.5G and parallel-detect
		 * states so the next autoneg starts clean.
		 */
		if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
		    (bp->autoneg & AUTONEG_SPEED))
			bnx2_disable_forced_2g5(bp);

		if (bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT) {
			u32 bmcr;

			bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
			bmcr |= BMCR_ANENABLE;
			bnx2_write_phy(bp, bp->mii_bmcr, bmcr);

			bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;
		}
		bp->link_up = 0;
	}

	if (bp->link_up != link_up) {
		bnx2_report_link(bp);
	}

	/* Always reprogram the MAC to the (possibly unchanged) state. */
	bnx2_set_mac_link(bp);

	return 0;
}
1279
1280 static int
1281 bnx2_reset_phy(struct bnx2 *bp)
1282 {
1283         int i;
1284         u32 reg;
1285
1286         bnx2_write_phy(bp, bp->mii_bmcr, BMCR_RESET);
1287
1288 #define PHY_RESET_MAX_WAIT 100
1289         for (i = 0; i < PHY_RESET_MAX_WAIT; i++) {
1290                 udelay(10);
1291
1292                 bnx2_read_phy(bp, bp->mii_bmcr, &reg);
1293                 if (!(reg & BMCR_RESET)) {
1294                         udelay(20);
1295                         break;
1296                 }
1297         }
1298         if (i == PHY_RESET_MAX_WAIT) {
1299                 return -EBUSY;
1300         }
1301         return 0;
1302 }
1303
1304 static u32
1305 bnx2_phy_get_pause_adv(struct bnx2 *bp)
1306 {
1307         u32 adv = 0;
1308
1309         if ((bp->req_flow_ctrl & (FLOW_CTRL_RX | FLOW_CTRL_TX)) ==
1310                 (FLOW_CTRL_RX | FLOW_CTRL_TX)) {
1311
1312                 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1313                         adv = ADVERTISE_1000XPAUSE;
1314                 }
1315                 else {
1316                         adv = ADVERTISE_PAUSE_CAP;
1317                 }
1318         }
1319         else if (bp->req_flow_ctrl & FLOW_CTRL_TX) {
1320                 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1321                         adv = ADVERTISE_1000XPSE_ASYM;
1322                 }
1323                 else {
1324                         adv = ADVERTISE_PAUSE_ASYM;
1325                 }
1326         }
1327         else if (bp->req_flow_ctrl & FLOW_CTRL_RX) {
1328                 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1329                         adv = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
1330                 }
1331                 else {
1332                         adv = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
1333                 }
1334         }
1335         return adv;
1336 }
1337
1338 static int bnx2_fw_sync(struct bnx2 *, u32, int);
1339
1340 static int
1341 bnx2_setup_remote_phy(struct bnx2 *bp, u8 port)
1342 {
1343         u32 speed_arg = 0, pause_adv;
1344
1345         pause_adv = bnx2_phy_get_pause_adv(bp);
1346
1347         if (bp->autoneg & AUTONEG_SPEED) {
1348                 speed_arg |= BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG;
1349                 if (bp->advertising & ADVERTISED_10baseT_Half)
1350                         speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_10HALF;
1351                 if (bp->advertising & ADVERTISED_10baseT_Full)
1352                         speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_10FULL;
1353                 if (bp->advertising & ADVERTISED_100baseT_Half)
1354                         speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_100HALF;
1355                 if (bp->advertising & ADVERTISED_100baseT_Full)
1356                         speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_100FULL;
1357                 if (bp->advertising & ADVERTISED_1000baseT_Full)
1358                         speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_1GFULL;
1359                 if (bp->advertising & ADVERTISED_2500baseX_Full)
1360                         speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_2G5FULL;
1361         } else {
1362                 if (bp->req_line_speed == SPEED_2500)
1363                         speed_arg = BNX2_NETLINK_SET_LINK_SPEED_2G5FULL;
1364                 else if (bp->req_line_speed == SPEED_1000)
1365                         speed_arg = BNX2_NETLINK_SET_LINK_SPEED_1GFULL;
1366                 else if (bp->req_line_speed == SPEED_100) {
1367                         if (bp->req_duplex == DUPLEX_FULL)
1368                                 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_100FULL;
1369                         else
1370                                 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_100HALF;
1371                 } else if (bp->req_line_speed == SPEED_10) {
1372                         if (bp->req_duplex == DUPLEX_FULL)
1373                                 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_10FULL;
1374                         else
1375                                 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_10HALF;
1376                 }
1377         }
1378
1379         if (pause_adv & (ADVERTISE_1000XPAUSE | ADVERTISE_PAUSE_CAP))
1380                 speed_arg |= BNX2_NETLINK_SET_LINK_FC_SYM_PAUSE;
1381         if (pause_adv & (ADVERTISE_1000XPSE_ASYM | ADVERTISE_1000XPSE_ASYM))
1382                 speed_arg |= BNX2_NETLINK_SET_LINK_FC_ASYM_PAUSE;
1383
1384         if (port == PORT_TP)
1385                 speed_arg |= BNX2_NETLINK_SET_LINK_PHY_APP_REMOTE |
1386                              BNX2_NETLINK_SET_LINK_ETH_AT_WIRESPEED;
1387
1388         REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_MB_ARG0, speed_arg);
1389
1390         spin_unlock_bh(&bp->phy_lock);
1391         bnx2_fw_sync(bp, BNX2_DRV_MSG_CODE_CMD_SET_LINK, 0);
1392         spin_lock_bh(&bp->phy_lock);
1393
1394         return 0;
1395 }
1396
/* Program a local SerDes PHY according to bp->autoneg and the
 * requested speed/duplex/flow-control settings.
 *
 * Remote-PHY boards are delegated to bnx2_setup_remote_phy().  With
 * speed autoneg disabled, the speed is forced via BMCR (plus the
 * per-chip 2.5G force helpers), bouncing the link when the settings
 * changed so the partner notices.  With autoneg enabled, the 1000X
 * advertisement is rebuilt and autonegotiation restarted only when
 * something actually changed.
 *
 * Caller holds bp->phy_lock (dropped briefly around msleep below).
 * Always returns 0.
 */
static int
bnx2_setup_serdes_phy(struct bnx2 *bp, u8 port)
{
	u32 adv, bmcr;
	u32 new_adv = 0;

	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
		return (bnx2_setup_remote_phy(bp, port));

	if (!(bp->autoneg & AUTONEG_SPEED)) {
		u32 new_bmcr;
		int force_link_down = 0;

		/* Changing the 2.5G enable bit requires a link bounce. */
		if (bp->req_line_speed == SPEED_2500) {
			if (!bnx2_test_and_enable_2g5(bp))
				force_link_down = 1;
		} else if (bp->req_line_speed == SPEED_1000) {
			if (bnx2_test_and_disable_2g5(bp))
				force_link_down = 1;
		}
		bnx2_read_phy(bp, bp->mii_adv, &adv);
		adv &= ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF);

		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
		new_bmcr = bmcr & ~BMCR_ANENABLE;
		new_bmcr |= BMCR_SPEED1000;

		if (CHIP_NUM(bp) == CHIP_NUM_5709) {
			if (bp->req_line_speed == SPEED_2500)
				bnx2_enable_forced_2g5(bp);
			else if (bp->req_line_speed == SPEED_1000) {
				bnx2_disable_forced_2g5(bp);
				/* NOTE(review): 0x2000 presumably clears a
				 * forced-speed BMCR bit on the 5709 —
				 * confirm against the BCM5709 BMCR layout.
				 */
				new_bmcr &= ~0x2000;
			}

		} else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
			if (bp->req_line_speed == SPEED_2500)
				new_bmcr |= BCM5708S_BMCR_FORCE_2500;
			else
				new_bmcr = bmcr & ~BCM5708S_BMCR_FORCE_2500;
		}

		if (bp->req_duplex == DUPLEX_FULL) {
			adv |= ADVERTISE_1000XFULL;
			new_bmcr |= BMCR_FULLDPLX;
		}
		else {
			adv |= ADVERTISE_1000XHALF;
			new_bmcr &= ~BMCR_FULLDPLX;
		}
		if ((new_bmcr != bmcr) || (force_link_down)) {
			/* Force a link down visible on the other side */
			if (bp->link_up) {
				/* Strip the 1000X ability bits and restart
				 * autoneg so the partner drops the link
				 * before the new forced mode is applied.
				 */
				bnx2_write_phy(bp, bp->mii_adv, adv &
					       ~(ADVERTISE_1000XFULL |
						 ADVERTISE_1000XHALF));
				bnx2_write_phy(bp, bp->mii_bmcr, bmcr |
					BMCR_ANRESTART | BMCR_ANENABLE);

				bp->link_up = 0;
				netif_carrier_off(bp->dev);
				bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
				bnx2_report_link(bp);
			}
			bnx2_write_phy(bp, bp->mii_adv, adv);
			bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
		} else {
			/* Nothing changed: just refresh MAC/flow control. */
			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);
		}
		return 0;
	}

	bnx2_test_and_enable_2g5(bp);

	/* Rebuild the desired 1000X advertisement word. */
	if (bp->advertising & ADVERTISED_1000baseT_Full)
		new_adv |= ADVERTISE_1000XFULL;

	new_adv |= bnx2_phy_get_pause_adv(bp);

	bnx2_read_phy(bp, bp->mii_adv, &adv);
	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

	bp->serdes_an_pending = 0;
	if ((adv != new_adv) || ((bmcr & BMCR_ANENABLE) == 0)) {
		/* Force a link down visible on the other side */
		if (bp->link_up) {
			bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
			/* msleep() sleeps; drop the lock around it. */
			spin_unlock_bh(&bp->phy_lock);
			msleep(20);
			spin_lock_bh(&bp->phy_lock);
		}

		bnx2_write_phy(bp, bp->mii_adv, new_adv);
		bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART |
			BMCR_ANENABLE);
		/* Speed up link-up time when the link partner
		 * does not autonegotiate which is very common
		 * in blade servers. Some blade servers use
		 * IPMI for keyboard input and it's important
		 * to minimize link disruptions. Autoneg. involves
		 * exchanging base pages plus 3 next pages and
		 * normally completes in about 120 msec.
		 */
		bp->current_interval = SERDES_AN_TIMEOUT;
		bp->serdes_an_pending = 1;
		mod_timer(&bp->timer, jiffies + bp->current_interval);
	} else {
		bnx2_resolve_flow_ctrl(bp);
		bnx2_set_mac_link(bp);
	}

	return 0;
}
1511
/* Ethtool advertisement mask of all fibre speeds this board can do;
 * 2.5G is included only when the PHY is flagged 2.5G capable.
 * NOTE: expands in terms of a local "bp" variable at the use site.
 */
#define ETHTOOL_ALL_FIBRE_SPEED						\
	(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) ?			\
		(ADVERTISED_2500baseX_Full | ADVERTISED_1000baseT_Full) :\
		(ADVERTISED_1000baseT_Full)

/* Ethtool advertisement mask covering every copper speed/duplex. */
#define ETHTOOL_ALL_COPPER_SPEED					\
	(ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |		\
	ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |		\
	ADVERTISED_1000baseT_Full)

/* MII advertisement-register masks for all 10/100 and 1000 modes. */
#define PHY_ALL_10_100_SPEED (ADVERTISE_10HALF | ADVERTISE_10FULL | \
	ADVERTISE_100HALF | ADVERTISE_100FULL | ADVERTISE_CSMA)

#define PHY_ALL_1000_SPEED (ADVERTISE_1000HALF | ADVERTISE_1000FULL)
1526
1527 static void
1528 bnx2_set_default_remote_link(struct bnx2 *bp)
1529 {
1530         u32 link;
1531
1532         if (bp->phy_port == PORT_TP)
1533                 link = REG_RD_IND(bp, bp->shmem_base + BNX2_RPHY_COPPER_LINK);
1534         else
1535                 link = REG_RD_IND(bp, bp->shmem_base + BNX2_RPHY_SERDES_LINK);
1536
1537         if (link & BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG) {
1538                 bp->req_line_speed = 0;
1539                 bp->autoneg |= AUTONEG_SPEED;
1540                 bp->advertising = ADVERTISED_Autoneg;
1541                 if (link & BNX2_NETLINK_SET_LINK_SPEED_10HALF)
1542                         bp->advertising |= ADVERTISED_10baseT_Half;
1543                 if (link & BNX2_NETLINK_SET_LINK_SPEED_10FULL)
1544                         bp->advertising |= ADVERTISED_10baseT_Full;
1545                 if (link & BNX2_NETLINK_SET_LINK_SPEED_100HALF)
1546                         bp->advertising |= ADVERTISED_100baseT_Half;
1547                 if (link & BNX2_NETLINK_SET_LINK_SPEED_100FULL)
1548                         bp->advertising |= ADVERTISED_100baseT_Full;
1549                 if (link & BNX2_NETLINK_SET_LINK_SPEED_1GFULL)
1550                         bp->advertising |= ADVERTISED_1000baseT_Full;
1551                 if (link & BNX2_NETLINK_SET_LINK_SPEED_2G5FULL)
1552                         bp->advertising |= ADVERTISED_2500baseX_Full;
1553         } else {
1554                 bp->autoneg = 0;
1555                 bp->advertising = 0;
1556                 bp->req_duplex = DUPLEX_FULL;
1557                 if (link & BNX2_NETLINK_SET_LINK_SPEED_10) {
1558                         bp->req_line_speed = SPEED_10;
1559                         if (link & BNX2_NETLINK_SET_LINK_SPEED_10HALF)
1560                                 bp->req_duplex = DUPLEX_HALF;
1561                 }
1562                 if (link & BNX2_NETLINK_SET_LINK_SPEED_100) {
1563                         bp->req_line_speed = SPEED_100;
1564                         if (link & BNX2_NETLINK_SET_LINK_SPEED_100HALF)
1565                                 bp->req_duplex = DUPLEX_HALF;
1566                 }
1567                 if (link & BNX2_NETLINK_SET_LINK_SPEED_1GFULL)
1568                         bp->req_line_speed = SPEED_1000;
1569                 if (link & BNX2_NETLINK_SET_LINK_SPEED_2G5FULL)
1570                         bp->req_line_speed = SPEED_2500;
1571         }
1572 }
1573
1574 static void
1575 bnx2_set_default_link(struct bnx2 *bp)
1576 {
1577         if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
1578                 return bnx2_set_default_remote_link(bp);
1579
1580         bp->autoneg = AUTONEG_SPEED | AUTONEG_FLOW_CTRL;
1581         bp->req_line_speed = 0;
1582         if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1583                 u32 reg;
1584
1585                 bp->advertising = ETHTOOL_ALL_FIBRE_SPEED | ADVERTISED_Autoneg;
1586
1587                 reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_CONFIG);
1588                 reg &= BNX2_PORT_HW_CFG_CFG_DFLT_LINK_MASK;
1589                 if (reg == BNX2_PORT_HW_CFG_CFG_DFLT_LINK_1G) {
1590                         bp->autoneg = 0;
1591                         bp->req_line_speed = bp->line_speed = SPEED_1000;
1592                         bp->req_duplex = DUPLEX_FULL;
1593                 }
1594         } else
1595                 bp->advertising = ETHTOOL_ALL_COPPER_SPEED | ADVERTISED_Autoneg;
1596 }
1597
1598 static void
1599 bnx2_send_heart_beat(struct bnx2 *bp)
1600 {
1601         u32 msg;
1602         u32 addr;
1603
1604         spin_lock(&bp->indirect_lock);
1605         msg = (u32) (++bp->fw_drv_pulse_wr_seq & BNX2_DRV_PULSE_SEQ_MASK);
1606         addr = bp->shmem_base + BNX2_DRV_PULSE_MB;
1607         REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, addr);
1608         REG_WR(bp, BNX2_PCICFG_REG_WINDOW, msg);
1609         spin_unlock(&bp->indirect_lock);
1610 }
1611
/* Process a link event posted by the remote PHY firmware: refresh
 * bp->link_up, line_speed, duplex, flow_ctrl and phy_port from the
 * BNX2_LINK_STATUS shmem word, then reprogram the MAC accordingly.
 */
static void
bnx2_remote_phy_event(struct bnx2 *bp)
{
	u32 msg;
	u8 link_up = bp->link_up;	/* remember old state to detect a change */
	u8 old_port;

	msg = REG_RD_IND(bp, bp->shmem_base + BNX2_LINK_STATUS);

	/* Firmware flags a missed driver pulse here; answer with one now. */
	if (msg & BNX2_LINK_STATUS_HEART_BEAT_EXPIRED)
		bnx2_send_heart_beat(bp);

	msg &= ~BNX2_LINK_STATUS_HEART_BEAT_EXPIRED;

	if ((msg & BNX2_LINK_STATUS_LINK_UP) == BNX2_LINK_STATUS_LINK_DOWN)
		bp->link_up = 0;
	else {
		u32 speed;

		bp->link_up = 1;
		speed = msg & BNX2_LINK_STATUS_SPEED_MASK;
		bp->duplex = DUPLEX_FULL;
		/* Each xxHALF case overrides the full-duplex default and then
		 * deliberately falls through to the matching speed case. */
		switch (speed) {
			case BNX2_LINK_STATUS_10HALF:
				bp->duplex = DUPLEX_HALF;
				/* fall through */
			case BNX2_LINK_STATUS_10FULL:
				bp->line_speed = SPEED_10;
				break;
			case BNX2_LINK_STATUS_100HALF:
				bp->duplex = DUPLEX_HALF;
				/* fall through */
			case BNX2_LINK_STATUS_100BASE_T4:
			case BNX2_LINK_STATUS_100FULL:
				bp->line_speed = SPEED_100;
				break;
			case BNX2_LINK_STATUS_1000HALF:
				bp->duplex = DUPLEX_HALF;
				/* fall through */
			case BNX2_LINK_STATUS_1000FULL:
				bp->line_speed = SPEED_1000;
				break;
			case BNX2_LINK_STATUS_2500HALF:
				bp->duplex = DUPLEX_HALF;
				/* fall through */
			case BNX2_LINK_STATUS_2500FULL:
				bp->line_speed = SPEED_2500;
				break;
			default:
				bp->line_speed = 0;
				break;
		}

		spin_lock(&bp->phy_lock);
		bp->flow_ctrl = 0;
		/* Forced flow control applies only on full duplex; otherwise
		 * take the negotiated TX/RX pause bits from the status word. */
		if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
		    (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {
			if (bp->duplex == DUPLEX_FULL)
				bp->flow_ctrl = bp->req_flow_ctrl;
		} else {
			if (msg & BNX2_LINK_STATUS_TX_FC_ENABLED)
				bp->flow_ctrl |= FLOW_CTRL_TX;
			if (msg & BNX2_LINK_STATUS_RX_FC_ENABLED)
				bp->flow_ctrl |= FLOW_CTRL_RX;
		}

		/* If the media type changed, re-derive the default link
		 * settings for the new port type. */
		old_port = bp->phy_port;
		if (msg & BNX2_LINK_STATUS_SERDES_LINK)
			bp->phy_port = PORT_FIBRE;
		else
			bp->phy_port = PORT_TP;

		if (old_port != bp->phy_port)
			bnx2_set_default_link(bp);

		spin_unlock(&bp->phy_lock);
	}
	if (bp->link_up != link_up)
		bnx2_report_link(bp);

	bnx2_set_mac_link(bp);
}
1690
1691 static int
1692 bnx2_set_remote_link(struct bnx2 *bp)
1693 {
1694         u32 evt_code;
1695
1696         evt_code = REG_RD_IND(bp, bp->shmem_base + BNX2_FW_EVT_CODE_MB);
1697         switch (evt_code) {
1698                 case BNX2_FW_EVT_CODE_LINK_EVENT:
1699                         bnx2_remote_phy_event(bp);
1700                         break;
1701                 case BNX2_FW_EVT_CODE_SW_TIMER_EXPIRATION_EVENT:
1702                 default:
1703                         bnx2_send_heart_beat(bp);
1704                         break;
1705         }
1706         return 0;
1707 }
1708
/* Program the copper PHY for either autonegotiation (per bp->advertising)
 * or a forced speed/duplex.  Called with bp->phy_lock held — the forced
 * path drops and re-takes it around msleep().  Always returns 0.
 */
static int
bnx2_setup_copper_phy(struct bnx2 *bp)
{
	u32 bmcr;
	u32 new_bmcr;

	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

	if (bp->autoneg & AUTONEG_SPEED) {
		u32 adv_reg, adv1000_reg;
		u32 new_adv_reg = 0;
		u32 new_adv1000_reg = 0;

		/* Current advertisement, masked to the speed and pause bits
		 * this function manages. */
		bnx2_read_phy(bp, bp->mii_adv, &adv_reg);
		adv_reg &= (PHY_ALL_10_100_SPEED | ADVERTISE_PAUSE_CAP |
			ADVERTISE_PAUSE_ASYM);

		bnx2_read_phy(bp, MII_CTRL1000, &adv1000_reg);
		adv1000_reg &= PHY_ALL_1000_SPEED;

		/* Build the desired advertisement from bp->advertising. */
		if (bp->advertising & ADVERTISED_10baseT_Half)
			new_adv_reg |= ADVERTISE_10HALF;
		if (bp->advertising & ADVERTISED_10baseT_Full)
			new_adv_reg |= ADVERTISE_10FULL;
		if (bp->advertising & ADVERTISED_100baseT_Half)
			new_adv_reg |= ADVERTISE_100HALF;
		if (bp->advertising & ADVERTISED_100baseT_Full)
			new_adv_reg |= ADVERTISE_100FULL;
		if (bp->advertising & ADVERTISED_1000baseT_Full)
			new_adv1000_reg |= ADVERTISE_1000FULL;

		new_adv_reg |= ADVERTISE_CSMA;

		new_adv_reg |= bnx2_phy_get_pause_adv(bp);

		/* Only restart autoneg when the advertisement actually
		 * changed or autoneg is currently disabled. */
		if ((adv1000_reg != new_adv1000_reg) ||
			(adv_reg != new_adv_reg) ||
			((bmcr & BMCR_ANENABLE) == 0)) {

			bnx2_write_phy(bp, bp->mii_adv, new_adv_reg);
			bnx2_write_phy(bp, MII_CTRL1000, new_adv1000_reg);
			bnx2_write_phy(bp, bp->mii_bmcr, BMCR_ANRESTART |
				BMCR_ANENABLE);
		}
		else if (bp->link_up) {
			/* Flow ctrl may have changed from auto to forced */
			/* or vice-versa. */

			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);
		}
		return 0;
	}

	/* Forced speed/duplex path. */
	new_bmcr = 0;
	if (bp->req_line_speed == SPEED_100) {
		new_bmcr |= BMCR_SPEED100;
	}
	if (bp->req_duplex == DUPLEX_FULL) {
		new_bmcr |= BMCR_FULLDPLX;
	}
	if (new_bmcr != bmcr) {
		u32 bmsr;

		/* Read BMSR twice — its link-status bit is latched (standard
		 * MII behavior); the second read reflects the current state. */
		bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
		bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);

		if (bmsr & BMSR_LSTATUS) {
			/* Force link down */
			bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
			/* Drop the phy_lock while sleeping. */
			spin_unlock_bh(&bp->phy_lock);
			msleep(50);
			spin_lock_bh(&bp->phy_lock);

			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
		}

		bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);

		/* Normally, the new speed is setup after the link has
		 * gone down and up again. In some cases, link will not go
		 * down so we need to set up the new speed here.
		 */
		if (bmsr & BMSR_LSTATUS) {
			bp->line_speed = bp->req_line_speed;
			bp->duplex = bp->req_duplex;
			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);
		}
	} else {
		bnx2_resolve_flow_ctrl(bp);
		bnx2_set_mac_link(bp);
	}
	return 0;
}
1805
1806 static int
1807 bnx2_setup_phy(struct bnx2 *bp, u8 port)
1808 {
1809         if (bp->loopback == MAC_LOOPBACK)
1810                 return 0;
1811
1812         if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1813                 return (bnx2_setup_serdes_phy(bp, port));
1814         }
1815         else {
1816                 return (bnx2_setup_copper_phy(bp));
1817         }
1818 }
1819
/* Initialize the 5709 SerDes PHY.  Register access is block-based: each
 * MII_BNX2_BLK_ADDR write selects a register block for the accesses that
 * follow.  Always returns 0.
 */
static int
bnx2_init_5709s_phy(struct bnx2 *bp)
{
	u32 val;

	/* On this PHY the standard MII registers live at a 0x10 offset. */
	bp->mii_bmcr = MII_BMCR + 0x10;
	bp->mii_bmsr = MII_BMSR + 0x10;
	bp->mii_bmsr1 = MII_BNX2_GP_TOP_AN_STATUS1;
	bp->mii_adv = MII_ADVERTISE + 0x10;
	bp->mii_lpa = MII_LPA + 0x10;
	bp->mii_up1 = MII_BNX2_OVER1G_UP1;

	/* Map in the autoneg MMD through the AER block. */
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_AER);
	bnx2_write_phy(bp, MII_BNX2_AER_AER, MII_BNX2_AER_AER_AN_MMD);

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
	bnx2_reset_phy(bp);

	/* Force fiber mode; disable media auto-detection. */
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_SERDES_DIG);

	bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, &val);
	val &= ~MII_BNX2_SD_1000XCTL1_AUTODET;
	val |= MII_BNX2_SD_1000XCTL1_FIBER;
	bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, val);

	/* Enable or disable 2.5G advertisement per the capability flag. */
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
	bnx2_read_phy(bp, MII_BNX2_OVER1G_UP1, &val);
	if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE)
		val |= BCM5708S_UP1_2G5;
	else
		val &= ~BCM5708S_UP1_2G5;
	bnx2_write_phy(bp, MII_BNX2_OVER1G_UP1, val);

	/* Enable BAM next-page exchange. */
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_BAM_NXTPG);
	bnx2_read_phy(bp, MII_BNX2_BAM_NXTPG_CTL, &val);
	val |= MII_BNX2_NXTPG_CTL_T2 | MII_BNX2_NXTPG_CTL_BAM;
	bnx2_write_phy(bp, MII_BNX2_BAM_NXTPG_CTL, val);

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_CL73_USERB0);

	/* Clause 73 BAM configuration. */
	val = MII_BNX2_CL73_BAM_EN | MII_BNX2_CL73_BAM_STA_MGR_EN |
	      MII_BNX2_CL73_BAM_NP_AFT_BP_EN;
	bnx2_write_phy(bp, MII_BNX2_CL73_BAM_CTL1, val);

	/* Leave the block address at the default IEEE block. */
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);

	return 0;
}
1868
/* Initialize the 5708 SerDes PHY, including chip-rev and backplane
 * specific TX tuning from shmem configuration.  Always returns 0.
 */
static int
bnx2_init_5708s_phy(struct bnx2 *bp)
{
	u32 val;

	bnx2_reset_phy(bp);

	bp->mii_up1 = BCM5708S_UP1;

	bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG3);
	bnx2_write_phy(bp, BCM5708S_DIG_3_0, BCM5708S_DIG_3_0_USE_IEEE);
	bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);

	/* Fiber mode with media auto-detection enabled. */
	bnx2_read_phy(bp, BCM5708S_1000X_CTL1, &val);
	val |= BCM5708S_1000X_CTL1_FIBER_MODE | BCM5708S_1000X_CTL1_AUTODET_EN;
	bnx2_write_phy(bp, BCM5708S_1000X_CTL1, val);

	bnx2_read_phy(bp, BCM5708S_1000X_CTL2, &val);
	val |= BCM5708S_1000X_CTL2_PLLEL_DET_EN;
	bnx2_write_phy(bp, BCM5708S_1000X_CTL2, val);

	/* Advertise 2.5G only when the board supports it. */
	if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) {
		bnx2_read_phy(bp, BCM5708S_UP1, &val);
		val |= BCM5708S_UP1_2G5;
		bnx2_write_phy(bp, BCM5708S_UP1, val);
	}

	if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_B1)) {
		/* increase tx signal amplitude */
		bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
			       BCM5708S_BLK_ADDR_TX_MISC);
		bnx2_read_phy(bp, BCM5708S_TX_ACTL1, &val);
		val &= ~BCM5708S_TX_ACTL1_DRIVER_VCM;
		bnx2_write_phy(bp, BCM5708S_TX_ACTL1, val);
		bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);
	}

	/* Board-specific TX control value stored by the vendor in shmem;
	 * applied only on backplane designs. */
	val = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_CONFIG) &
	      BNX2_PORT_HW_CFG_CFG_TXCTL3_MASK;

	if (val) {
		u32 is_backplane;

		is_backplane = REG_RD_IND(bp, bp->shmem_base +
					  BNX2_SHARED_HW_CFG_CONFIG);
		if (is_backplane & BNX2_SHARED_HW_CFG_PHY_BACKPLANE) {
			bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
				       BCM5708S_BLK_ADDR_TX_MISC);
			bnx2_write_phy(bp, BCM5708S_TX_ACTL3, val);
			bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
				       BCM5708S_BLK_ADDR_DIG);
		}
	}
	return 0;
}
1926
/* Initialize the 5706 SerDes PHY.  The raw 0x18/0x1c accesses are
 * vendor shadow-register sequences taken as-is from the reference
 * init code; the values differ only by whether jumbo frames (MTU >
 * 1500) are in use.  Always returns 0.
 */
static int
bnx2_init_5706s_phy(struct bnx2 *bp)
{
	bnx2_reset_phy(bp);

	bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;

	if (CHIP_NUM(bp) == CHIP_NUM_5706)
		REG_WR(bp, BNX2_MISC_GP_HW_CTL0, 0x300);

	if (bp->dev->mtu > 1500) {
		u32 val;

		/* Set extended packet length bit */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, (val & 0xfff8) | 0x4000);

		bnx2_write_phy(bp, 0x1c, 0x6c00);
		bnx2_read_phy(bp, 0x1c, &val);
		bnx2_write_phy(bp, 0x1c, (val & 0x3ff) | 0xec02);
	}
	else {
		u32 val;

		/* Clear the extended packet length configuration. */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, val & ~0x4007);

		bnx2_write_phy(bp, 0x1c, 0x6c00);
		bnx2_read_phy(bp, 0x1c, &val);
		bnx2_write_phy(bp, 0x1c, (val & 0x3fd) | 0xec00);
	}

	return 0;
}
1963
/* Initialize the copper PHY: apply board-specific workarounds, set the
 * extended packet length bits to match the MTU, and enable
 * ethernet@wirespeed.  The 0x10/0x15/0x17/0x18/0x1c accesses are vendor
 * shadow-register sequences taken as-is.  Always returns 0.
 */
static int
bnx2_init_copper_phy(struct bnx2 *bp)
{
	u32 val;

	bnx2_reset_phy(bp);

	/* CRC workaround sequence for affected boards. */
	if (bp->phy_flags & BNX2_PHY_FLAG_CRC_FIX) {
		bnx2_write_phy(bp, 0x18, 0x0c00);
		bnx2_write_phy(bp, 0x17, 0x000a);
		bnx2_write_phy(bp, 0x15, 0x310b);
		bnx2_write_phy(bp, 0x17, 0x201f);
		bnx2_write_phy(bp, 0x15, 0x9506);
		bnx2_write_phy(bp, 0x17, 0x401f);
		bnx2_write_phy(bp, 0x15, 0x14e2);
		bnx2_write_phy(bp, 0x18, 0x0400);
	}

	/* Clear bit 8 of DSP expansion register 8 to disable early DAC. */
	if (bp->phy_flags & BNX2_PHY_FLAG_DIS_EARLY_DAC) {
		bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS,
			       MII_BNX2_DSP_EXPAND_REG | 0x8);
		bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &val);
		val &= ~(1 << 8);
		bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val);
	}

	if (bp->dev->mtu > 1500) {
		/* Set extended packet length bit */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, val | 0x4000);

		bnx2_read_phy(bp, 0x10, &val);
		bnx2_write_phy(bp, 0x10, val | 0x1);
	}
	else {
		/* Clear the extended packet length configuration. */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, val & ~0x4007);

		bnx2_read_phy(bp, 0x10, &val);
		bnx2_write_phy(bp, 0x10, val & ~0x1);
	}

	/* ethernet@wirespeed */
	bnx2_write_phy(bp, 0x18, 0x7007);
	bnx2_read_phy(bp, 0x18, &val);
	bnx2_write_phy(bp, 0x18, val | (1 << 15) | (1 << 4));
	return 0;
}
2014
2015
2016 static int
2017 bnx2_init_phy(struct bnx2 *bp)
2018 {
2019         u32 val;
2020         int rc = 0;
2021
2022         bp->phy_flags &= ~BNX2_PHY_FLAG_INT_MODE_MASK;
2023         bp->phy_flags |= BNX2_PHY_FLAG_INT_MODE_LINK_READY;
2024
2025         bp->mii_bmcr = MII_BMCR;
2026         bp->mii_bmsr = MII_BMSR;
2027         bp->mii_bmsr1 = MII_BMSR;
2028         bp->mii_adv = MII_ADVERTISE;
2029         bp->mii_lpa = MII_LPA;
2030
2031         REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);
2032
2033         if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
2034                 goto setup_phy;
2035
2036         bnx2_read_phy(bp, MII_PHYSID1, &val);
2037         bp->phy_id = val << 16;
2038         bnx2_read_phy(bp, MII_PHYSID2, &val);
2039         bp->phy_id |= val & 0xffff;
2040
2041         if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
2042                 if (CHIP_NUM(bp) == CHIP_NUM_5706)
2043                         rc = bnx2_init_5706s_phy(bp);
2044                 else if (CHIP_NUM(bp) == CHIP_NUM_5708)
2045                         rc = bnx2_init_5708s_phy(bp);
2046                 else if (CHIP_NUM(bp) == CHIP_NUM_5709)
2047                         rc = bnx2_init_5709s_phy(bp);
2048         }
2049         else {
2050                 rc = bnx2_init_copper_phy(bp);
2051         }
2052
2053 setup_phy:
2054         if (!rc)
2055                 rc = bnx2_setup_phy(bp, bp->phy_port);
2056
2057         return rc;
2058 }
2059
2060 static int
2061 bnx2_set_mac_loopback(struct bnx2 *bp)
2062 {
2063         u32 mac_mode;
2064
2065         mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
2066         mac_mode &= ~BNX2_EMAC_MODE_PORT;
2067         mac_mode |= BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK;
2068         REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
2069         bp->link_up = 1;
2070         return 0;
2071 }
2072
2073 static int bnx2_test_link(struct bnx2 *);
2074
2075 static int
2076 bnx2_set_phy_loopback(struct bnx2 *bp)
2077 {
2078         u32 mac_mode;
2079         int rc, i;
2080
2081         spin_lock_bh(&bp->phy_lock);
2082         rc = bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK | BMCR_FULLDPLX |
2083                             BMCR_SPEED1000);
2084         spin_unlock_bh(&bp->phy_lock);
2085         if (rc)
2086                 return rc;
2087
2088         for (i = 0; i < 10; i++) {
2089                 if (bnx2_test_link(bp) == 0)
2090                         break;
2091                 msleep(100);
2092         }
2093
2094         mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
2095         mac_mode &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
2096                       BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
2097                       BNX2_EMAC_MODE_25G_MODE);
2098
2099         mac_mode |= BNX2_EMAC_MODE_PORT_GMII;
2100         REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
2101         bp->link_up = 1;
2102         return 0;
2103 }
2104
/* Send a message to the bootcode through the driver mailbox and wait
 * up to FW_ACK_TIME_OUT_MS for the matching sequence-number ack.
 * WAIT0 messages are fire-and-forget and always return 0.  Returns
 * -EBUSY on ack timeout (and tells the firmware so), -EIO if the
 * firmware reports a non-OK status, 0 otherwise.
 */
static int
bnx2_fw_sync(struct bnx2 *bp, u32 msg_data, int silent)
{
	int i;
	u32 val;

	/* Tag the message with the next driver sequence number. */
	bp->fw_wr_seq++;
	msg_data |= bp->fw_wr_seq;

	REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_MB, msg_data);

	/* wait for an acknowledgement. */
	for (i = 0; i < (FW_ACK_TIME_OUT_MS / 10); i++) {
		msleep(10);

		val = REG_RD_IND(bp, bp->shmem_base + BNX2_FW_MB);

		if ((val & BNX2_FW_MSG_ACK) == (msg_data & BNX2_DRV_MSG_SEQ))
			break;
	}
	/* WAIT0 messages do not require a full handshake. */
	if ((msg_data & BNX2_DRV_MSG_DATA) == BNX2_DRV_MSG_DATA_WAIT0)
		return 0;

	/* If we timed out, inform the firmware that this is the case. */
	if ((val & BNX2_FW_MSG_ACK) != (msg_data & BNX2_DRV_MSG_SEQ)) {
		if (!silent)
			printk(KERN_ERR PFX "fw sync timeout, reset code = "
					    "%x\n", msg_data);

		msg_data &= ~BNX2_DRV_MSG_CODE;
		msg_data |= BNX2_DRV_MSG_CODE_FW_TIMEOUT;

		REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_MB, msg_data);

		return -EBUSY;
	}

	if ((val & BNX2_FW_MSG_STATUS_MASK) != BNX2_FW_MSG_STATUS_OK)
		return -EIO;

	return 0;
}
2147
/* Initialize the 5709 context memory: kick off the hardware memory
 * init, wait for it to finish, then program one host page-table entry
 * per context block page, polling each write for completion.  Returns
 * 0 on success or -EBUSY if the hardware does not respond in time.
 */
static int
bnx2_init_5709_context(struct bnx2 *bp)
{
	int i, ret = 0;
	u32 val;

	/* Enable the context engine and request memory initialization;
	 * encode the page size relative to the 256-byte minimum. */
	val = BNX2_CTX_COMMAND_ENABLED | BNX2_CTX_COMMAND_MEM_INIT | (1 << 12);
	val |= (BCM_PAGE_BITS - 8) << 16;
	REG_WR(bp, BNX2_CTX_COMMAND, val);
	/* MEM_INIT self-clears when the hardware is done. */
	for (i = 0; i < 10; i++) {
		val = REG_RD(bp, BNX2_CTX_COMMAND);
		if (!(val & BNX2_CTX_COMMAND_MEM_INIT))
			break;
		udelay(2);
	}
	if (val & BNX2_CTX_COMMAND_MEM_INIT)
		return -EBUSY;

	for (i = 0; i < bp->ctx_pages; i++) {
		int j;

		/* Low/high halves of the DMA address, then the write
		 * request for page-table slot i. */
		REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA0,
		       (bp->ctx_blk_mapping[i] & 0xffffffff) |
		       BNX2_CTX_HOST_PAGE_TBL_DATA0_VALID);
		REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA1,
		       (u64) bp->ctx_blk_mapping[i] >> 32);
		REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL, i |
		       BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ);
		/* WRITE_REQ self-clears once the entry is committed. */
		for (j = 0; j < 10; j++) {

			val = REG_RD(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL);
			if (!(val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ))
				break;
			udelay(5);
		}
		if (val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ) {
			ret = -EBUSY;
			break;
		}
	}
	return ret;
}
2190
/* Zero out all 96 on-chip connection contexts (pre-5709 chips).
 * On 5706 A0 some context ids map to different physical context
 * addresses, so the physical id is remapped before zeroing.
 */
static void
bnx2_init_context(struct bnx2 *bp)
{
	u32 vcid;

	vcid = 96;
	while (vcid) {
		u32 vcid_addr, pcid_addr, offset;
		int i;

		vcid--;

		if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
			u32 new_vcid;

			/* A0 silicon quirk: vcids with bit 3 set live at a
			 * remapped physical context id. */
			vcid_addr = GET_PCID_ADDR(vcid);
			if (vcid & 0x8) {
				new_vcid = 0x60 + (vcid & 0xf0) + (vcid & 0x7);
			}
			else {
				new_vcid = vcid;
			}
			pcid_addr = GET_PCID_ADDR(new_vcid);
		}
		else {
			vcid_addr = GET_CID_ADDR(vcid);
			pcid_addr = vcid_addr;
		}

		/* A context spans several physical pages; zero each one. */
		for (i = 0; i < (CTX_SIZE / PHY_CTX_SIZE); i++) {
			vcid_addr += (i << PHY_CTX_SHIFT);
			pcid_addr += (i << PHY_CTX_SHIFT);

			REG_WR(bp, BNX2_CTX_VIRT_ADDR, vcid_addr);
			REG_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr);

			/* Zero out the context. */
			for (offset = 0; offset < PHY_CTX_SIZE; offset += 4)
				CTX_WR(bp, vcid_addr, offset, 0);
		}
	}
}
2233
2234 static int
2235 bnx2_alloc_bad_rbuf(struct bnx2 *bp)
2236 {
2237         u16 *good_mbuf;
2238         u32 good_mbuf_cnt;
2239         u32 val;
2240
2241         good_mbuf = kmalloc(512 * sizeof(u16), GFP_KERNEL);
2242         if (good_mbuf == NULL) {
2243                 printk(KERN_ERR PFX "Failed to allocate memory in "
2244                                     "bnx2_alloc_bad_rbuf\n");
2245                 return -ENOMEM;
2246         }
2247
2248         REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
2249                 BNX2_MISC_ENABLE_SET_BITS_RX_MBUF_ENABLE);
2250
2251         good_mbuf_cnt = 0;
2252
2253         /* Allocate a bunch of mbufs and save the good ones in an array. */
2254         val = REG_RD_IND(bp, BNX2_RBUF_STATUS1);
2255         while (val & BNX2_RBUF_STATUS1_FREE_COUNT) {
2256                 REG_WR_IND(bp, BNX2_RBUF_COMMAND, BNX2_RBUF_COMMAND_ALLOC_REQ);
2257
2258                 val = REG_RD_IND(bp, BNX2_RBUF_FW_BUF_ALLOC);
2259
2260                 val &= BNX2_RBUF_FW_BUF_ALLOC_VALUE;
2261
2262                 /* The addresses with Bit 9 set are bad memory blocks. */
2263                 if (!(val & (1 << 9))) {
2264                         good_mbuf[good_mbuf_cnt] = (u16) val;
2265                         good_mbuf_cnt++;
2266                 }
2267
2268                 val = REG_RD_IND(bp, BNX2_RBUF_STATUS1);
2269         }
2270
2271         /* Free the good ones back to the mbuf pool thus discarding
2272          * all the bad ones. */
2273         while (good_mbuf_cnt) {
2274                 good_mbuf_cnt--;
2275
2276                 val = good_mbuf[good_mbuf_cnt];
2277                 val = (val << 9) | val | 1;
2278
2279                 REG_WR_IND(bp, BNX2_RBUF_FW_BUF_FREE, val);
2280         }
2281         kfree(good_mbuf);
2282         return 0;
2283 }
2284
2285 static void
2286 bnx2_set_mac_addr(struct bnx2 *bp)
2287 {
2288         u32 val;
2289         u8 *mac_addr = bp->dev->dev_addr;
2290
2291         val = (mac_addr[0] << 8) | mac_addr[1];
2292
2293         REG_WR(bp, BNX2_EMAC_MAC_MATCH0, val);
2294
2295         val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
2296                 (mac_addr[4] << 8) | mac_addr[5];
2297
2298         REG_WR(bp, BNX2_EMAC_MAC_MATCH1, val);
2299 }
2300
/* Allocate and DMA-map a page for slot @index of the RX page ring and
 * publish its address in the corresponding descriptor.  Returns 0 on
 * success, -ENOMEM if the page allocation fails.
 */
static inline int
bnx2_alloc_rx_page(struct bnx2 *bp, u16 index)
{
	dma_addr_t mapping;
	struct sw_pg *rx_pg = &bp->rx_pg_ring[index];
	struct rx_bd *rxbd =
		&bp->rx_pg_desc_ring[RX_RING(index)][RX_IDX(index)];
	/* GFP_ATOMIC: may be called from the RX path, no sleeping. */
	struct page *page = alloc_page(GFP_ATOMIC);

	if (!page)
		return -ENOMEM;
	/* NOTE(review): the mapping is not checked with
	 * pci_dma_mapping_error() — confirm that is acceptable here. */
	mapping = pci_map_page(bp->pdev, page, 0, PAGE_SIZE,
			       PCI_DMA_FROMDEVICE);
	rx_pg->page = page;
	pci_unmap_addr_set(rx_pg, mapping, mapping);
	/* Descriptor carries the DMA address as two 32-bit halves. */
	rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
	rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;
	return 0;
}
2320
2321 static void
2322 bnx2_free_rx_page(struct bnx2 *bp, u16 index)
2323 {
2324         struct sw_pg *rx_pg = &bp->rx_pg_ring[index];
2325         struct page *page = rx_pg->page;
2326
2327         if (!page)
2328                 return;
2329
2330         pci_unmap_page(bp->pdev, pci_unmap_addr(rx_pg, mapping), PAGE_SIZE,
2331                        PCI_DMA_FROMDEVICE);
2332
2333         __free_page(page);
2334         rx_pg->page = NULL;
2335 }
2336
/* Allocate, align and DMA-map an skb for slot @index of the RX ring,
 * publish its address in the descriptor, and account the buffer in
 * rx_prod_bseq.  Returns 0 on success, -ENOMEM on allocation failure.
 */
static inline int
bnx2_alloc_rx_skb(struct bnx2 *bp, struct bnx2_napi *bnapi, u16 index)
{
	struct sk_buff *skb;
	struct sw_bd *rx_buf = &bp->rx_buf_ring[index];
	dma_addr_t mapping;
	struct rx_bd *rxbd = &bp->rx_desc_ring[RX_RING(index)][RX_IDX(index)];
	unsigned long align;

	skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
	if (skb == NULL) {
		return -ENOMEM;
	}

	/* Align skb->data to a BNX2_RX_ALIGN boundary if it isn't already. */
	if (unlikely((align = (unsigned long) skb->data & (BNX2_RX_ALIGN - 1))))
		skb_reserve(skb, BNX2_RX_ALIGN - align);

	/* NOTE(review): mapping is not checked for failure — confirm. */
	mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_use_size,
		PCI_DMA_FROMDEVICE);

	rx_buf->skb = skb;
	pci_unmap_addr_set(rx_buf, mapping, mapping);

	/* Descriptor carries the DMA address as two 32-bit halves. */
	rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
	rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;

	bnapi->rx_prod_bseq += bp->rx_buf_use_size;

	return 0;
}
2367
2368 static int
2369 bnx2_phy_event_is_set(struct bnx2 *bp, struct bnx2_napi *bnapi, u32 event)
2370 {
2371         struct status_block *sblk = bnapi->status_blk;
2372         u32 new_link_state, old_link_state;
2373         int is_set = 1;
2374
2375         new_link_state = sblk->status_attn_bits & event;
2376         old_link_state = sblk->status_attn_bits_ack & event;
2377         if (new_link_state != old_link_state) {
2378                 if (new_link_state)
2379                         REG_WR(bp, BNX2_PCICFG_STATUS_BIT_SET_CMD, event);
2380                 else
2381                         REG_WR(bp, BNX2_PCICFG_STATUS_BIT_CLEAR_CMD, event);
2382         } else
2383                 is_set = 0;
2384
2385         return is_set;
2386 }
2387
2388 static void
2389 bnx2_phy_int(struct bnx2 *bp, struct bnx2_napi *bnapi)
2390 {
2391         if (bnx2_phy_event_is_set(bp, bnapi, STATUS_ATTN_BITS_LINK_STATE)) {
2392                 spin_lock(&bp->phy_lock);
2393                 bnx2_set_link(bp);
2394                 spin_unlock(&bp->phy_lock);
2395         }
2396         if (bnx2_phy_event_is_set(bp, bnapi, STATUS_ATTN_BITS_TIMER_ABORT))
2397                 bnx2_set_remote_link(bp);
2398
2399 }
2400
2401 static inline u16
2402 bnx2_get_hw_tx_cons(struct bnx2_napi *bnapi)
2403 {
2404         u16 cons;
2405
2406         if (bnapi->int_num == 0)
2407                 cons = bnapi->status_blk->status_tx_quick_consumer_index0;
2408         else
2409                 cons = bnapi->status_blk_msix->status_tx_quick_consumer_index;
2410
2411         if (unlikely((cons & MAX_TX_DESC_CNT) == MAX_TX_DESC_CNT))
2412                 cons++;
2413         return cons;
2414 }
2415
/* Reclaim completed TX descriptors up to the hardware consumer index or
 * @budget packets, unmapping and freeing each skb, then wake the TX
 * queue if it was stopped and enough ring space is available again.
 * Returns the number of packets reclaimed.
 */
static int
bnx2_tx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
{
	u16 hw_cons, sw_cons, sw_ring_cons;
	int tx_pkt = 0;

	hw_cons = bnx2_get_hw_tx_cons(bnapi);
	sw_cons = bnapi->tx_cons;

	while (sw_cons != hw_cons) {
		struct sw_bd *tx_buf;
		struct sk_buff *skb;
		int i, last;

		sw_ring_cons = TX_RING_IDX(sw_cons);

		tx_buf = &bp->tx_buf_ring[sw_ring_cons];
		skb = tx_buf->skb;

		/* partial BD completions possible with TSO packets */
		if (skb_is_gso(skb)) {
			u16 last_idx, last_ring_idx;

			/* Index of the packet's last BD (head + frags). */
			last_idx = sw_cons +
				skb_shinfo(skb)->nr_frags + 1;
			last_ring_idx = sw_ring_cons +
				skb_shinfo(skb)->nr_frags + 1;
			/* Account for the skipped last ring slot. */
			if (unlikely(last_ring_idx >= MAX_TX_DESC_CNT)) {
				last_idx++;
			}
			/* Signed wrap-safe compare: stop if the packet's
			 * last BD has not completed yet. */
			if (((s16) ((s16) last_idx - (s16) hw_cons)) > 0) {
				break;
			}
		}

		/* Unmap the linear part, then each fragment page. */
		pci_unmap_single(bp->pdev, pci_unmap_addr(tx_buf, mapping),
			skb_headlen(skb), PCI_DMA_TODEVICE);

		tx_buf->skb = NULL;
		last = skb_shinfo(skb)->nr_frags;

		for (i = 0; i < last; i++) {
			sw_cons = NEXT_TX_BD(sw_cons);

			pci_unmap_page(bp->pdev,
				pci_unmap_addr(
					&bp->tx_buf_ring[TX_RING_IDX(sw_cons)],
					mapping),
				skb_shinfo(skb)->frags[i].size,
				PCI_DMA_TODEVICE);
		}

		sw_cons = NEXT_TX_BD(sw_cons);

		dev_kfree_skb(skb);
		tx_pkt++;
		if (tx_pkt == budget)
			break;

		/* Re-read: hardware may have completed more in the meantime. */
		hw_cons = bnx2_get_hw_tx_cons(bnapi);
	}

	bnapi->hw_tx_cons = hw_cons;
	bnapi->tx_cons = sw_cons;
	/* Need to make the tx_cons update visible to bnx2_start_xmit()
	 * before checking for netif_queue_stopped().  Without the
	 * memory barrier, there is a small possibility that bnx2_start_xmit()
	 * will miss it and cause the queue to be stopped forever.
	 */
	smp_mb();

	/* Re-check under the TX lock to avoid racing with start_xmit. */
	if (unlikely(netif_queue_stopped(bp->dev)) &&
		     (bnx2_tx_avail(bp, bnapi) > bp->tx_wake_thresh)) {
		netif_tx_lock(bp->dev);
		if ((netif_queue_stopped(bp->dev)) &&
		    (bnx2_tx_avail(bp, bnapi) > bp->tx_wake_thresh))
			netif_wake_queue(bp->dev);
		netif_tx_unlock(bp->dev);
	}
	return tx_pkt;
}
2497
/* Recycle @count rx page-ring entries from the consumer side back to the
 * producer side without allocating fresh pages.  Used on error paths when
 * a replacement skb or page could not be allocated.
 *
 * If @skb is non-NULL, its last page fragment is detached, re-mapped for
 * DMA and parked in the current consumer slot before the skb is freed,
 * so that page is not lost to the ring.
 */
static void
bnx2_reuse_rx_skb_pages(struct bnx2 *bp, struct bnx2_napi *bnapi,
			struct sk_buff *skb, int count)
{
	struct sw_pg *cons_rx_pg, *prod_rx_pg;
	struct rx_bd *cons_bd, *prod_bd;
	dma_addr_t mapping;
	int i;
	u16 hw_prod = bnapi->rx_pg_prod, prod;
	u16 cons = bnapi->rx_pg_cons;

	for (i = 0; i < count; i++) {
		prod = RX_PG_RING_IDX(hw_prod);

		prod_rx_pg = &bp->rx_pg_ring[prod];
		cons_rx_pg = &bp->rx_pg_ring[cons];
		cons_bd = &bp->rx_pg_desc_ring[RX_RING(cons)][RX_IDX(cons)];
		prod_bd = &bp->rx_pg_desc_ring[RX_RING(prod)][RX_IDX(prod)];

		/* First iteration only: strip the skb's last page fragment
		 * and hand it back to the consumer slot.
		 */
		if (i == 0 && skb) {
			struct page *page;
			struct skb_shared_info *shinfo;

			shinfo = skb_shinfo(skb);
			shinfo->nr_frags--;
			page = shinfo->frags[shinfo->nr_frags].page;
			shinfo->frags[shinfo->nr_frags].page = NULL;
			/* Re-establish the DMA mapping the fragment lost
			 * when it was attached to the skb.
			 */
			mapping = pci_map_page(bp->pdev, page, 0, PAGE_SIZE,
					       PCI_DMA_FROMDEVICE);
			cons_rx_pg->page = page;
			pci_unmap_addr_set(cons_rx_pg, mapping, mapping);
			dev_kfree_skb(skb);
		}
		/* Move page, mapping and BD address from the consumer slot
		 * to the producer slot (no-op when they coincide).
		 */
		if (prod != cons) {
			prod_rx_pg->page = cons_rx_pg->page;
			cons_rx_pg->page = NULL;
			pci_unmap_addr_set(prod_rx_pg, mapping,
				pci_unmap_addr(cons_rx_pg, mapping));

			prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
			prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;

		}
		cons = RX_PG_RING_IDX(NEXT_RX_BD(cons));
		hw_prod = NEXT_RX_BD(hw_prod);
	}
	bnapi->rx_pg_prod = hw_prod;
	bnapi->rx_pg_cons = cons;
}
2547
2548 static inline void
2549 bnx2_reuse_rx_skb(struct bnx2 *bp, struct bnx2_napi *bnapi, struct sk_buff *skb,
2550         u16 cons, u16 prod)
2551 {
2552         struct sw_bd *cons_rx_buf, *prod_rx_buf;
2553         struct rx_bd *cons_bd, *prod_bd;
2554
2555         cons_rx_buf = &bp->rx_buf_ring[cons];
2556         prod_rx_buf = &bp->rx_buf_ring[prod];
2557
2558         pci_dma_sync_single_for_device(bp->pdev,
2559                 pci_unmap_addr(cons_rx_buf, mapping),
2560                 bp->rx_offset + RX_COPY_THRESH, PCI_DMA_FROMDEVICE);
2561
2562         bnapi->rx_prod_bseq += bp->rx_buf_use_size;
2563
2564         prod_rx_buf->skb = skb;
2565
2566         if (cons == prod)
2567                 return;
2568
2569         pci_unmap_addr_set(prod_rx_buf, mapping,
2570                         pci_unmap_addr(cons_rx_buf, mapping));
2571
2572         cons_bd = &bp->rx_desc_ring[RX_RING(cons)][RX_IDX(cons)];
2573         prod_bd = &bp->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
2574         prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
2575         prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;
2576 }
2577
/* Finish building a received skb whose buffer at producer index
 * (@ring_idx low 16 bits) must be replaced.  For split/jumbo frames
 * (@hdr_len != 0) the payload beyond the header lives in page-ring
 * fragments that are attached to the skb here.
 *
 * @len is the packet length with the 4-byte trailer (frame CRC) already
 * subtracted by the caller; the "+ 4" arithmetic below re-adds it when
 * sizing the page fragments, and the trailer is trimmed off the last
 * fragment.  Returns 0 on success or a negative errno; on failure the
 * original buffer/pages are recycled back into the rings.
 */
static int
bnx2_rx_skb(struct bnx2 *bp, struct bnx2_napi *bnapi, struct sk_buff *skb,
	    unsigned int len, unsigned int hdr_len, dma_addr_t dma_addr,
	    u32 ring_idx)
{
	int err;
	u16 prod = ring_idx & 0xffff;

	/* Replace the consumed rx buffer; if that fails, recycle the old
	 * skb (and any page fragments) and drop the packet.
	 */
	err = bnx2_alloc_rx_skb(bp, bnapi, prod);
	if (unlikely(err)) {
		bnx2_reuse_rx_skb(bp, bnapi, skb, (u16) (ring_idx >> 16), prod);
		if (hdr_len) {
			unsigned int raw_len = len + 4;
			int pages = PAGE_ALIGN(raw_len - hdr_len) >> PAGE_SHIFT;

			bnx2_reuse_rx_skb_pages(bp, bnapi, NULL, pages);
		}
		return err;
	}

	/* The buffer now belongs to the stack; unmap it fully. */
	skb_reserve(skb, bp->rx_offset);
	pci_unmap_single(bp->pdev, dma_addr, bp->rx_buf_use_size,
			 PCI_DMA_FROMDEVICE);

	if (hdr_len == 0) {
		/* Linear packet: the whole frame is in the skb data area. */
		skb_put(skb, len);
		return 0;
	} else {
		unsigned int i, frag_len, frag_size, pages;
		struct sw_pg *rx_pg;
		u16 pg_cons = bnapi->rx_pg_cons;
		u16 pg_prod = bnapi->rx_pg_prod;

		/* Bytes beyond the header, including the 4-byte trailer. */
		frag_size = len + 4 - hdr_len;
		pages = PAGE_ALIGN(frag_size) >> PAGE_SHIFT;
		skb_put(skb, hdr_len);

		for (i = 0; i < pages; i++) {
			frag_len = min(frag_size, (unsigned int) PAGE_SIZE);
			/* Remaining bytes are only (part of) the trailer:
			 * recycle the unused pages and trim the trailer off
			 * whatever was attached last.
			 */
			if (unlikely(frag_len <= 4)) {
				unsigned int tail = 4 - frag_len;

				bnapi->rx_pg_cons = pg_cons;
				bnapi->rx_pg_prod = pg_prod;
				bnx2_reuse_rx_skb_pages(bp, bnapi, NULL,
							pages - i);
				skb->len -= tail;
				if (i == 0) {
					skb->tail -= tail;
				} else {
					skb_frag_t *frag =
						&skb_shinfo(skb)->frags[i - 1];
					frag->size -= tail;
					skb->data_len -= tail;
					skb->truesize -= tail;
				}
				return 0;
			}
			rx_pg = &bp->rx_pg_ring[pg_cons];

			pci_unmap_page(bp->pdev, pci_unmap_addr(rx_pg, mapping),
				       PAGE_SIZE, PCI_DMA_FROMDEVICE);

			/* Last fragment: drop the 4-byte trailer. */
			if (i == pages - 1)
				frag_len -= 4;

			skb_fill_page_desc(skb, i, rx_pg->page, 0, frag_len);
			rx_pg->page = NULL;

			/* Refill the page ring slot; on failure give the
			 * partially built skb's pages back and drop.
			 */
			err = bnx2_alloc_rx_page(bp, RX_PG_RING_IDX(pg_prod));
			if (unlikely(err)) {
				bnapi->rx_pg_cons = pg_cons;
				bnapi->rx_pg_prod = pg_prod;
				bnx2_reuse_rx_skb_pages(bp, bnapi, skb,
							pages - i);
				return err;
			}

			frag_size -= frag_len;
			skb->data_len += frag_len;
			skb->truesize += frag_len;
			skb->len += frag_len;

			pg_prod = NEXT_RX_BD(pg_prod);
			pg_cons = RX_PG_RING_IDX(NEXT_RX_BD(pg_cons));
		}
		bnapi->rx_pg_prod = pg_prod;
		bnapi->rx_pg_cons = pg_cons;
	}
	return 0;
}
2669
2670 static inline u16
2671 bnx2_get_hw_rx_cons(struct bnx2_napi *bnapi)
2672 {
2673         u16 cons = bnapi->status_blk->status_rx_quick_consumer_index0;
2674
2675         if (unlikely((cons & MAX_RX_DESC_CNT) == MAX_RX_DESC_CNT))
2676                 cons++;
2677         return cons;
2678 }
2679
/* NAPI rx completion handler: process up to @budget received packets,
 * hand them to the stack, refill/recycle the rx rings and tell the chip
 * how far the host has consumed.  Returns the number of packets
 * processed.
 */
static int
bnx2_rx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
{
	u16 hw_cons, sw_cons, sw_ring_cons, sw_prod, sw_ring_prod;
	struct l2_fhdr *rx_hdr;
	int rx_pkt = 0, pg_ring_used = 0;

	hw_cons = bnx2_get_hw_rx_cons(bnapi);
	sw_cons = bnapi->rx_cons;
	sw_prod = bnapi->rx_prod;

	/* Memory barrier necessary as speculative reads of the rx
	 * buffer can be ahead of the index in the status block
	 */
	rmb();
	while (sw_cons != hw_cons) {
		unsigned int len, hdr_len;
		u32 status;
		struct sw_bd *rx_buf;
		struct sk_buff *skb;
		dma_addr_t dma_addr;

		sw_ring_cons = RX_RING_IDX(sw_cons);
		sw_ring_prod = RX_RING_IDX(sw_prod);

		rx_buf = &bp->rx_buf_ring[sw_ring_cons];
		skb = rx_buf->skb;

		rx_buf->skb = NULL;

		dma_addr = pci_unmap_addr(rx_buf, mapping);

		/* Sync only the frame header + copy-threshold area; the
		 * rest is synced later if the buffer is actually kept.
		 */
		pci_dma_sync_single_for_cpu(bp->pdev, dma_addr,
			bp->rx_offset + RX_COPY_THRESH, PCI_DMA_FROMDEVICE);

		/* The chip places an l2_fhdr status header in front of
		 * the frame data.
		 */
		rx_hdr = (struct l2_fhdr *) skb->data;
		len = rx_hdr->l2_fhdr_pkt_len;

		/* Drop frames with any receive error; recycle the buffer. */
		if ((status = rx_hdr->l2_fhdr_status) &
			(L2_FHDR_ERRORS_BAD_CRC |
			L2_FHDR_ERRORS_PHY_DECODE |
			L2_FHDR_ERRORS_ALIGNMENT |
			L2_FHDR_ERRORS_TOO_SHORT |
			L2_FHDR_ERRORS_GIANT_FRAME)) {

			bnx2_reuse_rx_skb(bp, bnapi, skb, sw_ring_cons,
					  sw_ring_prod);
			goto next_rx;
		}
		/* Determine how much of the frame is in the linear buffer;
		 * the remainder (if any) lives in the page ring.
		 */
		hdr_len = 0;
		if (status & L2_FHDR_STATUS_SPLIT) {
			hdr_len = rx_hdr->l2_fhdr_ip_xsum;
			pg_ring_used = 1;
		} else if (len > bp->rx_jumbo_thresh) {
			hdr_len = bp->rx_jumbo_thresh;
			pg_ring_used = 1;
		}

		/* Strip the 4-byte trailer (frame CRC) from the length. */
		len -= 4;

		if (len <= bp->rx_copy_thresh) {
			/* Small frame: copy into a fresh skb and recycle
			 * the original rx buffer.
			 */
			struct sk_buff *new_skb;

			new_skb = netdev_alloc_skb(bp->dev, len + 2);
			if (new_skb == NULL) {
				bnx2_reuse_rx_skb(bp, bnapi, skb, sw_ring_cons,
						  sw_ring_prod);
				goto next_rx;
			}

			/* aligned copy */
			skb_copy_from_linear_data_offset(skb, bp->rx_offset - 2,
				      new_skb->data, len + 2);
			skb_reserve(new_skb, 2);
			skb_put(new_skb, len);

			bnx2_reuse_rx_skb(bp, bnapi, skb,
				sw_ring_cons, sw_ring_prod);

			skb = new_skb;
		} else if (unlikely(bnx2_rx_skb(bp, bnapi, skb, len, hdr_len,
			   dma_addr, (sw_ring_cons << 16) | sw_ring_prod)))
			goto next_rx;

		skb->protocol = eth_type_trans(skb, bp->dev);

		/* Drop oversized frames unless they are VLAN tagged
		 * (ethertype 0x8100), which may legitimately exceed MTU.
		 */
		if ((len > (bp->dev->mtu + ETH_HLEN)) &&
			(ntohs(skb->protocol) != 0x8100)) {

			dev_kfree_skb(skb);
			goto next_rx;

		}

		/* Report hardware checksum status to the stack when rx
		 * checksum offload is enabled and no checksum error flagged.
		 */
		skb->ip_summed = CHECKSUM_NONE;
		if (bp->rx_csum &&
			(status & (L2_FHDR_STATUS_TCP_SEGMENT |
			L2_FHDR_STATUS_UDP_DATAGRAM))) {

			if (likely((status & (L2_FHDR_ERRORS_TCP_XSUM |
					      L2_FHDR_ERRORS_UDP_XSUM)) == 0))
				skb->ip_summed = CHECKSUM_UNNECESSARY;
		}

#ifdef BCM_VLAN
		if ((status & L2_FHDR_STATUS_L2_VLAN_TAG) && bp->vlgrp) {
			vlan_hwaccel_receive_skb(skb, bp->vlgrp,
				rx_hdr->l2_fhdr_vlan_tag);
		}
		else
#endif
			netif_receive_skb(skb);

		bp->dev->last_rx = jiffies;
		rx_pkt++;

next_rx:
		sw_cons = NEXT_RX_BD(sw_cons);
		sw_prod = NEXT_RX_BD(sw_prod);

		if ((rx_pkt == budget))
			break;

		/* Refresh hw_cons to see if there is new work */
		if (sw_cons == hw_cons) {
			hw_cons = bnx2_get_hw_rx_cons(bnapi);
			rmb();
		}
	}
	bnapi->rx_cons = sw_cons;
	bnapi->rx_prod = sw_prod;

	/* Tell the chip how far the host has consumed/refilled. */
	if (pg_ring_used)
		REG_WR16(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_PG_BDIDX,
			 bnapi->rx_pg_prod);

	REG_WR16(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BDIDX, sw_prod);

	REG_WR(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BSEQ, bnapi->rx_prod_bseq);

	/* Order the mailbox writes before any subsequent MMIO. */
	mmiowb();

	return rx_pkt;

}
2825
2826 /* MSI ISR - The only difference between this and the INTx ISR
2827  * is that the MSI interrupt is always serviced.
2828  */
2829 static irqreturn_t
2830 bnx2_msi(int irq, void *dev_instance)
2831 {
2832         struct net_device *dev = dev_instance;
2833         struct bnx2 *bp = netdev_priv(dev);
2834         struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
2835
2836         prefetch(bnapi->status_blk);
2837         REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
2838                 BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
2839                 BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
2840
2841         /* Return here if interrupt is disabled. */
2842         if (unlikely(atomic_read(&bp->intr_sem) != 0))
2843                 return IRQ_HANDLED;
2844
2845         netif_rx_schedule(dev, &bnapi->napi);
2846
2847         return IRQ_HANDLED;
2848 }
2849
2850 static irqreturn_t
2851 bnx2_msi_1shot(int irq, void *dev_instance)
2852 {
2853         struct net_device *dev = dev_instance;
2854         struct bnx2 *bp = netdev_priv(dev);
2855         struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
2856
2857         prefetch(bnapi->status_blk);
2858
2859         /* Return here if interrupt is disabled. */
2860         if (unlikely(atomic_read(&bp->intr_sem) != 0))
2861                 return IRQ_HANDLED;
2862
2863         netif_rx_schedule(dev, &bnapi->napi);
2864
2865         return IRQ_HANDLED;
2866 }
2867
/* INTx (legacy, possibly shared) interrupt handler.  Must detect whether
 * this device actually raised the interrupt before claiming it, since
 * the line may be shared with other devices.
 */
static irqreturn_t
bnx2_interrupt(int irq, void *dev_instance)
{
	struct net_device *dev = dev_instance;
	struct bnx2 *bp = netdev_priv(dev);
	struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
	struct status_block *sblk = bnapi->status_blk;

	/* When using INTx, it is possible for the interrupt to arrive
	 * at the CPU before the status block posted prior to the
	 * interrupt. Reading a register will flush the status block.
	 * When using MSI, the MSI message will always complete after
	 * the status block write.
	 */
	if ((sblk->status_idx == bnapi->last_status_idx) &&
	    (REG_RD(bp, BNX2_PCICFG_MISC_STATUS) &
	     BNX2_PCICFG_MISC_STATUS_INTA_VALUE))
		return IRQ_NONE;

	/* Mask further interrupts; NAPI poll will re-enable them. */
	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
		BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
		BNX2_PCICFG_INT_ACK_CMD_MASK_INT);

	/* Read back to deassert IRQ immediately to avoid too many
	 * spurious interrupts.
	 */
	REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD);

	/* Return here if interrupt is shared and is disabled. */
	if (unlikely(atomic_read(&bp->intr_sem) != 0))
		return IRQ_HANDLED;

	/* Record the status index we are servicing before scheduling so
	 * the poll loop can detect newer status block updates.
	 */
	if (netif_rx_schedule_prep(dev, &bnapi->napi)) {
		bnapi->last_status_idx = sblk->status_idx;
		__netif_rx_schedule(dev, &bnapi->napi);
	}

	return IRQ_HANDLED;
}
2907
2908 static irqreturn_t
2909 bnx2_tx_msix(int irq, void *dev_instance)
2910 {
2911         struct net_device *dev = dev_instance;
2912         struct bnx2 *bp = netdev_priv(dev);
2913         struct bnx2_napi *bnapi = &bp->bnx2_napi[BNX2_TX_VEC];
2914
2915         prefetch(bnapi->status_blk_msix);
2916
2917         /* Return here if interrupt is disabled. */
2918         if (unlikely(atomic_read(&bp->intr_sem) != 0))
2919                 return IRQ_HANDLED;
2920
2921         netif_rx_schedule(dev, &bnapi->napi);
2922         return IRQ_HANDLED;
2923 }
2924
2925 #define STATUS_ATTN_EVENTS      (STATUS_ATTN_BITS_LINK_STATE | \
2926                                  STATUS_ATTN_BITS_TIMER_ABORT)
2927
2928 static inline int
2929 bnx2_has_work(struct bnx2_napi *bnapi)
2930 {
2931         struct status_block *sblk = bnapi->status_blk;
2932
2933         if ((bnx2_get_hw_rx_cons(bnapi) != bnapi->rx_cons) ||
2934             (bnx2_get_hw_tx_cons(bnapi) != bnapi->hw_tx_cons))
2935                 return 1;
2936
2937         if ((sblk->status_attn_bits & STATUS_ATTN_EVENTS) !=
2938             (sblk->status_attn_bits_ack & STATUS_ATTN_EVENTS))
2939                 return 1;
2940
2941         return 0;
2942 }
2943
/* NAPI poll handler for the dedicated MSI-X tx vector: reap tx
 * completions until the hardware consumer catches up or @budget is
 * exhausted, then re-enable the vector's interrupt.
 */
static int bnx2_tx_poll(struct napi_struct *napi, int budget)
{
	struct bnx2_napi *bnapi = container_of(napi, struct bnx2_napi, napi);
	struct bnx2 *bp = bnapi->bp;
	int work_done = 0;
	struct status_block_msix *sblk = bnapi->status_blk_msix;

	do {
		work_done += bnx2_tx_int(bp, bnapi, budget - work_done);
		if (unlikely(work_done >= budget))
			return work_done;

		/* Record the status index before re-checking for work so
		 * a status block update after the check is not lost; the
		 * rmb() orders the read against the consumer re-check.
		 */
		bnapi->last_status_idx = sblk->status_idx;
		rmb();
	} while (bnx2_get_hw_tx_cons(bnapi) != bnapi->hw_tx_cons);

	/* All caught up: exit polling and unmask this vector. */
	netif_rx_complete(bp->dev, napi);
	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
	       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
	       bnapi->last_status_idx);
	return work_done;
}
2966
2967 static int bnx2_poll_work(struct bnx2 *bp, struct bnx2_napi *bnapi,
2968                           int work_done, int budget)
2969 {
2970         struct status_block *sblk = bnapi->status_blk;
2971         u32 status_attn_bits = sblk->status_attn_bits;
2972         u32 status_attn_bits_ack = sblk->status_attn_bits_ack;
2973
2974         if ((status_attn_bits & STATUS_ATTN_EVENTS) !=
2975             (status_attn_bits_ack & STATUS_ATTN_EVENTS)) {
2976
2977                 bnx2_phy_int(bp, bnapi);
2978
2979                 /* This is needed to take care of transient status
2980                  * during link changes.
2981                  */
2982                 REG_WR(bp, BNX2_HC_COMMAND,
2983                        bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
2984                 REG_RD(bp, BNX2_HC_COMMAND);
2985         }
2986
2987         if (bnx2_get_hw_tx_cons(bnapi) != bnapi->hw_tx_cons)
2988                 bnx2_tx_int(bp, bnapi, 0);
2989
2990         if (bnx2_get_hw_rx_cons(bnapi) != bnapi->rx_cons)
2991                 work_done += bnx2_rx_int(bp, bnapi, budget - work_done);
2992
2993         return work_done;
2994 }
2995
/* Main NAPI poll handler: loop servicing attention/tx/rx work until
 * either @budget is exhausted (stay in polling mode) or no work remains
 * (exit polling and re-enable interrupts).
 */
static int bnx2_poll(struct napi_struct *napi, int budget)
{
	struct bnx2_napi *bnapi = container_of(napi, struct bnx2_napi, napi);
	struct bnx2 *bp = bnapi->bp;
	int work_done = 0;
	struct status_block *sblk = bnapi->status_blk;

	while (1) {
		work_done = bnx2_poll_work(bp, bnapi, work_done, budget);

		if (unlikely(work_done >= budget))
			break;

		/* bnapi->last_status_idx is used below to tell the hw how
		 * much work has been processed, so we must read it before
		 * checking for more work.
		 */
		bnapi->last_status_idx = sblk->status_idx;
		rmb();
		if (likely(!bnx2_has_work(bnapi))) {
			netif_rx_complete(bp->dev, napi);
			/* MSI/MSI-X: a single ack write re-enables the
			 * interrupt.
			 */
			if (likely(bp->flags & BNX2_FLAG_USING_MSI_OR_MSIX)) {
				REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
				       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
				       bnapi->last_status_idx);
				break;
			}
			/* INTx: first update the index with interrupts
			 * still masked, then write again without MASK_INT
			 * to actually re-enable them.
			 */
			REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
			       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
			       BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
			       bnapi->last_status_idx);

			REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
			       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
			       bnapi->last_status_idx);
			break;
		}
	}

	return work_done;
}
3037
/* Called with rtnl_lock from vlan functions and also netif_tx_lock
 * from set_multicast.
 *
 * Programs the chip's rx filtering (promiscuous / all-multi / multicast
 * hash / VLAN tag keeping) from the net_device flags and multicast list.
 */
static void
bnx2_set_rx_mode(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	u32 rx_mode, sort_mode;
	int i;

	spin_lock_bh(&bp->phy_lock);

	/* Start from the current mode with promiscuous and VLAN-keep
	 * cleared; they are re-added below as needed.
	 */
	rx_mode = bp->rx_mode & ~(BNX2_EMAC_RX_MODE_PROMISCUOUS |
				  BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG);
	sort_mode = 1 | BNX2_RPM_SORT_USER0_BC_EN;
#ifdef BCM_VLAN
	/* Keep VLAN tags in rx frames only when no VLAN group is
	 * registered and ASF management is not active.
	 */
	if (!bp->vlgrp && !(bp->flags & BNX2_FLAG_ASF_ENABLE))
		rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
#else
	if (!(bp->flags & BNX2_FLAG_ASF_ENABLE))
		rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
#endif
	if (dev->flags & IFF_PROMISC) {
		/* Promiscuous mode. */
		rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
		sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN |
			     BNX2_RPM_SORT_USER0_PROM_VLAN;
	}
	else if (dev->flags & IFF_ALLMULTI) {
		/* Accept all multicast: set every hash bit. */
		for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
			REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
			       0xffffffff);
		}
		sort_mode |= BNX2_RPM_SORT_USER0_MC_EN;
	}
	else {
		/* Accept one or more multicast(s). */
		struct dev_mc_list *mclist;
		u32 mc_filter[NUM_MC_HASH_REGISTERS];
		u32 regidx;
		u32 bit;
		u32 crc;

		memset(mc_filter, 0, 4 * NUM_MC_HASH_REGISTERS);

		/* Hash each multicast address (low byte of CRC32) into
		 * the 256-bit filter spread over 8 registers.
		 */
		for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
		     i++, mclist = mclist->next) {

			crc = ether_crc_le(ETH_ALEN, mclist->dmi_addr);
			bit = crc & 0xff;
			regidx = (bit & 0xe0) >> 5;
			bit &= 0x1f;
			mc_filter[regidx] |= (1 << bit);
		}

		for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
			REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
			       mc_filter[i]);
		}

		sort_mode |= BNX2_RPM_SORT_USER0_MC_HSH_EN;
	}

	/* Only touch the EMAC mode register when it actually changed. */
	if (rx_mode != bp->rx_mode) {
		bp->rx_mode = rx_mode;
		REG_WR(bp, BNX2_EMAC_RX_MODE, rx_mode);
	}

	/* Program the sort user0 rule: clear, set, then enable. */
	REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
	REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode);
	REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode | BNX2_RPM_SORT_USER0_ENA);

	spin_unlock_bh(&bp->phy_lock);
}
3112
3113 static void
3114 load_rv2p_fw(struct bnx2 *bp, __le32 *rv2p_code, u32 rv2p_code_len,
3115         u32 rv2p_proc)
3116 {
3117         int i;
3118         u32 val;
3119
3120
3121         for (i = 0; i < rv2p_code_len; i += 8) {
3122                 REG_WR(bp, BNX2_RV2P_INSTR_HIGH, le32_to_cpu(*rv2p_code));
3123                 rv2p_code++;
3124                 REG_WR(bp, BNX2_RV2P_INSTR_LOW, le32_to_cpu(*rv2p_code));
3125                 rv2p_code++;
3126
3127                 if (rv2p_proc == RV2P_PROC1) {
3128                         val = (i / 8) | BNX2_RV2P_PROC1_ADDR_CMD_RDWR;
3129                         REG_WR(bp, BNX2_RV2P_PROC1_ADDR_CMD, val);
3130                 }
3131                 else {
3132                         val = (i / 8) | BNX2_RV2P_PROC2_ADDR_CMD_RDWR;
3133                         REG_WR(bp, BNX2_RV2P_PROC2_ADDR_CMD, val);
3134                 }
3135         }
3136
3137         /* Reset the processor, un-stall is done later. */
3138         if (rv2p_proc == RV2P_PROC1) {
3139                 REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC1_RESET);
3140         }
3141         else {
3142                 REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC2_RESET);
3143         }
3144 }
3145
3146 static int
3147 load_cpu_fw(struct bnx2 *bp, struct cpu_reg *cpu_reg, struct fw_info *fw)
3148 {
3149         u32 offset;
3150         u32 val;
3151         int rc;
3152
3153         /* Halt the CPU. */
3154         val = REG_RD_IND(bp, cpu_reg->mode);
3155         val |= cpu_reg->mode_value_halt;
3156         REG_WR_IND(bp, cpu_reg->mode, val);
3157         REG_WR_IND(bp, cpu_reg->state, cpu_reg->state_value_clear);
3158
3159         /* Load the Text area. */
3160         offset = cpu_reg->spad_base + (fw->text_addr - cpu_reg->mips_view_base);
3161         if (fw->gz_text) {
3162                 int j;
3163
3164                 rc = zlib_inflate_blob(fw->text, FW_BUF_SIZE, fw->gz_text,
3165                                        fw->gz_text_len);
3166                 if (rc < 0)
3167                         return rc;
3168
3169                 for (j = 0; j < (fw->text_len / 4); j++, offset += 4) {
3170                         REG_WR_IND(bp, offset, le32_to_cpu(fw->text[j]));
3171                 }
3172         }
3173
3174         /* Load the Data area. */
3175         offset = cpu_reg->spad_base + (fw->data_addr - cpu_reg->mips_view_base);
3176         if (fw->data) {
3177                 int j;
3178
3179                 for (j = 0; j < (fw->data_len / 4); j++, offset += 4) {
3180                         REG_WR_IND(bp, offset, fw->data[j]);
3181                 }
3182         }
3183
3184         /* Load the SBSS area. */
3185         offset = cpu_reg->spad_base + (fw->sbss_addr - cpu_reg->mips_view_base);
3186         if (fw->sbss_len) {
3187                 int j;
3188
3189                 for (j = 0; j < (fw->sbss_len / 4); j++, offset += 4) {
3190                         REG_WR_IND(bp, offset, 0);
3191                 }
3192         }
3193
3194         /* Load the BSS area. */
3195         offset = cpu_reg->spad_base + (fw->bss_addr - cpu_reg->mips_view_base);
3196         if (fw->bss_len) {
3197                 int j;
3198
3199                 for (j = 0; j < (fw->bss_len/4); j++, offset += 4) {
3200                         REG_WR_IND(bp, offset, 0);
3201                 }
3202         }
3203
3204         /* Load the Read-Only area. */
3205         offset = cpu_reg->spad_base +
3206                 (fw->rodata_addr - cpu_reg->mips_view_base);
3207         if (fw->rodata) {
3208                 int j;
3209
3210                 for (j = 0; j < (fw->rodata_len / 4); j++, offset += 4) {
3211                         REG_WR_IND(bp, offset, fw->rodata[j]);
3212                 }
3213         }
3214
3215         /* Clear the pre-fetch instruction. */
3216         REG_WR_IND(bp, cpu_reg->inst, 0);
3217         REG_WR_IND(bp, cpu_reg->pc, fw->start_addr);
3218
3219         /* Start the CPU. */
3220         val = REG_RD_IND(bp, cpu_reg->mode);
3221         val &= ~cpu_reg->mode_value_halt;
3222         REG_WR_IND(bp, cpu_reg->state, cpu_reg->state_value_clear);
3223         REG_WR_IND(bp, cpu_reg->mode, val);
3224
3225         return 0;
3226 }
3227
3228 static int