1 /* bnx2.c: Broadcom NX2 network driver.
3 * Copyright (c) 2004, 2005, 2006 Broadcom Corporation
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation.
9 * Written by: Michael Chan (mchan@broadcom.com)
13 #include <linux/module.h>
14 #include <linux/moduleparam.h>
16 #include <linux/kernel.h>
17 #include <linux/timer.h>
18 #include <linux/errno.h>
19 #include <linux/ioport.h>
20 #include <linux/slab.h>
21 #include <linux/vmalloc.h>
22 #include <linux/interrupt.h>
23 #include <linux/pci.h>
24 #include <linux/init.h>
25 #include <linux/netdevice.h>
26 #include <linux/etherdevice.h>
27 #include <linux/skbuff.h>
28 #include <linux/dma-mapping.h>
29 #include <asm/bitops.h>
32 #include <linux/delay.h>
33 #include <asm/byteorder.h>
35 #include <linux/time.h>
36 #include <linux/ethtool.h>
37 #include <linux/mii.h>
38 #ifdef NETIF_F_HW_VLAN_TX
39 #include <linux/if_vlan.h>
45 #include <net/checksum.h>
48 #include <linux/workqueue.h>
49 #include <linux/crc32.h>
50 #include <linux/prefetch.h>
51 #include <linux/cache.h>
52 #include <linux/zlib.h>
58 #define DRV_MODULE_NAME "bnx2"
59 #define PFX DRV_MODULE_NAME ": "
60 #define DRV_MODULE_VERSION "1.5.5"
61 #define DRV_MODULE_RELDATE "February 1, 2007"
63 #define RUN_AT(x) (jiffies + (x))
65 /* Time in jiffies before concluding the transmitter is hung. */
66 #define TX_TIMEOUT (5*HZ)
68 static const char version[] __devinitdata =
69 "Broadcom NetXtreme II Gigabit Ethernet Driver " DRV_MODULE_NAME " v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
71 MODULE_AUTHOR("Michael Chan <mchan@broadcom.com>");
72 MODULE_DESCRIPTION("Broadcom NetXtreme II BCM5706/5708 Driver");
73 MODULE_LICENSE("GPL");
74 MODULE_VERSION(DRV_MODULE_VERSION);
76 static int disable_msi = 0;
78 module_param(disable_msi, int, 0);
79 MODULE_PARM_DESC(disable_msi, "Disable Message Signaled Interrupt (MSI)");
92 /* indexed by board_t, above */
95 } board_info[] __devinitdata = {
96 { "Broadcom NetXtreme II BCM5706 1000Base-T" },
97 { "HP NC370T Multifunction Gigabit Server Adapter" },
98 { "HP NC370i Multifunction Gigabit Server Adapter" },
99 { "Broadcom NetXtreme II BCM5706 1000Base-SX" },
100 { "HP NC370F Multifunction Gigabit Server Adapter" },
101 { "Broadcom NetXtreme II BCM5708 1000Base-T" },
102 { "Broadcom NetXtreme II BCM5708 1000Base-SX" },
103 { "Broadcom NetXtreme II BCM5709 1000Base-T" },
106 static struct pci_device_id bnx2_pci_tbl[] = {
107 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
108 PCI_VENDOR_ID_HP, 0x3101, 0, 0, NC370T },
109 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
110 PCI_VENDOR_ID_HP, 0x3106, 0, 0, NC370I },
111 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
112 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706 },
113 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708,
114 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708 },
115 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
116 PCI_VENDOR_ID_HP, 0x3102, 0, 0, NC370F },
117 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
118 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706S },
119 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708S,
120 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708S },
121 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709,
122 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709 },
126 static struct flash_spec flash_table[] =
129 {0x00000000, 0x40830380, 0x009f0081, 0xa184a053, 0xaf000400,
130 1, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
131 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
133 /* Expansion entry 0001 */
134 {0x08000002, 0x4b808201, 0x00050081, 0x03840253, 0xaf020406,
135 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
136 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
138 /* Saifun SA25F010 (non-buffered flash) */
139 /* strap, cfg1, & write1 need updates */
140 {0x04000001, 0x47808201, 0x00050081, 0x03840253, 0xaf020406,
141 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
142 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*2,
143 "Non-buffered flash (128kB)"},
144 /* Saifun SA25F020 (non-buffered flash) */
145 /* strap, cfg1, & write1 need updates */
146 {0x0c000003, 0x4f808201, 0x00050081, 0x03840253, 0xaf020406,
147 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
148 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*4,
149 "Non-buffered flash (256kB)"},
150 /* Expansion entry 0100 */
151 {0x11000000, 0x53808201, 0x00050081, 0x03840253, 0xaf020406,
152 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
153 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
155 /* Entry 0101: ST M45PE10 (non-buffered flash, TetonII B0) */
156 {0x19000002, 0x5b808201, 0x000500db, 0x03840253, 0xaf020406,
157 0, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
158 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*2,
159 "Entry 0101: ST M45PE10 (128kB non-bufferred)"},
160 /* Entry 0110: ST M45PE20 (non-buffered flash)*/
161 {0x15000001, 0x57808201, 0x000500db, 0x03840253, 0xaf020406,
162 0, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
163 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*4,
164 "Entry 0110: ST M45PE20 (256kB non-bufferred)"},
165 /* Saifun SA25F005 (non-buffered flash) */
166 /* strap, cfg1, & write1 need updates */
167 {0x1d000003, 0x5f808201, 0x00050081, 0x03840253, 0xaf020406,
168 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
169 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE,
170 "Non-buffered flash (64kB)"},
172 {0x22000000, 0x62808380, 0x009f0081, 0xa184a053, 0xaf000400,
173 1, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
174 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
176 /* Expansion entry 1001 */
177 {0x2a000002, 0x6b808201, 0x00050081, 0x03840253, 0xaf020406,
178 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
179 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
181 /* Expansion entry 1010 */
182 {0x26000001, 0x67808201, 0x00050081, 0x03840253, 0xaf020406,
183 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
184 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
186 /* ATMEL AT45DB011B (buffered flash) */
187 {0x2e000003, 0x6e808273, 0x00570081, 0x68848353, 0xaf000400,
188 1, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
189 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE,
190 "Buffered flash (128kB)"},
191 /* Expansion entry 1100 */
192 {0x33000000, 0x73808201, 0x00050081, 0x03840253, 0xaf020406,
193 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
194 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
196 /* Expansion entry 1101 */
197 {0x3b000002, 0x7b808201, 0x00050081, 0x03840253, 0xaf020406,
198 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
199 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
/* Atmel Expansion entry 1110 */
202 {0x37000001, 0x76808273, 0x00570081, 0x68848353, 0xaf000400,
203 1, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
204 BUFFERED_FLASH_BYTE_ADDR_MASK, 0,
205 "Entry 1110 (Atmel)"},
206 /* ATMEL AT45DB021B (buffered flash) */
207 {0x3f000003, 0x7e808273, 0x00570081, 0x68848353, 0xaf000400,
208 1, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
209 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE*2,
210 "Buffered flash (256kB)"},
213 MODULE_DEVICE_TABLE(pci, bnx2_pci_tbl);
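/* A note on the ring accounting in bnx2_tx_avail() below: with
* TX_DESC_CNT == 256 and MAX_TX_DESC_CNT == 255 (per the comment in
* the function), a completely full ring makes (tx_prod - tx_cons)
* equal TX_DESC_CNT, which is folded back to MAX_TX_DESC_CNT so that
* one descriptor always stays unused as the full/empty marker.
*/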
215 static inline u32 bnx2_tx_avail(struct bnx2 *bp)
/* The ring uses 256 indices for 255 entries; one of them
* needs to be skipped.
224 diff = bp->tx_prod - bp->tx_cons;
225 if (unlikely(diff >= TX_DESC_CNT)) {
227 if (diff == TX_DESC_CNT)
228 diff = MAX_TX_DESC_CNT;
230 return (bp->tx_ring_size - diff);
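/* The two helpers below access chip registers indirectly: the target
* offset is first written to the PCICFG window address register and
* the data is then transferred through the window data register.
* Note that the two-step sequence is not atomic, so callers must not
* race each other on the window.
*/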
234 bnx2_reg_rd_ind(struct bnx2 *bp, u32 offset)
236 REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
237 return (REG_RD(bp, BNX2_PCICFG_REG_WINDOW));
241 bnx2_reg_wr_ind(struct bnx2 *bp, u32 offset, u32 val)
243 REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
244 REG_WR(bp, BNX2_PCICFG_REG_WINDOW, val);
248 bnx2_ctx_wr(struct bnx2 *bp, u32 cid_addr, u32 offset, u32 val)
251 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
254 REG_WR(bp, BNX2_CTX_CTX_DATA, val);
255 REG_WR(bp, BNX2_CTX_CTX_CTRL,
256 offset | BNX2_CTX_CTX_CTRL_WRITE_REQ);
257 for (i = 0; i < 5; i++) {
259 val = REG_RD(bp, BNX2_CTX_CTX_CTRL);
260 if ((val & BNX2_CTX_CTX_CTRL_WRITE_REQ) == 0)
265 REG_WR(bp, BNX2_CTX_DATA_ADR, offset);
266 REG_WR(bp, BNX2_CTX_DATA, val);
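/* MDIO access helpers: the PHY address and register number are packed
* into EMAC_MDIO_COMM (phy_addr in bits 25:21, reg in bits 20:16) and
* START_BUSY kicks off the transaction, clearing when it completes.
* Hardware auto-polling is paused around each manual access so the
* MAC and driver do not contend for the MDIO bus.
*/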
271 bnx2_read_phy(struct bnx2 *bp, u32 reg, u32 *val)
276 if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
277 val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
278 val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;
280 REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
281 REG_RD(bp, BNX2_EMAC_MDIO_MODE);
286 val1 = (bp->phy_addr << 21) | (reg << 16) |
287 BNX2_EMAC_MDIO_COMM_COMMAND_READ | BNX2_EMAC_MDIO_COMM_DISEXT |
288 BNX2_EMAC_MDIO_COMM_START_BUSY;
289 REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);
291 for (i = 0; i < 50; i++) {
294 val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
295 if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
298 val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
299 val1 &= BNX2_EMAC_MDIO_COMM_DATA;
305 if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY) {
314 if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
315 val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
316 val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;
318 REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
319 REG_RD(bp, BNX2_EMAC_MDIO_MODE);
328 bnx2_write_phy(struct bnx2 *bp, u32 reg, u32 val)
333 if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
334 val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
335 val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;
337 REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
338 REG_RD(bp, BNX2_EMAC_MDIO_MODE);
343 val1 = (bp->phy_addr << 21) | (reg << 16) | val |
344 BNX2_EMAC_MDIO_COMM_COMMAND_WRITE |
345 BNX2_EMAC_MDIO_COMM_START_BUSY | BNX2_EMAC_MDIO_COMM_DISEXT;
346 REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);
348 for (i = 0; i < 50; i++) {
351 val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
352 if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
358 if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)
363 if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
364 val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
365 val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;
367 REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
368 REG_RD(bp, BNX2_EMAC_MDIO_MODE);
377 bnx2_disable_int(struct bnx2 *bp)
379 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
380 BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
381 REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD);
385 bnx2_enable_int(struct bnx2 *bp)
387 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
388 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
389 BNX2_PCICFG_INT_ACK_CMD_MASK_INT | bp->last_status_idx);
391 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
392 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID | bp->last_status_idx);
394 REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
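/* intr_sem acts as an interrupt-disable count: the ISRs bail out while
* it is non-zero, and synchronize_irq() guarantees that any handler
* already running has finished before the caller proceeds.
*/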
398 bnx2_disable_int_sync(struct bnx2 *bp)
400 atomic_inc(&bp->intr_sem);
401 bnx2_disable_int(bp);
402 synchronize_irq(bp->pdev->irq);
406 bnx2_netif_stop(struct bnx2 *bp)
408 bnx2_disable_int_sync(bp);
409 if (netif_running(bp->dev)) {
410 netif_poll_disable(bp->dev);
411 netif_tx_disable(bp->dev);
412 bp->dev->trans_start = jiffies; /* prevent tx timeout */
417 bnx2_netif_start(struct bnx2 *bp)
419 if (atomic_dec_and_test(&bp->intr_sem)) {
420 if (netif_running(bp->dev)) {
421 netif_wake_queue(bp->dev);
422 netif_poll_enable(bp->dev);
429 bnx2_free_mem(struct bnx2 *bp)
433 for (i = 0; i < bp->ctx_pages; i++) {
434 if (bp->ctx_blk[i]) {
435 pci_free_consistent(bp->pdev, BCM_PAGE_SIZE,
437 bp->ctx_blk_mapping[i]);
438 bp->ctx_blk[i] = NULL;
441 if (bp->status_blk) {
442 pci_free_consistent(bp->pdev, bp->status_stats_size,
443 bp->status_blk, bp->status_blk_mapping);
444 bp->status_blk = NULL;
445 bp->stats_blk = NULL;
447 if (bp->tx_desc_ring) {
448 pci_free_consistent(bp->pdev,
449 sizeof(struct tx_bd) * TX_DESC_CNT,
450 bp->tx_desc_ring, bp->tx_desc_mapping);
451 bp->tx_desc_ring = NULL;
453 kfree(bp->tx_buf_ring);
454 bp->tx_buf_ring = NULL;
455 for (i = 0; i < bp->rx_max_ring; i++) {
456 if (bp->rx_desc_ring[i])
457 pci_free_consistent(bp->pdev,
458 sizeof(struct rx_bd) * RX_DESC_CNT,
460 bp->rx_desc_mapping[i]);
461 bp->rx_desc_ring[i] = NULL;
463 vfree(bp->rx_buf_ring);
464 bp->rx_buf_ring = NULL;
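/* Allocation strategy: the tx/rx shadow buffer rings (struct sw_bd)
* live in regular kernel memory (kzalloc/vmalloc) while the hardware
* descriptor rings and the combined status + statistics block are
* DMA-coherent allocations; bnx2_free_mem() above undoes all of it.
*/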
468 bnx2_alloc_mem(struct bnx2 *bp)
470 int i, status_blk_size;
472 bp->tx_buf_ring = kzalloc(sizeof(struct sw_bd) * TX_DESC_CNT,
474 if (bp->tx_buf_ring == NULL)
477 bp->tx_desc_ring = pci_alloc_consistent(bp->pdev,
478 sizeof(struct tx_bd) *
480 &bp->tx_desc_mapping);
481 if (bp->tx_desc_ring == NULL)
484 bp->rx_buf_ring = vmalloc(sizeof(struct sw_bd) * RX_DESC_CNT *
486 if (bp->rx_buf_ring == NULL)
489 memset(bp->rx_buf_ring, 0, sizeof(struct sw_bd) * RX_DESC_CNT *
492 for (i = 0; i < bp->rx_max_ring; i++) {
493 bp->rx_desc_ring[i] =
494 pci_alloc_consistent(bp->pdev,
495 sizeof(struct rx_bd) * RX_DESC_CNT,
496 &bp->rx_desc_mapping[i]);
497 if (bp->rx_desc_ring[i] == NULL)
502 /* Combine status and statistics blocks into one allocation. */
503 status_blk_size = L1_CACHE_ALIGN(sizeof(struct status_block));
504 bp->status_stats_size = status_blk_size +
505 sizeof(struct statistics_block);
507 bp->status_blk = pci_alloc_consistent(bp->pdev, bp->status_stats_size,
508 &bp->status_blk_mapping);
509 if (bp->status_blk == NULL)
512 memset(bp->status_blk, 0, bp->status_stats_size);
514 bp->stats_blk = (void *) ((unsigned long) bp->status_blk +
517 bp->stats_blk_mapping = bp->status_blk_mapping + status_blk_size;
519 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
520 bp->ctx_pages = 0x2000 / BCM_PAGE_SIZE;
521 if (bp->ctx_pages == 0)
523 for (i = 0; i < bp->ctx_pages; i++) {
524 bp->ctx_blk[i] = pci_alloc_consistent(bp->pdev,
526 &bp->ctx_blk_mapping[i]);
527 if (bp->ctx_blk[i] == NULL)
539 bnx2_report_fw_link(struct bnx2 *bp)
541 u32 fw_link_status = 0;
546 switch (bp->line_speed) {
548 if (bp->duplex == DUPLEX_HALF)
549 fw_link_status = BNX2_LINK_STATUS_10HALF;
551 fw_link_status = BNX2_LINK_STATUS_10FULL;
554 if (bp->duplex == DUPLEX_HALF)
555 fw_link_status = BNX2_LINK_STATUS_100HALF;
557 fw_link_status = BNX2_LINK_STATUS_100FULL;
560 if (bp->duplex == DUPLEX_HALF)
561 fw_link_status = BNX2_LINK_STATUS_1000HALF;
563 fw_link_status = BNX2_LINK_STATUS_1000FULL;
566 if (bp->duplex == DUPLEX_HALF)
567 fw_link_status = BNX2_LINK_STATUS_2500HALF;
569 fw_link_status = BNX2_LINK_STATUS_2500FULL;
573 fw_link_status |= BNX2_LINK_STATUS_LINK_UP;
576 fw_link_status |= BNX2_LINK_STATUS_AN_ENABLED;
578 bnx2_read_phy(bp, MII_BMSR, &bmsr);
579 bnx2_read_phy(bp, MII_BMSR, &bmsr);
581 if (!(bmsr & BMSR_ANEGCOMPLETE) ||
582 bp->phy_flags & PHY_PARALLEL_DETECT_FLAG)
583 fw_link_status |= BNX2_LINK_STATUS_PARALLEL_DET;
585 fw_link_status |= BNX2_LINK_STATUS_AN_COMPLETE;
589 fw_link_status = BNX2_LINK_STATUS_LINK_DOWN;
591 REG_WR_IND(bp, bp->shmem_base + BNX2_LINK_STATUS, fw_link_status);
595 bnx2_report_link(struct bnx2 *bp)
598 netif_carrier_on(bp->dev);
599 printk(KERN_INFO PFX "%s NIC Link is Up, ", bp->dev->name);
601 printk("%d Mbps ", bp->line_speed);
603 if (bp->duplex == DUPLEX_FULL)
604 printk("full duplex");
606 printk("half duplex");
609 if (bp->flow_ctrl & FLOW_CTRL_RX) {
610 printk(", receive ");
611 if (bp->flow_ctrl & FLOW_CTRL_TX)
612 printk("& transmit ");
615 printk(", transmit ");
617 printk("flow control ON");
622 netif_carrier_off(bp->dev);
623 printk(KERN_ERR PFX "%s NIC Link is Down\n", bp->dev->name);
626 bnx2_report_fw_link(bp);
630 bnx2_resolve_flow_ctrl(struct bnx2 *bp)
632 u32 local_adv, remote_adv;
635 if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
636 (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {
638 if (bp->duplex == DUPLEX_FULL) {
639 bp->flow_ctrl = bp->req_flow_ctrl;
644 if (bp->duplex != DUPLEX_FULL) {
648 if ((bp->phy_flags & PHY_SERDES_FLAG) &&
649 (CHIP_NUM(bp) == CHIP_NUM_5708)) {
652 bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
653 if (val & BCM5708S_1000X_STAT1_TX_PAUSE)
654 bp->flow_ctrl |= FLOW_CTRL_TX;
655 if (val & BCM5708S_1000X_STAT1_RX_PAUSE)
656 bp->flow_ctrl |= FLOW_CTRL_RX;
660 bnx2_read_phy(bp, MII_ADVERTISE, &local_adv);
661 bnx2_read_phy(bp, MII_LPA, &remote_adv);
663 if (bp->phy_flags & PHY_SERDES_FLAG) {
664 u32 new_local_adv = 0;
665 u32 new_remote_adv = 0;
667 if (local_adv & ADVERTISE_1000XPAUSE)
668 new_local_adv |= ADVERTISE_PAUSE_CAP;
669 if (local_adv & ADVERTISE_1000XPSE_ASYM)
670 new_local_adv |= ADVERTISE_PAUSE_ASYM;
671 if (remote_adv & ADVERTISE_1000XPAUSE)
672 new_remote_adv |= ADVERTISE_PAUSE_CAP;
673 if (remote_adv & ADVERTISE_1000XPSE_ASYM)
674 new_remote_adv |= ADVERTISE_PAUSE_ASYM;
676 local_adv = new_local_adv;
677 remote_adv = new_remote_adv;
680 /* See Table 28B-3 of 802.3ab-1999 spec. */
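/* In short: both ends advertising PAUSE_CAP yields symmetric TX+RX
* pause; local CAP+ASYM against a remote advertising only ASYM yields
* local RX pause; local ASYM only against remote CAP+ASYM yields
* local TX pause. Anything else leaves flow control off.
*/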
681 if (local_adv & ADVERTISE_PAUSE_CAP) {
682 if(local_adv & ADVERTISE_PAUSE_ASYM) {
683 if (remote_adv & ADVERTISE_PAUSE_CAP) {
684 bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
686 else if (remote_adv & ADVERTISE_PAUSE_ASYM) {
687 bp->flow_ctrl = FLOW_CTRL_RX;
691 if (remote_adv & ADVERTISE_PAUSE_CAP) {
692 bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
696 else if (local_adv & ADVERTISE_PAUSE_ASYM) {
697 if ((remote_adv & ADVERTISE_PAUSE_CAP) &&
698 (remote_adv & ADVERTISE_PAUSE_ASYM)) {
700 bp->flow_ctrl = FLOW_CTRL_TX;
706 bnx2_5708s_linkup(struct bnx2 *bp)
711 bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
712 switch (val & BCM5708S_1000X_STAT1_SPEED_MASK) {
713 case BCM5708S_1000X_STAT1_SPEED_10:
714 bp->line_speed = SPEED_10;
716 case BCM5708S_1000X_STAT1_SPEED_100:
717 bp->line_speed = SPEED_100;
719 case BCM5708S_1000X_STAT1_SPEED_1G:
720 bp->line_speed = SPEED_1000;
722 case BCM5708S_1000X_STAT1_SPEED_2G5:
723 bp->line_speed = SPEED_2500;
726 if (val & BCM5708S_1000X_STAT1_FD)
727 bp->duplex = DUPLEX_FULL;
729 bp->duplex = DUPLEX_HALF;
735 bnx2_5706s_linkup(struct bnx2 *bp)
737 u32 bmcr, local_adv, remote_adv, common;
740 bp->line_speed = SPEED_1000;
742 bnx2_read_phy(bp, MII_BMCR, &bmcr);
743 if (bmcr & BMCR_FULLDPLX) {
744 bp->duplex = DUPLEX_FULL;
747 bp->duplex = DUPLEX_HALF;
750 if (!(bmcr & BMCR_ANENABLE)) {
754 bnx2_read_phy(bp, MII_ADVERTISE, &local_adv);
755 bnx2_read_phy(bp, MII_LPA, &remote_adv);
757 common = local_adv & remote_adv;
758 if (common & (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL)) {
760 if (common & ADVERTISE_1000XFULL) {
761 bp->duplex = DUPLEX_FULL;
764 bp->duplex = DUPLEX_HALF;
772 bnx2_copper_linkup(struct bnx2 *bp)
776 bnx2_read_phy(bp, MII_BMCR, &bmcr);
777 if (bmcr & BMCR_ANENABLE) {
778 u32 local_adv, remote_adv, common;
780 bnx2_read_phy(bp, MII_CTRL1000, &local_adv);
781 bnx2_read_phy(bp, MII_STAT1000, &remote_adv);
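/* MII_STAT1000 reports the partner's 1000BASE-T abilities two bit
* positions above the matching MII_CTRL1000 advertisement bits, hence
* the shift by 2 before masking off the common abilities.
*/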
783 common = local_adv & (remote_adv >> 2);
784 if (common & ADVERTISE_1000FULL) {
785 bp->line_speed = SPEED_1000;
786 bp->duplex = DUPLEX_FULL;
788 else if (common & ADVERTISE_1000HALF) {
789 bp->line_speed = SPEED_1000;
790 bp->duplex = DUPLEX_HALF;
793 bnx2_read_phy(bp, MII_ADVERTISE, &local_adv);
794 bnx2_read_phy(bp, MII_LPA, &remote_adv);
796 common = local_adv & remote_adv;
797 if (common & ADVERTISE_100FULL) {
798 bp->line_speed = SPEED_100;
799 bp->duplex = DUPLEX_FULL;
801 else if (common & ADVERTISE_100HALF) {
802 bp->line_speed = SPEED_100;
803 bp->duplex = DUPLEX_HALF;
805 else if (common & ADVERTISE_10FULL) {
806 bp->line_speed = SPEED_10;
807 bp->duplex = DUPLEX_FULL;
809 else if (common & ADVERTISE_10HALF) {
810 bp->line_speed = SPEED_10;
811 bp->duplex = DUPLEX_HALF;
820 if (bmcr & BMCR_SPEED100) {
821 bp->line_speed = SPEED_100;
824 bp->line_speed = SPEED_10;
826 if (bmcr & BMCR_FULLDPLX) {
827 bp->duplex = DUPLEX_FULL;
830 bp->duplex = DUPLEX_HALF;
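/* bnx2_set_mac_link() below mirrors the resolved PHY state into the
* MAC: port mode (MII/GMII/2.5G), duplex, and the rx/tx PAUSE enables
* derived from bp->flow_ctrl. The 0x26ff EMAC_TX_LENGTHS value is
* used only for half-duplex gigabit (presumably an extended slot
* time; the exact field encoding is not documented here).
*/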
838 bnx2_set_mac_link(struct bnx2 *bp)
842 REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x2620);
843 if (bp->link_up && (bp->line_speed == SPEED_1000) &&
844 (bp->duplex == DUPLEX_HALF)) {
845 REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x26ff);
848 /* Configure the EMAC mode register. */
849 val = REG_RD(bp, BNX2_EMAC_MODE);
851 val &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
852 BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
853 BNX2_EMAC_MODE_25G_MODE);
856 switch (bp->line_speed) {
858 if (CHIP_NUM(bp) != CHIP_NUM_5706) {
859 val |= BNX2_EMAC_MODE_PORT_MII_10M;
864 val |= BNX2_EMAC_MODE_PORT_MII;
867 val |= BNX2_EMAC_MODE_25G_MODE;
870 val |= BNX2_EMAC_MODE_PORT_GMII;
875 val |= BNX2_EMAC_MODE_PORT_GMII;
878 /* Set the MAC to operate in the appropriate duplex mode. */
879 if (bp->duplex == DUPLEX_HALF)
880 val |= BNX2_EMAC_MODE_HALF_DUPLEX;
881 REG_WR(bp, BNX2_EMAC_MODE, val);
883 /* Enable/disable rx PAUSE. */
884 bp->rx_mode &= ~BNX2_EMAC_RX_MODE_FLOW_EN;
886 if (bp->flow_ctrl & FLOW_CTRL_RX)
887 bp->rx_mode |= BNX2_EMAC_RX_MODE_FLOW_EN;
888 REG_WR(bp, BNX2_EMAC_RX_MODE, bp->rx_mode);
890 /* Enable/disable tx PAUSE. */
891 val = REG_RD(bp, BNX2_EMAC_TX_MODE);
892 val &= ~BNX2_EMAC_TX_MODE_FLOW_EN;
894 if (bp->flow_ctrl & FLOW_CTRL_TX)
895 val |= BNX2_EMAC_TX_MODE_FLOW_EN;
896 REG_WR(bp, BNX2_EMAC_TX_MODE, val);
898 /* Acknowledge the interrupt. */
899 REG_WR(bp, BNX2_EMAC_STATUS, BNX2_EMAC_STATUS_LINK_CHANGE);
905 bnx2_set_link(struct bnx2 *bp)
910 if (bp->loopback == MAC_LOOPBACK || bp->loopback == PHY_LOOPBACK) {
915 link_up = bp->link_up;
917 bnx2_read_phy(bp, MII_BMSR, &bmsr);
918 bnx2_read_phy(bp, MII_BMSR, &bmsr);
920 if ((bp->phy_flags & PHY_SERDES_FLAG) &&
921 (CHIP_NUM(bp) == CHIP_NUM_5706)) {
924 val = REG_RD(bp, BNX2_EMAC_STATUS);
925 if (val & BNX2_EMAC_STATUS_LINK)
926 bmsr |= BMSR_LSTATUS;
928 bmsr &= ~BMSR_LSTATUS;
931 if (bmsr & BMSR_LSTATUS) {
934 if (bp->phy_flags & PHY_SERDES_FLAG) {
935 if (CHIP_NUM(bp) == CHIP_NUM_5706)
936 bnx2_5706s_linkup(bp);
937 else if (CHIP_NUM(bp) == CHIP_NUM_5708)
938 bnx2_5708s_linkup(bp);
941 bnx2_copper_linkup(bp);
943 bnx2_resolve_flow_ctrl(bp);
946 if ((bp->phy_flags & PHY_SERDES_FLAG) &&
947 (bp->autoneg & AUTONEG_SPEED)) {
951 bnx2_read_phy(bp, MII_BMCR, &bmcr);
952 bmcr &= ~BCM5708S_BMCR_FORCE_2500;
953 if (!(bmcr & BMCR_ANENABLE)) {
954 bnx2_write_phy(bp, MII_BMCR, bmcr |
958 bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG;
962 if (bp->link_up != link_up) {
963 bnx2_report_link(bp);
966 bnx2_set_mac_link(bp);
972 bnx2_reset_phy(struct bnx2 *bp)
977 bnx2_write_phy(bp, MII_BMCR, BMCR_RESET);
979 #define PHY_RESET_MAX_WAIT 100
980 for (i = 0; i < PHY_RESET_MAX_WAIT; i++) {
bnx2_read_phy(bp, MII_BMCR, &reg);
984 if (!(reg & BMCR_RESET)) {
989 if (i == PHY_RESET_MAX_WAIT) {
996 bnx2_phy_get_pause_adv(struct bnx2 *bp)
1000 if ((bp->req_flow_ctrl & (FLOW_CTRL_RX | FLOW_CTRL_TX)) ==
1001 (FLOW_CTRL_RX | FLOW_CTRL_TX)) {
1003 if (bp->phy_flags & PHY_SERDES_FLAG) {
1004 adv = ADVERTISE_1000XPAUSE;
1007 adv = ADVERTISE_PAUSE_CAP;
1010 else if (bp->req_flow_ctrl & FLOW_CTRL_TX) {
1011 if (bp->phy_flags & PHY_SERDES_FLAG) {
1012 adv = ADVERTISE_1000XPSE_ASYM;
1015 adv = ADVERTISE_PAUSE_ASYM;
1018 else if (bp->req_flow_ctrl & FLOW_CTRL_RX) {
1019 if (bp->phy_flags & PHY_SERDES_FLAG) {
1020 adv = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
1023 adv = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
1030 bnx2_setup_serdes_phy(struct bnx2 *bp)
1035 if (!(bp->autoneg & AUTONEG_SPEED)) {
1037 int force_link_down = 0;
1039 bnx2_read_phy(bp, MII_ADVERTISE, &adv);
1040 adv &= ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF);
1042 bnx2_read_phy(bp, MII_BMCR, &bmcr);
1043 new_bmcr = bmcr & ~(BMCR_ANENABLE | BCM5708S_BMCR_FORCE_2500);
1044 new_bmcr |= BMCR_SPEED1000;
1045 if (bp->req_line_speed == SPEED_2500) {
1046 new_bmcr |= BCM5708S_BMCR_FORCE_2500;
1047 bnx2_read_phy(bp, BCM5708S_UP1, &up1);
1048 if (!(up1 & BCM5708S_UP1_2G5)) {
1049 up1 |= BCM5708S_UP1_2G5;
1050 bnx2_write_phy(bp, BCM5708S_UP1, up1);
1051 force_link_down = 1;
1053 } else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
1054 bnx2_read_phy(bp, BCM5708S_UP1, &up1);
1055 if (up1 & BCM5708S_UP1_2G5) {
1056 up1 &= ~BCM5708S_UP1_2G5;
1057 bnx2_write_phy(bp, BCM5708S_UP1, up1);
1058 force_link_down = 1;
1062 if (bp->req_duplex == DUPLEX_FULL) {
1063 adv |= ADVERTISE_1000XFULL;
1064 new_bmcr |= BMCR_FULLDPLX;
1067 adv |= ADVERTISE_1000XHALF;
1068 new_bmcr &= ~BMCR_FULLDPLX;
1070 if ((new_bmcr != bmcr) || (force_link_down)) {
/* Force a link-down event visible to the link partner */
1073 bnx2_write_phy(bp, MII_ADVERTISE, adv &
1074 ~(ADVERTISE_1000XFULL |
1075 ADVERTISE_1000XHALF));
1076 bnx2_write_phy(bp, MII_BMCR, bmcr |
1077 BMCR_ANRESTART | BMCR_ANENABLE);
1080 netif_carrier_off(bp->dev);
1081 bnx2_write_phy(bp, MII_BMCR, new_bmcr);
1082 bnx2_report_link(bp);
1084 bnx2_write_phy(bp, MII_ADVERTISE, adv);
1085 bnx2_write_phy(bp, MII_BMCR, new_bmcr);
1090 if (bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) {
1091 bnx2_read_phy(bp, BCM5708S_UP1, &up1);
1092 up1 |= BCM5708S_UP1_2G5;
1093 bnx2_write_phy(bp, BCM5708S_UP1, up1);
1096 if (bp->advertising & ADVERTISED_1000baseT_Full)
1097 new_adv |= ADVERTISE_1000XFULL;
1099 new_adv |= bnx2_phy_get_pause_adv(bp);
1101 bnx2_read_phy(bp, MII_ADVERTISE, &adv);
1102 bnx2_read_phy(bp, MII_BMCR, &bmcr);
1104 bp->serdes_an_pending = 0;
1105 if ((adv != new_adv) || ((bmcr & BMCR_ANENABLE) == 0)) {
/* Force a link-down event visible to the link partner */
1108 bnx2_write_phy(bp, MII_BMCR, BMCR_LOOPBACK);
1109 spin_unlock_bh(&bp->phy_lock);
1111 spin_lock_bh(&bp->phy_lock);
1114 bnx2_write_phy(bp, MII_ADVERTISE, new_adv);
1115 bnx2_write_phy(bp, MII_BMCR, bmcr | BMCR_ANRESTART |
/* Speed up link-up time when the link partner
* does not autonegotiate, which is very common
* in blade servers. Some blade servers use
* IPMI for keyboard input, so it is important
* to minimize link disruptions. Autonegotiation
* involves exchanging base pages plus 3 next pages
* and normally completes in about 120 msec.
1125 bp->current_interval = SERDES_AN_TIMEOUT;
1126 bp->serdes_an_pending = 1;
1127 mod_timer(&bp->timer, jiffies + bp->current_interval);
1133 #define ETHTOOL_ALL_FIBRE_SPEED \
1134 (ADVERTISED_1000baseT_Full)
1136 #define ETHTOOL_ALL_COPPER_SPEED \
1137 (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | \
1138 ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full | \
1139 ADVERTISED_1000baseT_Full)
1141 #define PHY_ALL_10_100_SPEED (ADVERTISE_10HALF | ADVERTISE_10FULL | \
1142 ADVERTISE_100HALF | ADVERTISE_100FULL | ADVERTISE_CSMA)
1144 #define PHY_ALL_1000_SPEED (ADVERTISE_1000HALF | ADVERTISE_1000FULL)
1147 bnx2_setup_copper_phy(struct bnx2 *bp)
1152 bnx2_read_phy(bp, MII_BMCR, &bmcr);
1154 if (bp->autoneg & AUTONEG_SPEED) {
1155 u32 adv_reg, adv1000_reg;
1156 u32 new_adv_reg = 0;
1157 u32 new_adv1000_reg = 0;
1159 bnx2_read_phy(bp, MII_ADVERTISE, &adv_reg);
1160 adv_reg &= (PHY_ALL_10_100_SPEED | ADVERTISE_PAUSE_CAP |
1161 ADVERTISE_PAUSE_ASYM);
1163 bnx2_read_phy(bp, MII_CTRL1000, &adv1000_reg);
1164 adv1000_reg &= PHY_ALL_1000_SPEED;
1166 if (bp->advertising & ADVERTISED_10baseT_Half)
1167 new_adv_reg |= ADVERTISE_10HALF;
1168 if (bp->advertising & ADVERTISED_10baseT_Full)
1169 new_adv_reg |= ADVERTISE_10FULL;
1170 if (bp->advertising & ADVERTISED_100baseT_Half)
1171 new_adv_reg |= ADVERTISE_100HALF;
1172 if (bp->advertising & ADVERTISED_100baseT_Full)
1173 new_adv_reg |= ADVERTISE_100FULL;
1174 if (bp->advertising & ADVERTISED_1000baseT_Full)
1175 new_adv1000_reg |= ADVERTISE_1000FULL;
1177 new_adv_reg |= ADVERTISE_CSMA;
1179 new_adv_reg |= bnx2_phy_get_pause_adv(bp);
1181 if ((adv1000_reg != new_adv1000_reg) ||
1182 (adv_reg != new_adv_reg) ||
1183 ((bmcr & BMCR_ANENABLE) == 0)) {
1185 bnx2_write_phy(bp, MII_ADVERTISE, new_adv_reg);
1186 bnx2_write_phy(bp, MII_CTRL1000, new_adv1000_reg);
1187 bnx2_write_phy(bp, MII_BMCR, BMCR_ANRESTART |
1190 else if (bp->link_up) {
1191 /* Flow ctrl may have changed from auto to forced */
1192 /* or vice-versa. */
1194 bnx2_resolve_flow_ctrl(bp);
1195 bnx2_set_mac_link(bp);
1201 if (bp->req_line_speed == SPEED_100) {
1202 new_bmcr |= BMCR_SPEED100;
1204 if (bp->req_duplex == DUPLEX_FULL) {
1205 new_bmcr |= BMCR_FULLDPLX;
1207 if (new_bmcr != bmcr) {
1210 bnx2_read_phy(bp, MII_BMSR, &bmsr);
1211 bnx2_read_phy(bp, MII_BMSR, &bmsr);
1213 if (bmsr & BMSR_LSTATUS) {
1214 /* Force link down */
1215 bnx2_write_phy(bp, MII_BMCR, BMCR_LOOPBACK);
1216 spin_unlock_bh(&bp->phy_lock);
1218 spin_lock_bh(&bp->phy_lock);
1220 bnx2_read_phy(bp, MII_BMSR, &bmsr);
1221 bnx2_read_phy(bp, MII_BMSR, &bmsr);
1224 bnx2_write_phy(bp, MII_BMCR, new_bmcr);
/* Normally, the new speed is set up after the link has
* gone down and come back up. In some cases, the link will not
* go down, so we need to set up the new speed here.
1230 if (bmsr & BMSR_LSTATUS) {
1231 bp->line_speed = bp->req_line_speed;
1232 bp->duplex = bp->req_duplex;
1233 bnx2_resolve_flow_ctrl(bp);
1234 bnx2_set_mac_link(bp);
1241 bnx2_setup_phy(struct bnx2 *bp)
1243 if (bp->loopback == MAC_LOOPBACK)
1246 if (bp->phy_flags & PHY_SERDES_FLAG) {
1247 return (bnx2_setup_serdes_phy(bp));
1250 return (bnx2_setup_copper_phy(bp));
1255 bnx2_init_5708s_phy(struct bnx2 *bp)
1259 bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG3);
1260 bnx2_write_phy(bp, BCM5708S_DIG_3_0, BCM5708S_DIG_3_0_USE_IEEE);
1261 bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);
1263 bnx2_read_phy(bp, BCM5708S_1000X_CTL1, &val);
1264 val |= BCM5708S_1000X_CTL1_FIBER_MODE | BCM5708S_1000X_CTL1_AUTODET_EN;
1265 bnx2_write_phy(bp, BCM5708S_1000X_CTL1, val);
1267 bnx2_read_phy(bp, BCM5708S_1000X_CTL2, &val);
1268 val |= BCM5708S_1000X_CTL2_PLLEL_DET_EN;
1269 bnx2_write_phy(bp, BCM5708S_1000X_CTL2, val);
1271 if (bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) {
1272 bnx2_read_phy(bp, BCM5708S_UP1, &val);
1273 val |= BCM5708S_UP1_2G5;
1274 bnx2_write_phy(bp, BCM5708S_UP1, val);
1277 if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
1278 (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
1279 (CHIP_ID(bp) == CHIP_ID_5708_B1)) {
1280 /* increase tx signal amplitude */
1281 bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
1282 BCM5708S_BLK_ADDR_TX_MISC);
1283 bnx2_read_phy(bp, BCM5708S_TX_ACTL1, &val);
1284 val &= ~BCM5708S_TX_ACTL1_DRIVER_VCM;
1285 bnx2_write_phy(bp, BCM5708S_TX_ACTL1, val);
1286 bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);
1289 val = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_CONFIG) &
1290 BNX2_PORT_HW_CFG_CFG_TXCTL3_MASK;
1295 is_backplane = REG_RD_IND(bp, bp->shmem_base +
1296 BNX2_SHARED_HW_CFG_CONFIG);
1297 if (is_backplane & BNX2_SHARED_HW_CFG_PHY_BACKPLANE) {
1298 bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
1299 BCM5708S_BLK_ADDR_TX_MISC);
1300 bnx2_write_phy(bp, BCM5708S_TX_ACTL3, val);
1301 bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
1302 BCM5708S_BLK_ADDR_DIG);
1309 bnx2_init_5706s_phy(struct bnx2 *bp)
1311 bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG;
1313 if (CHIP_NUM(bp) == CHIP_NUM_5706)
1314 REG_WR(bp, BNX2_MISC_GP_HW_CTL0, 0x300);
1316 if (bp->dev->mtu > 1500) {
1319 /* Set extended packet length bit */
1320 bnx2_write_phy(bp, 0x18, 0x7);
1321 bnx2_read_phy(bp, 0x18, &val);
1322 bnx2_write_phy(bp, 0x18, (val & 0xfff8) | 0x4000);
1324 bnx2_write_phy(bp, 0x1c, 0x6c00);
1325 bnx2_read_phy(bp, 0x1c, &val);
1326 bnx2_write_phy(bp, 0x1c, (val & 0x3ff) | 0xec02);
1331 bnx2_write_phy(bp, 0x18, 0x7);
1332 bnx2_read_phy(bp, 0x18, &val);
1333 bnx2_write_phy(bp, 0x18, val & ~0x4007);
1335 bnx2_write_phy(bp, 0x1c, 0x6c00);
1336 bnx2_read_phy(bp, 0x1c, &val);
1337 bnx2_write_phy(bp, 0x1c, (val & 0x3fd) | 0xec00);
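/* Registers 0x18 and 0x1c above are Broadcom shadow registers: a
* write first selects the shadow page, the current value is read
* back, modified, and rewritten together with the page-select bits.
*/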
1344 bnx2_init_copper_phy(struct bnx2 *bp)
1348 if (bp->phy_flags & PHY_CRC_FIX_FLAG) {
1349 bnx2_write_phy(bp, 0x18, 0x0c00);
1350 bnx2_write_phy(bp, 0x17, 0x000a);
1351 bnx2_write_phy(bp, 0x15, 0x310b);
1352 bnx2_write_phy(bp, 0x17, 0x201f);
1353 bnx2_write_phy(bp, 0x15, 0x9506);
1354 bnx2_write_phy(bp, 0x17, 0x401f);
1355 bnx2_write_phy(bp, 0x15, 0x14e2);
1356 bnx2_write_phy(bp, 0x18, 0x0400);
1359 if (bp->phy_flags & PHY_DIS_EARLY_DAC_FLAG) {
1360 bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS,
1361 MII_BNX2_DSP_EXPAND_REG | 0x8);
1362 bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &val);
1364 bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val);
1367 if (bp->dev->mtu > 1500) {
1368 /* Set extended packet length bit */
1369 bnx2_write_phy(bp, 0x18, 0x7);
1370 bnx2_read_phy(bp, 0x18, &val);
1371 bnx2_write_phy(bp, 0x18, val | 0x4000);
1373 bnx2_read_phy(bp, 0x10, &val);
1374 bnx2_write_phy(bp, 0x10, val | 0x1);
1377 bnx2_write_phy(bp, 0x18, 0x7);
1378 bnx2_read_phy(bp, 0x18, &val);
1379 bnx2_write_phy(bp, 0x18, val & ~0x4007);
1381 bnx2_read_phy(bp, 0x10, &val);
1382 bnx2_write_phy(bp, 0x10, val & ~0x1);
1385 /* ethernet@wirespeed */
1386 bnx2_write_phy(bp, 0x18, 0x7007);
1387 bnx2_read_phy(bp, 0x18, &val);
1388 bnx2_write_phy(bp, 0x18, val | (1 << 15) | (1 << 4));
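/* Ethernet@Wirespeed lets the PHY fall back to a lower speed when
* gigabit cannot be established (e.g. a marginal cable); the
* read-modify-write above enables it through the 0x18 shadow register.
*/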
1394 bnx2_init_phy(struct bnx2 *bp)
1399 bp->phy_flags &= ~PHY_INT_MODE_MASK_FLAG;
1400 bp->phy_flags |= PHY_INT_MODE_LINK_READY_FLAG;
1402 REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);
1406 bnx2_read_phy(bp, MII_PHYSID1, &val);
1407 bp->phy_id = val << 16;
1408 bnx2_read_phy(bp, MII_PHYSID2, &val);
1409 bp->phy_id |= val & 0xffff;
1411 if (bp->phy_flags & PHY_SERDES_FLAG) {
1412 if (CHIP_NUM(bp) == CHIP_NUM_5706)
1413 rc = bnx2_init_5706s_phy(bp);
1414 else if (CHIP_NUM(bp) == CHIP_NUM_5708)
1415 rc = bnx2_init_5708s_phy(bp);
1418 rc = bnx2_init_copper_phy(bp);
1427 bnx2_set_mac_loopback(struct bnx2 *bp)
1431 mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
1432 mac_mode &= ~BNX2_EMAC_MODE_PORT;
1433 mac_mode |= BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK;
1434 REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
1439 static int bnx2_test_link(struct bnx2 *);
1442 bnx2_set_phy_loopback(struct bnx2 *bp)
1447 spin_lock_bh(&bp->phy_lock);
1448 rc = bnx2_write_phy(bp, MII_BMCR, BMCR_LOOPBACK | BMCR_FULLDPLX |
1450 spin_unlock_bh(&bp->phy_lock);
1454 for (i = 0; i < 10; i++) {
1455 if (bnx2_test_link(bp) == 0)
1460 mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
1461 mac_mode &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
1462 BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
1463 BNX2_EMAC_MODE_25G_MODE);
1465 mac_mode |= BNX2_EMAC_MODE_PORT_GMII;
1466 REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
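/* Driver/firmware handshake: a sequence number is OR'ed into the
* message, written to the DRV_MB mailbox, and the firmware echoes the
* sequence back in FW_MB with an ACK bit once it has processed the
* request. A timeout is reported back to the firmware as well.
*/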
1472 bnx2_fw_sync(struct bnx2 *bp, u32 msg_data, int silent)
1478 msg_data |= bp->fw_wr_seq;
1480 REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_MB, msg_data);
1482 /* wait for an acknowledgement. */
1483 for (i = 0; i < (FW_ACK_TIME_OUT_MS / 10); i++) {
1486 val = REG_RD_IND(bp, bp->shmem_base + BNX2_FW_MB);
1488 if ((val & BNX2_FW_MSG_ACK) == (msg_data & BNX2_DRV_MSG_SEQ))
1491 if ((msg_data & BNX2_DRV_MSG_DATA) == BNX2_DRV_MSG_DATA_WAIT0)
1494 /* If we timed out, inform the firmware that this is the case. */
1495 if ((val & BNX2_FW_MSG_ACK) != (msg_data & BNX2_DRV_MSG_SEQ)) {
1497 printk(KERN_ERR PFX "fw sync timeout, reset code = "
1500 msg_data &= ~BNX2_DRV_MSG_CODE;
1501 msg_data |= BNX2_DRV_MSG_CODE_FW_TIMEOUT;
1503 REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_MB, msg_data);
1508 if ((val & BNX2_FW_MSG_STATUS_MASK) != BNX2_FW_MSG_STATUS_OK)
1515 bnx2_init_5709_context(struct bnx2 *bp)
1520 val = BNX2_CTX_COMMAND_ENABLED | BNX2_CTX_COMMAND_MEM_INIT | (1 << 12);
1521 val |= (BCM_PAGE_BITS - 8) << 16;
1522 REG_WR(bp, BNX2_CTX_COMMAND, val);
1523 for (i = 0; i < bp->ctx_pages; i++) {
1526 REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA0,
1527 (bp->ctx_blk_mapping[i] & 0xffffffff) |
1528 BNX2_CTX_HOST_PAGE_TBL_DATA0_VALID);
1529 REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA1,
1530 (u64) bp->ctx_blk_mapping[i] >> 32);
1531 REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL, i |
1532 BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ);
1533 for (j = 0; j < 10; j++) {
1535 val = REG_RD(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL);
1536 if (!(val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ))
1540 if (val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ) {
1549 bnx2_init_context(struct bnx2 *bp)
1555 u32 vcid_addr, pcid_addr, offset;
1559 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
1562 vcid_addr = GET_PCID_ADDR(vcid);
1564 new_vcid = 0x60 + (vcid & 0xf0) + (vcid & 0x7);
1569 pcid_addr = GET_PCID_ADDR(new_vcid);
1572 vcid_addr = GET_CID_ADDR(vcid);
1573 pcid_addr = vcid_addr;
1576 REG_WR(bp, BNX2_CTX_VIRT_ADDR, 0x00);
1577 REG_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr);
1579 /* Zero out the context. */
1580 for (offset = 0; offset < PHY_CTX_SIZE; offset += 4) {
1581 CTX_WR(bp, 0x00, offset, 0);
1584 REG_WR(bp, BNX2_CTX_VIRT_ADDR, vcid_addr);
1585 REG_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr);
1590 bnx2_alloc_bad_rbuf(struct bnx2 *bp)
1596 good_mbuf = kmalloc(512 * sizeof(u16), GFP_KERNEL);
1597 if (good_mbuf == NULL) {
1598 printk(KERN_ERR PFX "Failed to allocate memory in "
1599 "bnx2_alloc_bad_rbuf\n");
1603 REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
1604 BNX2_MISC_ENABLE_SET_BITS_RX_MBUF_ENABLE);
1608 /* Allocate a bunch of mbufs and save the good ones in an array. */
1609 val = REG_RD_IND(bp, BNX2_RBUF_STATUS1);
1610 while (val & BNX2_RBUF_STATUS1_FREE_COUNT) {
1611 REG_WR_IND(bp, BNX2_RBUF_COMMAND, BNX2_RBUF_COMMAND_ALLOC_REQ);
1613 val = REG_RD_IND(bp, BNX2_RBUF_FW_BUF_ALLOC);
1615 val &= BNX2_RBUF_FW_BUF_ALLOC_VALUE;
1617 /* The addresses with Bit 9 set are bad memory blocks. */
1618 if (!(val & (1 << 9))) {
1619 good_mbuf[good_mbuf_cnt] = (u16) val;
1623 val = REG_RD_IND(bp, BNX2_RBUF_STATUS1);
/* Free the good ones back to the mbuf pool, thus discarding
* all the bad ones. */
1628 while (good_mbuf_cnt) {
1631 val = good_mbuf[good_mbuf_cnt];
1632 val = (val << 9) | val | 1;
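/* The free command replicates the mbuf value at bit 9 and sets bit 0;
* the exact bitfield layout expected by the RBUF block is not
* documented here.
*/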
1634 REG_WR_IND(bp, BNX2_RBUF_FW_BUF_FREE, val);
1641 bnx2_set_mac_addr(struct bnx2 *bp)
1644 u8 *mac_addr = bp->dev->dev_addr;
1646 val = (mac_addr[0] << 8) | mac_addr[1];
1648 REG_WR(bp, BNX2_EMAC_MAC_MATCH0, val);
1650 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
1651 (mac_addr[4] << 8) | mac_addr[5];
1653 REG_WR(bp, BNX2_EMAC_MAC_MATCH1, val);
1657 bnx2_alloc_rx_skb(struct bnx2 *bp, u16 index)
1659 struct sk_buff *skb;
1660 struct sw_bd *rx_buf = &bp->rx_buf_ring[index];
1662 struct rx_bd *rxbd = &bp->rx_desc_ring[RX_RING(index)][RX_IDX(index)];
1663 unsigned long align;
1665 skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
1670 if (unlikely((align = (unsigned long) skb->data & (BNX2_RX_ALIGN - 1))))
1671 skb_reserve(skb, BNX2_RX_ALIGN - align);
1673 mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_use_size,
1674 PCI_DMA_FROMDEVICE);
1677 pci_unmap_addr_set(rx_buf, mapping, mapping);
1679 rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
1680 rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;
1682 bp->rx_prod_bseq += bp->rx_buf_use_size;
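/* rx_prod_bseq is a running byte count of posted receive buffer
* space; it is written to the BNX2_L2CTX_HOST_BSEQ mailbox so the
* chip knows how many buffer bytes the host has made available.
*/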
1688 bnx2_phy_int(struct bnx2 *bp)
1690 u32 new_link_state, old_link_state;
1692 new_link_state = bp->status_blk->status_attn_bits &
1693 STATUS_ATTN_BITS_LINK_STATE;
1694 old_link_state = bp->status_blk->status_attn_bits_ack &
1695 STATUS_ATTN_BITS_LINK_STATE;
1696 if (new_link_state != old_link_state) {
1697 if (new_link_state) {
1698 REG_WR(bp, BNX2_PCICFG_STATUS_BIT_SET_CMD,
1699 STATUS_ATTN_BITS_LINK_STATE);
1702 REG_WR(bp, BNX2_PCICFG_STATUS_BIT_CLEAR_CMD,
1703 STATUS_ATTN_BITS_LINK_STATE);
1710 bnx2_tx_int(struct bnx2 *bp)
1712 struct status_block *sblk = bp->status_blk;
1713 u16 hw_cons, sw_cons, sw_ring_cons;
1716 hw_cons = bp->hw_tx_cons = sblk->status_tx_quick_consumer_index0;
1717 if ((hw_cons & MAX_TX_DESC_CNT) == MAX_TX_DESC_CNT) {
1720 sw_cons = bp->tx_cons;
1722 while (sw_cons != hw_cons) {
1723 struct sw_bd *tx_buf;
1724 struct sk_buff *skb;
1727 sw_ring_cons = TX_RING_IDX(sw_cons);
1729 tx_buf = &bp->tx_buf_ring[sw_ring_cons];
1732 /* partial BD completions possible with TSO packets */
1733 if (skb_is_gso(skb)) {
1734 u16 last_idx, last_ring_idx;
1736 last_idx = sw_cons +
1737 skb_shinfo(skb)->nr_frags + 1;
1738 last_ring_idx = sw_ring_cons +
1739 skb_shinfo(skb)->nr_frags + 1;
1740 if (unlikely(last_ring_idx >= MAX_TX_DESC_CNT)) {
1743 if (((s16) ((s16) last_idx - (s16) hw_cons)) > 0) {
1748 pci_unmap_single(bp->pdev, pci_unmap_addr(tx_buf, mapping),
1749 skb_headlen(skb), PCI_DMA_TODEVICE);
1752 last = skb_shinfo(skb)->nr_frags;
1754 for (i = 0; i < last; i++) {
1755 sw_cons = NEXT_TX_BD(sw_cons);
1757 pci_unmap_page(bp->pdev,
1759 &bp->tx_buf_ring[TX_RING_IDX(sw_cons)],
1761 skb_shinfo(skb)->frags[i].size,
1765 sw_cons = NEXT_TX_BD(sw_cons);
1767 tx_free_bd += last + 1;
1771 hw_cons = bp->hw_tx_cons =
1772 sblk->status_tx_quick_consumer_index0;
1774 if ((hw_cons & MAX_TX_DESC_CNT) == MAX_TX_DESC_CNT) {
1779 bp->tx_cons = sw_cons;
1780 /* Need to make the tx_cons update visible to bnx2_start_xmit()
1781 * before checking for netif_queue_stopped(). Without the
1782 * memory barrier, there is a small possibility that bnx2_start_xmit()
1783 * will miss it and cause the queue to be stopped forever.
1787 if (unlikely(netif_queue_stopped(bp->dev)) &&
1788 (bnx2_tx_avail(bp) > bp->tx_wake_thresh)) {
1789 netif_tx_lock(bp->dev);
1790 if ((netif_queue_stopped(bp->dev)) &&
1791 (bnx2_tx_avail(bp) > bp->tx_wake_thresh))
1792 netif_wake_queue(bp->dev);
1793 netif_tx_unlock(bp->dev);
1798 bnx2_reuse_rx_skb(struct bnx2 *bp, struct sk_buff *skb,
1801 struct sw_bd *cons_rx_buf, *prod_rx_buf;
1802 struct rx_bd *cons_bd, *prod_bd;
1804 cons_rx_buf = &bp->rx_buf_ring[cons];
1805 prod_rx_buf = &bp->rx_buf_ring[prod];
1807 pci_dma_sync_single_for_device(bp->pdev,
1808 pci_unmap_addr(cons_rx_buf, mapping),
1809 bp->rx_offset + RX_COPY_THRESH, PCI_DMA_FROMDEVICE);
1811 bp->rx_prod_bseq += bp->rx_buf_use_size;
1813 prod_rx_buf->skb = skb;
1818 pci_unmap_addr_set(prod_rx_buf, mapping,
1819 pci_unmap_addr(cons_rx_buf, mapping));
1821 cons_bd = &bp->rx_desc_ring[RX_RING(cons)][RX_IDX(cons)];
1822 prod_bd = &bp->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
1823 prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
1824 prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;
1828 bnx2_rx_int(struct bnx2 *bp, int budget)
1830 struct status_block *sblk = bp->status_blk;
1831 u16 hw_cons, sw_cons, sw_ring_cons, sw_prod, sw_ring_prod;
1832 struct l2_fhdr *rx_hdr;
1835 hw_cons = bp->hw_rx_cons = sblk->status_rx_quick_consumer_index0;
1836 if ((hw_cons & MAX_RX_DESC_CNT) == MAX_RX_DESC_CNT) {
1839 sw_cons = bp->rx_cons;
1840 sw_prod = bp->rx_prod;
1842 /* Memory barrier necessary as speculative reads of the rx
1843 * buffer can be ahead of the index in the status block
1846 while (sw_cons != hw_cons) {
1849 struct sw_bd *rx_buf;
1850 struct sk_buff *skb;
1851 dma_addr_t dma_addr;
1853 sw_ring_cons = RX_RING_IDX(sw_cons);
1854 sw_ring_prod = RX_RING_IDX(sw_prod);
1856 rx_buf = &bp->rx_buf_ring[sw_ring_cons];
1861 dma_addr = pci_unmap_addr(rx_buf, mapping);
1863 pci_dma_sync_single_for_cpu(bp->pdev, dma_addr,
1864 bp->rx_offset + RX_COPY_THRESH, PCI_DMA_FROMDEVICE);
1866 rx_hdr = (struct l2_fhdr *) skb->data;
1867 len = rx_hdr->l2_fhdr_pkt_len - 4;
1869 if ((status = rx_hdr->l2_fhdr_status) &
1870 (L2_FHDR_ERRORS_BAD_CRC |
1871 L2_FHDR_ERRORS_PHY_DECODE |
1872 L2_FHDR_ERRORS_ALIGNMENT |
1873 L2_FHDR_ERRORS_TOO_SHORT |
1874 L2_FHDR_ERRORS_GIANT_FRAME)) {
1879 /* Since we don't have a jumbo ring, copy small packets
1882 if ((bp->dev->mtu > 1500) && (len <= RX_COPY_THRESH)) {
1883 struct sk_buff *new_skb;
1885 new_skb = netdev_alloc_skb(bp->dev, len + 2);
1886 if (new_skb == NULL)
1890 memcpy(new_skb->data,
1891 skb->data + bp->rx_offset - 2,
1894 skb_reserve(new_skb, 2);
1895 skb_put(new_skb, len);
1897 bnx2_reuse_rx_skb(bp, skb,
1898 sw_ring_cons, sw_ring_prod);
1902 else if (bnx2_alloc_rx_skb(bp, sw_ring_prod) == 0) {
1903 pci_unmap_single(bp->pdev, dma_addr,
1904 bp->rx_buf_use_size, PCI_DMA_FROMDEVICE);
1906 skb_reserve(skb, bp->rx_offset);
1911 bnx2_reuse_rx_skb(bp, skb,
1912 sw_ring_cons, sw_ring_prod);
1916 skb->protocol = eth_type_trans(skb, bp->dev);
1918 if ((len > (bp->dev->mtu + ETH_HLEN)) &&
1919 (ntohs(skb->protocol) != 0x8100)) {
1926 skb->ip_summed = CHECKSUM_NONE;
1928 (status & (L2_FHDR_STATUS_TCP_SEGMENT |
1929 L2_FHDR_STATUS_UDP_DATAGRAM))) {
1931 if (likely((status & (L2_FHDR_ERRORS_TCP_XSUM |
1932 L2_FHDR_ERRORS_UDP_XSUM)) == 0))
1933 skb->ip_summed = CHECKSUM_UNNECESSARY;
1937 if ((status & L2_FHDR_STATUS_L2_VLAN_TAG) && (bp->vlgrp != 0)) {
1938 vlan_hwaccel_receive_skb(skb, bp->vlgrp,
1939 rx_hdr->l2_fhdr_vlan_tag);
1943 netif_receive_skb(skb);
1945 bp->dev->last_rx = jiffies;
1949 sw_cons = NEXT_RX_BD(sw_cons);
1950 sw_prod = NEXT_RX_BD(sw_prod);
if (rx_pkt == budget)
1955 /* Refresh hw_cons to see if there is new work */
1956 if (sw_cons == hw_cons) {
1957 hw_cons = bp->hw_rx_cons =
1958 sblk->status_rx_quick_consumer_index0;
1959 if ((hw_cons & MAX_RX_DESC_CNT) == MAX_RX_DESC_CNT)
1964 bp->rx_cons = sw_cons;
1965 bp->rx_prod = sw_prod;
1967 REG_WR16(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BDIDX, sw_prod);
1969 REG_WR(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BSEQ, bp->rx_prod_bseq);
1977 /* MSI ISR - The only difference between this and the INTx ISR
1978 * is that the MSI interrupt is always serviced.
1981 bnx2_msi(int irq, void *dev_instance)
1983 struct net_device *dev = dev_instance;
1984 struct bnx2 *bp = netdev_priv(dev);
1986 prefetch(bp->status_blk);
1987 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
1988 BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
1989 BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
1991 /* Return here if interrupt is disabled. */
1992 if (unlikely(atomic_read(&bp->intr_sem) != 0))
1995 netif_rx_schedule(dev);
2001 bnx2_interrupt(int irq, void *dev_instance)
2003 struct net_device *dev = dev_instance;
2004 struct bnx2 *bp = netdev_priv(dev);
/* When using INTx, it is possible for the interrupt to arrive
* at the CPU before the status block that was posted prior to
* the interrupt. Reading a register will flush the status block.
2009 * When using MSI, the MSI message will always complete after
2010 * the status block write.
2012 if ((bp->status_blk->status_idx == bp->last_status_idx) &&
2013 (REG_RD(bp, BNX2_PCICFG_MISC_STATUS) &
2014 BNX2_PCICFG_MISC_STATUS_INTA_VALUE))
2017 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
2018 BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
2019 BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
2021 /* Return here if interrupt is shared and is disabled. */
2022 if (unlikely(atomic_read(&bp->intr_sem) != 0))
2025 netif_rx_schedule(dev);
2031 bnx2_has_work(struct bnx2 *bp)
2033 struct status_block *sblk = bp->status_blk;
2035 if ((sblk->status_rx_quick_consumer_index0 != bp->hw_rx_cons) ||
2036 (sblk->status_tx_quick_consumer_index0 != bp->hw_tx_cons))
2039 if (((sblk->status_attn_bits & STATUS_ATTN_BITS_LINK_STATE) != 0) !=
2047 bnx2_poll(struct net_device *dev, int *budget)
2049 struct bnx2 *bp = netdev_priv(dev);
2051 if ((bp->status_blk->status_attn_bits &
2052 STATUS_ATTN_BITS_LINK_STATE) !=
2053 (bp->status_blk->status_attn_bits_ack &
2054 STATUS_ATTN_BITS_LINK_STATE)) {
2056 spin_lock(&bp->phy_lock);
2058 spin_unlock(&bp->phy_lock);
2060 /* This is needed to take care of transient status
2061 * during link changes.
2063 REG_WR(bp, BNX2_HC_COMMAND,
2064 bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
2065 REG_RD(bp, BNX2_HC_COMMAND);
2068 if (bp->status_blk->status_tx_quick_consumer_index0 != bp->hw_tx_cons)
2071 if (bp->status_blk->status_rx_quick_consumer_index0 != bp->hw_rx_cons) {
2072 int orig_budget = *budget;
2075 if (orig_budget > dev->quota)
2076 orig_budget = dev->quota;
2078 work_done = bnx2_rx_int(bp, orig_budget);
2079 *budget -= work_done;
2080 dev->quota -= work_done;
2083 bp->last_status_idx = bp->status_blk->status_idx;
2086 if (!bnx2_has_work(bp)) {
2087 netif_rx_complete(dev);
2088 if (likely(bp->flags & USING_MSI_FLAG)) {
2089 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
2090 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
2091 bp->last_status_idx);
2094 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
2095 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
2096 BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
2097 bp->last_status_idx);
2099 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
2100 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
2101 bp->last_status_idx);
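/* The writes above re-arm the interrupt by acking up to
* last_status_idx; a new interrupt fires only when the status block
* index moves past it. The INTx path toggles the mask bit in two
* writes, while MSI needs only the single unmasked write.
*/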
2108 /* Called with rtnl_lock from vlan functions and also netif_tx_lock
2109 * from set_multicast.
2112 bnx2_set_rx_mode(struct net_device *dev)
2114 struct bnx2 *bp = netdev_priv(dev);
2115 u32 rx_mode, sort_mode;
2118 spin_lock_bh(&bp->phy_lock);
2120 rx_mode = bp->rx_mode & ~(BNX2_EMAC_RX_MODE_PROMISCUOUS |
2121 BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG);
2122 sort_mode = 1 | BNX2_RPM_SORT_USER0_BC_EN;
2124 if (!bp->vlgrp && !(bp->flags & ASF_ENABLE_FLAG))
2125 rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
2127 if (!(bp->flags & ASF_ENABLE_FLAG))
2128 rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
2130 if (dev->flags & IFF_PROMISC) {
2131 /* Promiscuous mode. */
2132 rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
2133 sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN |
2134 BNX2_RPM_SORT_USER0_PROM_VLAN;
2136 else if (dev->flags & IFF_ALLMULTI) {
2137 for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
2138 REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
2141 sort_mode |= BNX2_RPM_SORT_USER0_MC_EN;
/* Accept one or more multicast addresses. */
2145 struct dev_mc_list *mclist;
2146 u32 mc_filter[NUM_MC_HASH_REGISTERS];
2151 memset(mc_filter, 0, 4 * NUM_MC_HASH_REGISTERS);
2153 for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
2154 i++, mclist = mclist->next) {
2156 crc = ether_crc_le(ETH_ALEN, mclist->dmi_addr);
2158 regidx = (bit & 0xe0) >> 5;
2160 mc_filter[regidx] |= (1 << bit);
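/* The multicast filter is a 256-bit hash: bits 7:5 of the hash value
* select one of the 8 registers and bits 4:0 select the bit within
* the chosen register.
*/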
2163 for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
2164 REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
2168 sort_mode |= BNX2_RPM_SORT_USER0_MC_HSH_EN;
2171 if (rx_mode != bp->rx_mode) {
2172 bp->rx_mode = rx_mode;
2173 REG_WR(bp, BNX2_EMAC_RX_MODE, rx_mode);
2176 REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
2177 REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode);
2178 REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode | BNX2_RPM_SORT_USER0_ENA);
2180 spin_unlock_bh(&bp->phy_lock);
2183 #define FW_BUF_SIZE 0x8000
2186 bnx2_gunzip_init(struct bnx2 *bp)
2188 if ((bp->gunzip_buf = vmalloc(FW_BUF_SIZE)) == NULL)
2191 if ((bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL)) == NULL)
2194 bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(), GFP_KERNEL);
2195 if (bp->strm->workspace == NULL)
2205 vfree(bp->gunzip_buf);
2206 bp->gunzip_buf = NULL;
printk(KERN_ERR PFX "%s: Cannot allocate firmware buffer for "
"decompression.\n", bp->dev->name);
2215 bnx2_gunzip_end(struct bnx2 *bp)
2217 kfree(bp->strm->workspace);
2222 if (bp->gunzip_buf) {
2223 vfree(bp->gunzip_buf);
2224 bp->gunzip_buf = NULL;
2229 bnx2_gunzip(struct bnx2 *bp, u8 *zbuf, int len, void **outbuf, int *outlen)
2233 /* check gzip header */
2234 if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED))
2240 if (zbuf[3] & FNAME)
2241 while ((zbuf[n++] != 0) && (n < len));
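/* A gzip stream may carry an optional NUL-terminated original file
* name (FNAME flag), skipped above. The raw deflate data is then fed
* to zlib with a negative window size, i.e. headerless inflate.
*/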
2243 bp->strm->next_in = zbuf + n;
2244 bp->strm->avail_in = len - n;
2245 bp->strm->next_out = bp->gunzip_buf;
2246 bp->strm->avail_out = FW_BUF_SIZE;
2248 rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
2252 rc = zlib_inflate(bp->strm, Z_FINISH);
2254 *outlen = FW_BUF_SIZE - bp->strm->avail_out;
2255 *outbuf = bp->gunzip_buf;
2257 if ((rc != Z_OK) && (rc != Z_STREAM_END))
2258 printk(KERN_ERR PFX "%s: Firmware decompression error: %s\n",
2259 bp->dev->name, bp->strm->msg);
2261 zlib_inflateEnd(bp->strm);
2263 if (rc == Z_STREAM_END)
2270 load_rv2p_fw(struct bnx2 *bp, u32 *rv2p_code, u32 rv2p_code_len,
2277 for (i = 0; i < rv2p_code_len; i += 8) {
2278 REG_WR(bp, BNX2_RV2P_INSTR_HIGH, cpu_to_le32(*rv2p_code));
2280 REG_WR(bp, BNX2_RV2P_INSTR_LOW, cpu_to_le32(*rv2p_code));
2283 if (rv2p_proc == RV2P_PROC1) {
2284 val = (i / 8) | BNX2_RV2P_PROC1_ADDR_CMD_RDWR;
2285 REG_WR(bp, BNX2_RV2P_PROC1_ADDR_CMD, val);
2288 val = (i / 8) | BNX2_RV2P_PROC2_ADDR_CMD_RDWR;
2289 REG_WR(bp, BNX2_RV2P_PROC2_ADDR_CMD, val);
/* Reset the processor; un-stalling is done later. */
2294 if (rv2p_proc == RV2P_PROC1) {
2295 REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC1_RESET);
2298 REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC2_RESET);
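/* Each on-chip CPU is loaded the same way below: halt it, decompress
* the firmware image, copy the text/data/sbss/bss/rodata sections
* into its scratchpad view, clear the prefetched instruction, point
* the PC at the entry address, and release the halt bit.
*/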
2303 load_cpu_fw(struct bnx2 *bp, struct cpu_reg *cpu_reg, struct fw_info *fw)
2310 val = REG_RD_IND(bp, cpu_reg->mode);
2311 val |= cpu_reg->mode_value_halt;
2312 REG_WR_IND(bp, cpu_reg->mode, val);
2313 REG_WR_IND(bp, cpu_reg->state, cpu_reg->state_value_clear);
2315 /* Load the Text area. */
2316 offset = cpu_reg->spad_base + (fw->text_addr - cpu_reg->mips_view_base);
2321 rc = bnx2_gunzip(bp, fw->gz_text, fw->gz_text_len, &text,
2331 for (j = 0; j < (fw->text_len / 4); j++, offset += 4) {
2332 REG_WR_IND(bp, offset, cpu_to_le32(fw->text[j]));
2336 /* Load the Data area. */
2337 offset = cpu_reg->spad_base + (fw->data_addr - cpu_reg->mips_view_base);
2341 for (j = 0; j < (fw->data_len / 4); j++, offset += 4) {
2342 REG_WR_IND(bp, offset, fw->data[j]);
2346 /* Load the SBSS area. */
2347 offset = cpu_reg->spad_base + (fw->sbss_addr - cpu_reg->mips_view_base);
2351 for (j = 0; j < (fw->sbss_len / 4); j++, offset += 4) {
2352 REG_WR_IND(bp, offset, fw->sbss[j]);
2356 /* Load the BSS area. */
2357 offset = cpu_reg->spad_base + (fw->bss_addr - cpu_reg->mips_view_base);
2361 for (j = 0; j < (fw->bss_len/4); j++, offset += 4) {
2362 REG_WR_IND(bp, offset, fw->bss[j]);
2366 /* Load the Read-Only area. */
2367 offset = cpu_reg->spad_base +
2368 (fw->rodata_addr - cpu_reg->mips_view_base);
2372 for (j = 0; j < (fw->rodata_len / 4); j++, offset += 4) {
2373 REG_WR_IND(bp, offset, fw->rodata[j]);
2377 /* Clear the pre-fetch instruction. */
2378 REG_WR_IND(bp, cpu_reg->inst, 0);
2379 REG_WR_IND(bp, cpu_reg->pc, fw->start_addr);
2381 /* Start the CPU. */
2382 val = REG_RD_IND(bp, cpu_reg->mode);
2383 val &= ~cpu_reg->mode_value_halt;
2384 REG_WR_IND(bp, cpu_reg->state, cpu_reg->state_value_clear);
2385 REG_WR_IND(bp, cpu_reg->mode, val);
2391 bnx2_init_cpus(struct bnx2 *bp)
2393 struct cpu_reg cpu_reg;
2399 if ((rc = bnx2_gunzip_init(bp)) != 0)
2402 /* Initialize the RV2P processor. */
2403 rc = bnx2_gunzip(bp, bnx2_rv2p_proc1, sizeof(bnx2_rv2p_proc1), &text,
2408 load_rv2p_fw(bp, text, text_len, RV2P_PROC1);
2410 rc = bnx2_gunzip(bp, bnx2_rv2p_proc2, sizeof(bnx2_rv2p_proc2), &text,
2415 load_rv2p_fw(bp, text, text_len, RV2P_PROC2);
        /* Initialize the RX Processor. */
        cpu_reg.mode = BNX2_RXP_CPU_MODE;
        cpu_reg.mode_value_halt = BNX2_RXP_CPU_MODE_SOFT_HALT;
        cpu_reg.mode_value_sstep = BNX2_RXP_CPU_MODE_STEP_ENA;
        cpu_reg.state = BNX2_RXP_CPU_STATE;
        cpu_reg.state_value_clear = 0xffffff;
        cpu_reg.gpr0 = BNX2_RXP_CPU_REG_FILE;
        cpu_reg.evmask = BNX2_RXP_CPU_EVENT_MASK;
        cpu_reg.pc = BNX2_RXP_CPU_PROGRAM_COUNTER;
        cpu_reg.inst = BNX2_RXP_CPU_INSTRUCTION;
        cpu_reg.bp = BNX2_RXP_CPU_HW_BREAKPOINT;
        cpu_reg.spad_base = BNX2_RXP_SCRATCH;
        cpu_reg.mips_view_base = 0x8000000;

        if (CHIP_NUM(bp) == CHIP_NUM_5709)
                fw = &bnx2_rxp_fw_09;
        else
                fw = &bnx2_rxp_fw_06;

        rc = load_cpu_fw(bp, &cpu_reg, fw);
        if (rc)
                goto init_cpu_err;

        /* Initialize the TX Processor. */
        cpu_reg.mode = BNX2_TXP_CPU_MODE;
        cpu_reg.mode_value_halt = BNX2_TXP_CPU_MODE_SOFT_HALT;
        cpu_reg.mode_value_sstep = BNX2_TXP_CPU_MODE_STEP_ENA;
        cpu_reg.state = BNX2_TXP_CPU_STATE;
        cpu_reg.state_value_clear = 0xffffff;
        cpu_reg.gpr0 = BNX2_TXP_CPU_REG_FILE;
        cpu_reg.evmask = BNX2_TXP_CPU_EVENT_MASK;
        cpu_reg.pc = BNX2_TXP_CPU_PROGRAM_COUNTER;
        cpu_reg.inst = BNX2_TXP_CPU_INSTRUCTION;
        cpu_reg.bp = BNX2_TXP_CPU_HW_BREAKPOINT;
        cpu_reg.spad_base = BNX2_TXP_SCRATCH;
        cpu_reg.mips_view_base = 0x8000000;

        if (CHIP_NUM(bp) == CHIP_NUM_5709)
                fw = &bnx2_txp_fw_09;
        else
                fw = &bnx2_txp_fw_06;

        rc = load_cpu_fw(bp, &cpu_reg, fw);
        if (rc)
                goto init_cpu_err;

        /* Initialize the TX Patch-up Processor. */
        cpu_reg.mode = BNX2_TPAT_CPU_MODE;
        cpu_reg.mode_value_halt = BNX2_TPAT_CPU_MODE_SOFT_HALT;
        cpu_reg.mode_value_sstep = BNX2_TPAT_CPU_MODE_STEP_ENA;
        cpu_reg.state = BNX2_TPAT_CPU_STATE;
        cpu_reg.state_value_clear = 0xffffff;
        cpu_reg.gpr0 = BNX2_TPAT_CPU_REG_FILE;
        cpu_reg.evmask = BNX2_TPAT_CPU_EVENT_MASK;
        cpu_reg.pc = BNX2_TPAT_CPU_PROGRAM_COUNTER;
        cpu_reg.inst = BNX2_TPAT_CPU_INSTRUCTION;
        cpu_reg.bp = BNX2_TPAT_CPU_HW_BREAKPOINT;
        cpu_reg.spad_base = BNX2_TPAT_SCRATCH;
        cpu_reg.mips_view_base = 0x8000000;

        if (CHIP_NUM(bp) == CHIP_NUM_5709)
                fw = &bnx2_tpat_fw_09;
        else
                fw = &bnx2_tpat_fw_06;

        rc = load_cpu_fw(bp, &cpu_reg, fw);
        if (rc)
                goto init_cpu_err;

        /* Initialize the Completion Processor. */
        cpu_reg.mode = BNX2_COM_CPU_MODE;
        cpu_reg.mode_value_halt = BNX2_COM_CPU_MODE_SOFT_HALT;
        cpu_reg.mode_value_sstep = BNX2_COM_CPU_MODE_STEP_ENA;
        cpu_reg.state = BNX2_COM_CPU_STATE;
        cpu_reg.state_value_clear = 0xffffff;
        cpu_reg.gpr0 = BNX2_COM_CPU_REG_FILE;
        cpu_reg.evmask = BNX2_COM_CPU_EVENT_MASK;
        cpu_reg.pc = BNX2_COM_CPU_PROGRAM_COUNTER;
        cpu_reg.inst = BNX2_COM_CPU_INSTRUCTION;
        cpu_reg.bp = BNX2_COM_CPU_HW_BREAKPOINT;
        cpu_reg.spad_base = BNX2_COM_SCRATCH;
        cpu_reg.mips_view_base = 0x8000000;

        if (CHIP_NUM(bp) == CHIP_NUM_5709)
                fw = &bnx2_com_fw_09;
        else
                fw = &bnx2_com_fw_06;

        rc = load_cpu_fw(bp, &cpu_reg, fw);
        if (rc)
                goto init_cpu_err;

        /* Initialize the Command Processor. */
        cpu_reg.mode = BNX2_CP_CPU_MODE;
        cpu_reg.mode_value_halt = BNX2_CP_CPU_MODE_SOFT_HALT;
        cpu_reg.mode_value_sstep = BNX2_CP_CPU_MODE_STEP_ENA;
        cpu_reg.state = BNX2_CP_CPU_STATE;
        cpu_reg.state_value_clear = 0xffffff;
        cpu_reg.gpr0 = BNX2_CP_CPU_REG_FILE;
        cpu_reg.evmask = BNX2_CP_CPU_EVENT_MASK;
        cpu_reg.pc = BNX2_CP_CPU_PROGRAM_COUNTER;
        cpu_reg.inst = BNX2_CP_CPU_INSTRUCTION;
        cpu_reg.bp = BNX2_CP_CPU_HW_BREAKPOINT;
        cpu_reg.spad_base = BNX2_CP_SCRATCH;
        cpu_reg.mips_view_base = 0x8000000;

        /* Only the 5709 has Command Processor firmware to load. */
        if (CHIP_NUM(bp) == CHIP_NUM_5709) {
                fw = &bnx2_cp_fw_09;

                rc = load_cpu_fw(bp, &cpu_reg, fw);
                if (rc)
                        goto init_cpu_err;
        }

init_cpu_err:
        bnx2_gunzip_end(bp);
        return rc;
}

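/* Transition the device between D0 (fully powered) and D3hot (suspended).
 * When entering D3hot with Wake-on-LAN enabled, the PHY is renegotiated
 * down to 10/100 to save power, the MAC is set up to receive magic and
 * ACPI wakeup packets, and the firmware is told which suspend mode was
 * chosen.
 */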
static int
bnx2_set_power_state(struct bnx2 *bp, pci_power_t state)
{
        u16 pmcsr;

        pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);

        switch (state) {
        case PCI_D0: {
                u32 val;

                pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
                        (pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
                        PCI_PM_CTRL_PME_STATUS);

                if (pmcsr & PCI_PM_CTRL_STATE_MASK)
                        /* delay required during transition out of D3hot */
                        msleep(20);

                val = REG_RD(bp, BNX2_EMAC_MODE);
                val |= BNX2_EMAC_MODE_MPKT_RCVD | BNX2_EMAC_MODE_ACPI_RCVD;
                val &= ~BNX2_EMAC_MODE_MPKT;
                REG_WR(bp, BNX2_EMAC_MODE, val);

                val = REG_RD(bp, BNX2_RPM_CONFIG);
                val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
                REG_WR(bp, BNX2_RPM_CONFIG, val);
                break;
        }
        case PCI_D3hot: {
                int i;
                u32 val, wol_msg;

                if (bp->wol) {
                        u32 advertising;
                        u8 autoneg;

                        autoneg = bp->autoneg;
                        advertising = bp->advertising;

                        bp->autoneg = AUTONEG_SPEED;
                        bp->advertising = ADVERTISED_10baseT_Half |
                                ADVERTISED_10baseT_Full |
                                ADVERTISED_100baseT_Half |
                                ADVERTISED_100baseT_Full |
                                ADVERTISED_Autoneg;

                        bnx2_setup_copper_phy(bp);

                        bp->autoneg = autoneg;
                        bp->advertising = advertising;

                        bnx2_set_mac_addr(bp);

                        val = REG_RD(bp, BNX2_EMAC_MODE);

                        /* Enable port mode. */
                        val &= ~BNX2_EMAC_MODE_PORT;
                        val |= BNX2_EMAC_MODE_PORT_MII |
                               BNX2_EMAC_MODE_MPKT_RCVD |
                               BNX2_EMAC_MODE_ACPI_RCVD |
                               BNX2_EMAC_MODE_MPKT;

                        REG_WR(bp, BNX2_EMAC_MODE, val);

                        /* receive all multicast */
                        for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
                                REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
                                       0xffffffff);
                        }
                        REG_WR(bp, BNX2_EMAC_RX_MODE,
                               BNX2_EMAC_RX_MODE_SORT_MODE);

                        val = 1 | BNX2_RPM_SORT_USER0_BC_EN |
                              BNX2_RPM_SORT_USER0_MC_EN;
                        REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
                        REG_WR(bp, BNX2_RPM_SORT_USER0, val);
                        REG_WR(bp, BNX2_RPM_SORT_USER0, val |
                               BNX2_RPM_SORT_USER0_ENA);

                        /* Need to enable EMAC and RPM for WOL. */
                        REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
                               BNX2_MISC_ENABLE_SET_BITS_RX_PARSER_MAC_ENABLE |
                               BNX2_MISC_ENABLE_SET_BITS_TX_HEADER_Q_ENABLE |
                               BNX2_MISC_ENABLE_SET_BITS_EMAC_ENABLE);

                        val = REG_RD(bp, BNX2_RPM_CONFIG);
                        val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
                        REG_WR(bp, BNX2_RPM_CONFIG, val);

                        wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
                }
                else {
                        wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
                }

                /* Tell the firmware which suspend mode was chosen. */
                if (!(bp->flags & NO_WOL_FLAG))
                        bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT3 | wol_msg, 0);
                pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
                if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
                    (CHIP_ID(bp) == CHIP_ID_5706_A1)) {

                        /* The PCI clock is not stopped in D3hot on these
                         * chips, so leave the power state at D0.
                         */
                }
                else {
                        pmcsr |= 3;     /* PowerState = D3hot */
                }

                if (bp->wol)
                        pmcsr |= PCI_PM_CTRL_PME_ENABLE;

                pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
                                      pmcsr);

                /* No more memory access after this point until
                 * device is brought back to D0.
                 */
                udelay(50);
                break;
        }
        default:
                return -EINVAL;
        }

        return 0;
}

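/* The NVRAM interface is shared with the bootcode, so a hardware arbiter
 * (BNX2_NVM_SW_ARB) serializes access.  Each helper below polls for up to
 * NVRAM_TIMEOUT_COUNT iterations before giving up with -EBUSY.
 */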
static int
bnx2_acquire_nvram_lock(struct bnx2 *bp)
{
        u32 val;
        int j;

        /* Request access to the flash interface. */
        REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_SET2);
        for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
                val = REG_RD(bp, BNX2_NVM_SW_ARB);
                if (val & BNX2_NVM_SW_ARB_ARB_ARB2)
                        break;

                udelay(5);
        }

        if (j >= NVRAM_TIMEOUT_COUNT)
                return -EBUSY;

        return 0;
}

static int
bnx2_release_nvram_lock(struct bnx2 *bp)
{
        int j;
        u32 val;

        /* Relinquish the NVRAM interface. */
        REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_CLR2);

        for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
                val = REG_RD(bp, BNX2_NVM_SW_ARB);
                if (!(val & BNX2_NVM_SW_ARB_ARB_ARB2))
                        break;

                udelay(5);
        }

        if (j >= NVRAM_TIMEOUT_COUNT)
                return -EBUSY;

        return 0;
}

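/* Writes are gated twice: a PCI-side enable bit in BNX2_MISC_CFG and, for
 * unbuffered flash parts, a WREN command that must be issued to the part
 * itself and polled for completion.
 */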
static int
bnx2_enable_nvram_write(struct bnx2 *bp)
{
        u32 val;

        val = REG_RD(bp, BNX2_MISC_CFG);
        REG_WR(bp, BNX2_MISC_CFG, val | BNX2_MISC_CFG_NVM_WR_EN_PCI);

        if (!bp->flash_info->buffered) {
                int j;

                REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
                REG_WR(bp, BNX2_NVM_COMMAND,
                       BNX2_NVM_COMMAND_WREN | BNX2_NVM_COMMAND_DOIT);

                for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
                        udelay(5);

                        val = REG_RD(bp, BNX2_NVM_COMMAND);
                        if (val & BNX2_NVM_COMMAND_DONE)
                                break;
                }

                if (j >= NVRAM_TIMEOUT_COUNT)
                        return -EBUSY;
        }
        return 0;
}

static void
bnx2_disable_nvram_write(struct bnx2 *bp)
{
        u32 val;

        val = REG_RD(bp, BNX2_MISC_CFG);
        REG_WR(bp, BNX2_MISC_CFG, val & ~BNX2_MISC_CFG_NVM_WR_EN);
}

static void
bnx2_enable_nvram_access(struct bnx2 *bp)
{
        u32 val;

        val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
        /* Enable both bits, even on read. */
        REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
               val | BNX2_NVM_ACCESS_ENABLE_EN | BNX2_NVM_ACCESS_ENABLE_WR_EN);
}

static void
bnx2_disable_nvram_access(struct bnx2 *bp)
{
        u32 val;

        val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
        /* Disable both bits, even after read. */
        REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
               val & ~(BNX2_NVM_ACCESS_ENABLE_EN |
                       BNX2_NVM_ACCESS_ENABLE_WR_EN));
}

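/* Buffered (self-managing) flash parts erase internally on write, so an
 * explicit page erase is only needed for unbuffered parts.
 */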
static int
bnx2_nvram_erase_page(struct bnx2 *bp, u32 offset)
{
        u32 cmd;
        int j;

        if (bp->flash_info->buffered)
                /* Buffered flash, no erase needed */
                return 0;

        /* Build an erase command */
        cmd = BNX2_NVM_COMMAND_ERASE | BNX2_NVM_COMMAND_WR |
              BNX2_NVM_COMMAND_DOIT;

        /* Need to clear DONE bit separately. */
        REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);

        /* Address of the NVRAM page to erase. */
        REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);

        /* Issue an erase command. */
        REG_WR(bp, BNX2_NVM_COMMAND, cmd);

        /* Wait for completion. */
        for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
                u32 val;

                udelay(5);

                val = REG_RD(bp, BNX2_NVM_COMMAND);
                if (val & BNX2_NVM_COMMAND_DONE)
                        break;
        }

        if (j >= NVRAM_TIMEOUT_COUNT)
                return -EBUSY;

        return 0;
}

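/* Read one 32-bit word from NVRAM.  For buffered flash, the linear offset
 * is first translated into the page/byte addressing the part expects:
 * (offset / page_size) << page_bits selects the page, and
 * offset % page_size the byte within that page.
 */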
static int
bnx2_nvram_read_dword(struct bnx2 *bp, u32 offset, u8 *ret_val, u32 cmd_flags)
{
        u32 cmd;
        int j;

        /* Build the command word. */
        cmd = BNX2_NVM_COMMAND_DOIT | cmd_flags;

        /* Calculate the offset for a buffered flash. */
        if (bp->flash_info->buffered) {
                offset = ((offset / bp->flash_info->page_size) <<
                          bp->flash_info->page_bits) +
                         (offset % bp->flash_info->page_size);
        }

        /* Need to clear DONE bit separately. */
        REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);

        /* Address of the NVRAM to read from. */
        REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);

        /* Issue a read command. */
        REG_WR(bp, BNX2_NVM_COMMAND, cmd);

        /* Wait for completion. */
        for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
                u32 val;

                udelay(5);

                val = REG_RD(bp, BNX2_NVM_COMMAND);
                if (val & BNX2_NVM_COMMAND_DONE) {
                        val = REG_RD(bp, BNX2_NVM_READ);

                        val = be32_to_cpu(val);
                        memcpy(ret_val, &val, 4);
                        break;
                }
        }
        if (j >= NVRAM_TIMEOUT_COUNT)
                return -EBUSY;

        return 0;
}

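/* Write one 32-bit word to NVRAM, using the same buffered-flash address
 * translation as the read path.  The data is converted to big endian
 * before it is handed to the controller.
 */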
static int
bnx2_nvram_write_dword(struct bnx2 *bp, u32 offset, u8 *val, u32 cmd_flags)
{
        u32 cmd;
        u32 val32;
        int j;

        /* Build the command word. */
        cmd = BNX2_NVM_COMMAND_DOIT | BNX2_NVM_COMMAND_WR | cmd_flags;

        /* Calculate the offset for a buffered flash. */
        if (bp->flash_info->buffered) {
                offset = ((offset / bp->flash_info->page_size) <<
                          bp->flash_info->page_bits) +
                         (offset % bp->flash_info->page_size);
        }

        /* Need to clear DONE bit separately. */
        REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);

        memcpy(&val32, val, 4);
        val32 = cpu_to_be32(val32);

        /* Write the data. */
        REG_WR(bp, BNX2_NVM_WRITE, val32);

        /* Address of the NVRAM to write to. */
        REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);

        /* Issue the write command. */
        REG_WR(bp, BNX2_NVM_COMMAND, cmd);

        /* Wait for completion. */
        for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
                udelay(5);

                if (REG_RD(bp, BNX2_NVM_COMMAND) & BNX2_NVM_COMMAND_DONE)
                        break;
        }
        if (j >= NVRAM_TIMEOUT_COUNT)
                return -EBUSY;

        return 0;
}

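/* Identify the attached flash/EEPROM by matching the strapping pins
 * reported in BNX2_NVM_CFG1 against flash_table, then program the
 * interface configuration registers for the matched part if the bootcode
 * has not already reconfigured the interface (bit 30 of NVM_CFG1).
 */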
static int
bnx2_init_nvram(struct bnx2 *bp)
{
        u32 val;
        int j, entry_count, rc;
        struct flash_spec *flash;

        /* Determine the selected interface. */
        val = REG_RD(bp, BNX2_NVM_CFG1);

        entry_count = sizeof(flash_table) / sizeof(struct flash_spec);

        rc = 0;
        if (val & 0x40000000) {

                /* Flash interface has been reconfigured */
                for (j = 0, flash = &flash_table[0]; j < entry_count;
                     j++, flash++) {
                        if ((val & FLASH_BACKUP_STRAP_MASK) ==
                            (flash->config1 & FLASH_BACKUP_STRAP_MASK)) {
                                bp->flash_info = flash;
                                break;
                        }
                }
        }
        else {
                u32 mask;

                /* Not yet been reconfigured */
                if (val & (1 << 23))
                        mask = FLASH_BACKUP_STRAP_MASK;
                else
                        mask = FLASH_STRAP_MASK;

                for (j = 0, flash = &flash_table[0]; j < entry_count;
                     j++, flash++) {

                        if ((val & mask) == (flash->strapping & mask)) {
                                bp->flash_info = flash;

                                /* Request access to the flash interface. */
                                if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
                                        return rc;

                                /* Enable access to flash interface */
                                bnx2_enable_nvram_access(bp);

                                /* Reconfigure the flash interface */
                                REG_WR(bp, BNX2_NVM_CFG1, flash->config1);
                                REG_WR(bp, BNX2_NVM_CFG2, flash->config2);
                                REG_WR(bp, BNX2_NVM_CFG3, flash->config3);
                                REG_WR(bp, BNX2_NVM_WRITE1, flash->write1);

                                /* Disable access to flash interface */
                                bnx2_disable_nvram_access(bp);
                                bnx2_release_nvram_lock(bp);

                                break;
                        }
                }
        } /* if (val & 0x40000000) */

        if (j == entry_count) {
                bp->flash_info = NULL;
                printk(KERN_ALERT PFX "Unknown flash/EEPROM type.\n");
                return -ENODEV;
        }

        /* Prefer the NVRAM size reported by the bootcode, if any. */
        val = REG_RD_IND(bp, bp->shmem_base + BNX2_SHARED_HW_CFG_CONFIG2);
        val &= BNX2_SHARED_HW_CFG2_NVM_SIZE_MASK;
        if (val)
                bp->flash_size = val;
        else
                bp->flash_size = bp->flash_info->total_size;

        return rc;
}

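/* Read an arbitrary byte range from NVRAM.  The hardware only transfers
 * aligned dwords, so a misaligned head and a partial tail are each read
 * through a 4-byte bounce buffer and only the requested bytes copied out.
 */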
static int
bnx2_nvram_read(struct bnx2 *bp, u32 offset, u8 *ret_buf,
                int buf_size)
{
        int rc = 0;
        u32 cmd_flags, offset32, len32, extra;

        if (buf_size == 0)
                return 0;

        /* Request access to the flash interface. */
        if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
                return rc;

        /* Enable access to flash interface */
        bnx2_enable_nvram_access(bp);

        len32 = buf_size;
        offset32 = offset;
        extra = 0;
        cmd_flags = 0;

        /* Handle a read that starts on a non-dword boundary. */
        if (offset32 & 3) {
                u8 buf[4];
                u32 pre_len;

                offset32 &= ~3;
                pre_len = 4 - (offset & 3);

                if (pre_len >= len32) {
                        pre_len = len32;
                        cmd_flags = BNX2_NVM_COMMAND_FIRST |
                                    BNX2_NVM_COMMAND_LAST;
                }
                else {
                        cmd_flags = BNX2_NVM_COMMAND_FIRST;
                }

                rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
                if (rc)
                        return rc;

                memcpy(ret_buf, buf + (offset & 3), pre_len);

                offset32 += 4;
                ret_buf += pre_len;
                len32 -= pre_len;
        }

        /* Round a partial trailing dword up to a full one. */
        if (len32 & 3) {
                extra = 4 - (len32 & 3);
                len32 = (len32 + 4) & ~3;
        }

        if (len32 == 4) {
                u8 buf[4];

                if (cmd_flags)
                        cmd_flags = BNX2_NVM_COMMAND_LAST;
                else
                        cmd_flags = BNX2_NVM_COMMAND_FIRST |
                                    BNX2_NVM_COMMAND_LAST;

                rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);

                memcpy(ret_buf, buf, 4 - extra);
        }
        else if (len32 > 0) {
                u8 buf[4];

                /* Read the first word. */
                if (cmd_flags)
                        cmd_flags = 0;
                else
                        cmd_flags = BNX2_NVM_COMMAND_FIRST;

                rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, cmd_flags);

                /* Advance to the next dword. */
                offset32 += 4;
                ret_buf += 4;
                len32 -= 4;

                while (len32 > 4 && rc == 0) {
                        rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, 0);

                        /* Advance to the next dword. */
                        offset32 += 4;
                        ret_buf += 4;
                        len32 -= 4;
                }

                if (rc)
                        return rc;

                /* Read the last dword through a bounce buffer and copy
                 * out only the bytes the caller asked for. */
                cmd_flags = BNX2_NVM_COMMAND_LAST;
                rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);

                memcpy(ret_buf, buf, 4 - extra);
        }

        /* Disable access to flash interface */
        bnx2_disable_nvram_access(bp);
        bnx2_release_nvram_lock(bp);

        return rc;
}

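/* Write an arbitrary byte range to NVRAM.  Misaligned edges are widened
 * to dword boundaries by first reading back the neighboring bytes, and
 * for unbuffered flash each affected page is read into a bounce buffer,
 * erased, and rewritten in full.
 */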
static int
bnx2_nvram_write(struct bnx2 *bp, u32 offset, u8 *data_buf,
                int buf_size)
{
        u32 written, offset32, len32;
        u8 *buf, start[4], end[4], *align_buf = NULL, *flash_buffer = NULL;
        int rc = 0;
        int align_start, align_end;

        buf = data_buf;
        offset32 = offset;
        len32 = buf_size;
        align_start = align_end = 0;

        if ((align_start = (offset32 & 3))) {
                offset32 &= ~3;
                len32 += (4 - align_start);
                if ((rc = bnx2_nvram_read(bp, offset32, start, 4)))
                        return rc;
        }

        if (len32 & 3) {
                if ((len32 > 4) || !align_start) {
                        align_end = 4 - (len32 & 3);
                        len32 += align_end;
                        if ((rc = bnx2_nvram_read(bp, offset32 + len32 - 4,
                                                  end, 4)))
                                return rc;
                }
        }

        if (align_start || align_end) {
                align_buf = kmalloc(len32, GFP_KERNEL);
                if (align_buf == NULL)
                        return -ENOMEM;
                if (align_start)
                        memcpy(align_buf, start, 4);
                if (align_end)
                        memcpy(align_buf + len32 - 4, end, 4);
                memcpy(align_buf + align_start, data_buf, buf_size);
                buf = align_buf;
        }

        if (bp->flash_info->buffered == 0) {
                /* Bounce buffer large enough for one flash page. */
                flash_buffer = kmalloc(264, GFP_KERNEL);
                if (flash_buffer == NULL) {
                        rc = -ENOMEM;
                        goto nvram_write_end;
                }
        }

        written = 0;
        while ((written < len32) && (rc == 0)) {
                u32 page_start, page_end, data_start, data_end;
                u32 addr, cmd_flags;
                int i;

                /* Find the page_start addr */
                page_start = offset32 + written;
                page_start -= (page_start % bp->flash_info->page_size);
                /* Find the page_end addr */
                page_end = page_start + bp->flash_info->page_size;
                /* Find the data_start addr */
                data_start = (written == 0) ? offset32 : page_start;
                /* Find the data_end addr */
                data_end = (page_end > offset32 + len32) ?
                        (offset32 + len32) : page_end;

                /* Request access to the flash interface. */
                if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
                        goto nvram_write_end;

                /* Enable access to flash interface */
                bnx2_enable_nvram_access(bp);

                cmd_flags = BNX2_NVM_COMMAND_FIRST;
                if (bp->flash_info->buffered == 0) {
                        int j;

                        /* Read the whole page into the buffer
                         * (non-buffered flash only) */
                        for (j = 0; j < bp->flash_info->page_size; j += 4) {
                                if (j == (bp->flash_info->page_size - 4)) {
                                        cmd_flags |= BNX2_NVM_COMMAND_LAST;
                                }
                                rc = bnx2_nvram_read_dword(bp,
                                        page_start + j,
                                        &flash_buffer[j],
                                        cmd_flags);

                                if (rc)
                                        goto nvram_write_end;

                                cmd_flags = 0;
                        }
                }

                /* Enable writes to flash interface (unlock write-protect) */
                if ((rc = bnx2_enable_nvram_write(bp)) != 0)
                        goto nvram_write_end;

                /* Erase the page */
                if ((rc = bnx2_nvram_erase_page(bp, page_start)) != 0)
                        goto nvram_write_end;

                /* Re-enable the write again for the actual write */
                bnx2_enable_nvram_write(bp);

                /* Loop to write back the buffer data from page_start to
                 * data_start */
                i = 0;
                if (bp->flash_info->buffered == 0) {
                        for (addr = page_start; addr < data_start;
                                addr += 4, i += 4) {

                                rc = bnx2_nvram_write_dword(bp, addr,
                                        &flash_buffer[i], cmd_flags);

                                if (rc != 0)
                                        goto nvram_write_end;

                                cmd_flags = 0;
                        }
                }

                /* Loop to write the new data from data_start to data_end */
                for (addr = data_start; addr < data_end; addr += 4, i += 4) {
                        if ((addr == page_end - 4) ||
                            ((bp->flash_info->buffered) &&
                             (addr == data_end - 4))) {

                                cmd_flags |= BNX2_NVM_COMMAND_LAST;
                        }
                        rc = bnx2_nvram_write_dword(bp, addr, buf,
                                cmd_flags);

                        if (rc != 0)
                                goto nvram_write_end;

                        cmd_flags = 0;
                        buf += 4;
                }

                /* Loop to write back the buffer data from data_end
                 * to page_end */
                if (bp->flash_info->buffered == 0) {
                        for (addr = data_end; addr < page_end;
                                addr += 4, i += 4) {

                                if (addr == page_end - 4) {
                                        cmd_flags = BNX2_NVM_COMMAND_LAST;
                                }
                                rc = bnx2_nvram_write_dword(bp, addr,
                                        &flash_buffer[i], cmd_flags);