1 /* bnx2.c: Broadcom NX2 network driver.
3 * Copyright (c) 2004-2007 Broadcom Corporation
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation.
9 * Written by: Michael Chan (mchan@broadcom.com)
13 #include <linux/module.h>
14 #include <linux/moduleparam.h>
16 #include <linux/kernel.h>
17 #include <linux/timer.h>
18 #include <linux/errno.h>
19 #include <linux/ioport.h>
20 #include <linux/slab.h>
21 #include <linux/vmalloc.h>
22 #include <linux/interrupt.h>
23 #include <linux/pci.h>
24 #include <linux/init.h>
25 #include <linux/netdevice.h>
26 #include <linux/etherdevice.h>
27 #include <linux/skbuff.h>
28 #include <linux/dma-mapping.h>
29 #include <linux/bitops.h>
32 #include <linux/delay.h>
33 #include <asm/byteorder.h>
35 #include <linux/time.h>
36 #include <linux/ethtool.h>
37 #include <linux/mii.h>
38 #ifdef NETIF_F_HW_VLAN_TX
39 #include <linux/if_vlan.h>
44 #include <net/checksum.h>
45 #include <linux/workqueue.h>
46 #include <linux/crc32.h>
47 #include <linux/prefetch.h>
48 #include <linux/cache.h>
49 #include <linux/zlib.h>
55 #define FW_BUF_SIZE 0x8000
57 #define DRV_MODULE_NAME "bnx2"
58 #define PFX DRV_MODULE_NAME ": "
59 #define DRV_MODULE_VERSION "1.6.9"
60 #define DRV_MODULE_RELDATE "December 8, 2007"
62 #define RUN_AT(x) (jiffies + (x))
64 /* Time in jiffies before concluding the transmitter is hung. */
65 #define TX_TIMEOUT (5*HZ)
67 static const char version[] __devinitdata =
68 "Broadcom NetXtreme II Gigabit Ethernet Driver " DRV_MODULE_NAME " v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
70 MODULE_AUTHOR("Michael Chan <mchan@broadcom.com>");
71 MODULE_DESCRIPTION("Broadcom NetXtreme II BCM5706/5708/5709 Driver");
72 MODULE_LICENSE("GPL");
73 MODULE_VERSION(DRV_MODULE_VERSION);
75 static int disable_msi = 0;
77 module_param(disable_msi, int, 0);
78 MODULE_PARM_DESC(disable_msi, "Disable Message Signaled Interrupt (MSI)");
92 /* indexed by board_t, above */
95 } board_info[] __devinitdata = {
96 { "Broadcom NetXtreme II BCM5706 1000Base-T" },
97 { "HP NC370T Multifunction Gigabit Server Adapter" },
98 { "HP NC370i Multifunction Gigabit Server Adapter" },
99 { "Broadcom NetXtreme II BCM5706 1000Base-SX" },
100 { "HP NC370F Multifunction Gigabit Server Adapter" },
101 { "Broadcom NetXtreme II BCM5708 1000Base-T" },
102 { "Broadcom NetXtreme II BCM5708 1000Base-SX" },
103 { "Broadcom NetXtreme II BCM5709 1000Base-T" },
104 { "Broadcom NetXtreme II BCM5709 1000Base-SX" },
107 static struct pci_device_id bnx2_pci_tbl[] = {
108 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
109 PCI_VENDOR_ID_HP, 0x3101, 0, 0, NC370T },
110 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
111 PCI_VENDOR_ID_HP, 0x3106, 0, 0, NC370I },
112 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
113 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706 },
114 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708,
115 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708 },
116 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
117 PCI_VENDOR_ID_HP, 0x3102, 0, 0, NC370F },
118 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
119 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706S },
120 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708S,
121 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708S },
122 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709,
123 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709 },
124 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709S,
125 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709S },
129 static struct flash_spec flash_table[] =
131 #define BUFFERED_FLAGS (BNX2_NV_BUFFERED | BNX2_NV_TRANSLATE)
132 #define NONBUFFERED_FLAGS (BNX2_NV_WREN)
134 {0x00000000, 0x40830380, 0x009f0081, 0xa184a053, 0xaf000400,
135 BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
136 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
138 /* Expansion entry 0001 */
139 {0x08000002, 0x4b808201, 0x00050081, 0x03840253, 0xaf020406,
140 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
141 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
143 /* Saifun SA25F010 (non-buffered flash) */
144 /* strap, cfg1, & write1 need updates */
145 {0x04000001, 0x47808201, 0x00050081, 0x03840253, 0xaf020406,
146 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
147 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*2,
148 "Non-buffered flash (128kB)"},
149 /* Saifun SA25F020 (non-buffered flash) */
150 /* strap, cfg1, & write1 need updates */
151 {0x0c000003, 0x4f808201, 0x00050081, 0x03840253, 0xaf020406,
152 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
153 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*4,
154 "Non-buffered flash (256kB)"},
155 /* Expansion entry 0100 */
156 {0x11000000, 0x53808201, 0x00050081, 0x03840253, 0xaf020406,
157 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
158 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
160 /* Entry 0101: ST M45PE10 (non-buffered flash, TetonII B0) */
161 {0x19000002, 0x5b808201, 0x000500db, 0x03840253, 0xaf020406,
162 NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
163 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*2,
164 "Entry 0101: ST M45PE10 (128kB non-bufferred)"},
165 /* Entry 0110: ST M45PE20 (non-buffered flash)*/
166 {0x15000001, 0x57808201, 0x000500db, 0x03840253, 0xaf020406,
167 NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
168 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*4,
169 "Entry 0110: ST M45PE20 (256kB non-bufferred)"},
170 /* Saifun SA25F005 (non-buffered flash) */
171 /* strap, cfg1, & write1 need updates */
172 {0x1d000003, 0x5f808201, 0x00050081, 0x03840253, 0xaf020406,
173 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
174 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE,
175 "Non-buffered flash (64kB)"},
177 {0x22000000, 0x62808380, 0x009f0081, 0xa184a053, 0xaf000400,
178 BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
179 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
181 /* Expansion entry 1001 */
182 {0x2a000002, 0x6b808201, 0x00050081, 0x03840253, 0xaf020406,
183 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
184 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
186 /* Expansion entry 1010 */
187 {0x26000001, 0x67808201, 0x00050081, 0x03840253, 0xaf020406,
188 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
189 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
191 /* ATMEL AT45DB011B (buffered flash) */
192 {0x2e000003, 0x6e808273, 0x00570081, 0x68848353, 0xaf000400,
193 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
194 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE,
195 "Buffered flash (128kB)"},
196 /* Expansion entry 1100 */
197 {0x33000000, 0x73808201, 0x00050081, 0x03840253, 0xaf020406,
198 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
199 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
201 /* Expansion entry 1101 */
202 {0x3b000002, 0x7b808201, 0x00050081, 0x03840253, 0xaf020406,
203 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
204 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
206 /* Atmel Expansion entry 1110 */
207 {0x37000001, 0x76808273, 0x00570081, 0x68848353, 0xaf000400,
208 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
209 BUFFERED_FLASH_BYTE_ADDR_MASK, 0,
210 "Entry 1110 (Atmel)"},
211 /* ATMEL AT45DB021B (buffered flash) */
212 {0x3f000003, 0x7e808273, 0x00570081, 0x68848353, 0xaf000400,
213 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
214 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE*2,
215 "Buffered flash (256kB)"},
218 static struct flash_spec flash_5709 = {
219 .flags = BNX2_NV_BUFFERED,
220 .page_bits = BCM5709_FLASH_PAGE_BITS,
221 .page_size = BCM5709_FLASH_PAGE_SIZE,
222 .addr_mask = BCM5709_FLASH_BYTE_ADDR_MASK,
223 .total_size = BUFFERED_FLASH_TOTAL_SIZE*2,
224 .name = "5709 Buffered flash (256kB)",
227 MODULE_DEVICE_TABLE(pci, bnx2_pci_tbl);
229 static inline u32 bnx2_tx_avail(struct bnx2 *bp)
235 /* The ring uses 256 indices for 255 entries, one of them
236 * needs to be skipped.
238 diff = bp->tx_prod - bp->tx_cons;
239 if (unlikely(diff >= TX_DESC_CNT)) {
241 if (diff == TX_DESC_CNT)
242 diff = MAX_TX_DESC_CNT;
244 return (bp->tx_ring_size - diff);
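/* Indirect register access: the helpers below tunnel reads and writes
 * through the PCICFG register window, holding indirect_lock so that the
 * window address/data pair is never interleaved between callers.
 */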
248 bnx2_reg_rd_ind(struct bnx2 *bp, u32 offset)
252 spin_lock_bh(&bp->indirect_lock);
253 REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
254 val = REG_RD(bp, BNX2_PCICFG_REG_WINDOW);
255 spin_unlock_bh(&bp->indirect_lock);
260 bnx2_reg_wr_ind(struct bnx2 *bp, u32 offset, u32 val)
262 spin_lock_bh(&bp->indirect_lock);
263 REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
264 REG_WR(bp, BNX2_PCICFG_REG_WINDOW, val);
265 spin_unlock_bh(&bp->indirect_lock);
269 bnx2_ctx_wr(struct bnx2 *bp, u32 cid_addr, u32 offset, u32 val)
272 spin_lock_bh(&bp->indirect_lock);
273 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
276 REG_WR(bp, BNX2_CTX_CTX_DATA, val);
277 REG_WR(bp, BNX2_CTX_CTX_CTRL,
278 offset | BNX2_CTX_CTX_CTRL_WRITE_REQ);
279 for (i = 0; i < 5; i++) {
281 val = REG_RD(bp, BNX2_CTX_CTX_CTRL);
282 if ((val & BNX2_CTX_CTX_CTRL_WRITE_REQ) == 0)
287 REG_WR(bp, BNX2_CTX_DATA_ADR, offset);
288 REG_WR(bp, BNX2_CTX_DATA, val);
290 spin_unlock_bh(&bp->indirect_lock);
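/* MII management: bnx2_read_phy()/bnx2_write_phy() drive the EMAC MDIO
 * interface.  When the PHY is in auto-polling mode, polling is paused for
 * the duration of the access and restored afterwards, and the START_BUSY
 * bit is polled (up to 50 iterations) for command completion.
 */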
294 bnx2_read_phy(struct bnx2 *bp, u32 reg, u32 *val)
299 if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
300 val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
301 val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;
303 REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
304 REG_RD(bp, BNX2_EMAC_MDIO_MODE);
309 val1 = (bp->phy_addr << 21) | (reg << 16) |
310 BNX2_EMAC_MDIO_COMM_COMMAND_READ | BNX2_EMAC_MDIO_COMM_DISEXT |
311 BNX2_EMAC_MDIO_COMM_START_BUSY;
312 REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);
314 for (i = 0; i < 50; i++) {
317 val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
318 if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
321 val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
322 val1 &= BNX2_EMAC_MDIO_COMM_DATA;
328 if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY) {
337 if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
338 val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
339 val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;
341 REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
342 REG_RD(bp, BNX2_EMAC_MDIO_MODE);
351 bnx2_write_phy(struct bnx2 *bp, u32 reg, u32 val)
356 if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
357 val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
358 val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;
360 REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
361 REG_RD(bp, BNX2_EMAC_MDIO_MODE);
366 val1 = (bp->phy_addr << 21) | (reg << 16) | val |
367 BNX2_EMAC_MDIO_COMM_COMMAND_WRITE |
368 BNX2_EMAC_MDIO_COMM_START_BUSY | BNX2_EMAC_MDIO_COMM_DISEXT;
369 REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);
371 for (i = 0; i < 50; i++) {
374 val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
375 if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
381 if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)
386 if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
387 val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
388 val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;
390 REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
391 REG_RD(bp, BNX2_EMAC_MDIO_MODE);
400 bnx2_disable_int(struct bnx2 *bp)
402 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
403 BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
404 REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD);
408 bnx2_enable_int(struct bnx2 *bp)
410 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
411 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
412 BNX2_PCICFG_INT_ACK_CMD_MASK_INT | bp->last_status_idx);
414 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
415 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID | bp->last_status_idx);
417 REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
421 bnx2_disable_int_sync(struct bnx2 *bp)
423 atomic_inc(&bp->intr_sem);
424 bnx2_disable_int(bp);
425 synchronize_irq(bp->pdev->irq);
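/* bnx2_netif_stop()/bnx2_netif_start() bracket operations that need the
 * device quiesced: interrupts are masked via intr_sem, NAPI polling is
 * disabled and the transmit queue stopped, then re-enabled in reverse order.
 */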
429 bnx2_netif_stop(struct bnx2 *bp)
431 bnx2_disable_int_sync(bp);
432 if (netif_running(bp->dev)) {
433 napi_disable(&bp->napi);
434 netif_tx_disable(bp->dev);
435 bp->dev->trans_start = jiffies; /* prevent tx timeout */
440 bnx2_netif_start(struct bnx2 *bp)
442 if (atomic_dec_and_test(&bp->intr_sem)) {
443 if (netif_running(bp->dev)) {
444 netif_wake_queue(bp->dev);
445 napi_enable(&bp->napi);
452 bnx2_free_mem(struct bnx2 *bp)
456 for (i = 0; i < bp->ctx_pages; i++) {
457 if (bp->ctx_blk[i]) {
458 pci_free_consistent(bp->pdev, BCM_PAGE_SIZE,
460 bp->ctx_blk_mapping[i]);
461 bp->ctx_blk[i] = NULL;
464 if (bp->status_blk) {
465 pci_free_consistent(bp->pdev, bp->status_stats_size,
466 bp->status_blk, bp->status_blk_mapping);
467 bp->status_blk = NULL;
468 bp->stats_blk = NULL;
470 if (bp->tx_desc_ring) {
471 pci_free_consistent(bp->pdev, TXBD_RING_SIZE,
472 bp->tx_desc_ring, bp->tx_desc_mapping);
473 bp->tx_desc_ring = NULL;
475 kfree(bp->tx_buf_ring);
476 bp->tx_buf_ring = NULL;
477 for (i = 0; i < bp->rx_max_ring; i++) {
478 if (bp->rx_desc_ring[i])
479 pci_free_consistent(bp->pdev, RXBD_RING_SIZE,
481 bp->rx_desc_mapping[i]);
482 bp->rx_desc_ring[i] = NULL;
484 vfree(bp->rx_buf_ring);
485 bp->rx_buf_ring = NULL;
489 bnx2_alloc_mem(struct bnx2 *bp)
491 int i, status_blk_size;
493 bp->tx_buf_ring = kzalloc(SW_TXBD_RING_SIZE, GFP_KERNEL);
494 if (bp->tx_buf_ring == NULL)
497 bp->tx_desc_ring = pci_alloc_consistent(bp->pdev, TXBD_RING_SIZE,
498 &bp->tx_desc_mapping);
499 if (bp->tx_desc_ring == NULL)
502 bp->rx_buf_ring = vmalloc(SW_RXBD_RING_SIZE * bp->rx_max_ring);
503 if (bp->rx_buf_ring == NULL)
506 memset(bp->rx_buf_ring, 0, SW_RXBD_RING_SIZE * bp->rx_max_ring);
508 for (i = 0; i < bp->rx_max_ring; i++) {
509 bp->rx_desc_ring[i] =
510 pci_alloc_consistent(bp->pdev, RXBD_RING_SIZE,
511 &bp->rx_desc_mapping[i]);
512 if (bp->rx_desc_ring[i] == NULL)
517 /* Combine status and statistics blocks into one allocation. */
518 status_blk_size = L1_CACHE_ALIGN(sizeof(struct status_block));
519 bp->status_stats_size = status_blk_size +
520 sizeof(struct statistics_block);
522 bp->status_blk = pci_alloc_consistent(bp->pdev, bp->status_stats_size,
523 &bp->status_blk_mapping);
524 if (bp->status_blk == NULL)
527 memset(bp->status_blk, 0, bp->status_stats_size);
529 bp->stats_blk = (void *) ((unsigned long) bp->status_blk +
532 bp->stats_blk_mapping = bp->status_blk_mapping + status_blk_size;
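	/* The 5709 keeps its connection context in host memory rather than
	 * on-chip; allocate 0x2000 bytes of DMA-coherent pages for it (the
	 * size requirement here is inferred from the constant used below).
	 */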
534 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
535 bp->ctx_pages = 0x2000 / BCM_PAGE_SIZE;
536 if (bp->ctx_pages == 0)
538 for (i = 0; i < bp->ctx_pages; i++) {
539 bp->ctx_blk[i] = pci_alloc_consistent(bp->pdev,
541 &bp->ctx_blk_mapping[i]);
542 if (bp->ctx_blk[i] == NULL)
554 bnx2_report_fw_link(struct bnx2 *bp)
556 u32 fw_link_status = 0;
558 if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
564 switch (bp->line_speed) {
566 if (bp->duplex == DUPLEX_HALF)
567 fw_link_status = BNX2_LINK_STATUS_10HALF;
569 fw_link_status = BNX2_LINK_STATUS_10FULL;
572 if (bp->duplex == DUPLEX_HALF)
573 fw_link_status = BNX2_LINK_STATUS_100HALF;
575 fw_link_status = BNX2_LINK_STATUS_100FULL;
578 if (bp->duplex == DUPLEX_HALF)
579 fw_link_status = BNX2_LINK_STATUS_1000HALF;
581 fw_link_status = BNX2_LINK_STATUS_1000FULL;
584 if (bp->duplex == DUPLEX_HALF)
585 fw_link_status = BNX2_LINK_STATUS_2500HALF;
587 fw_link_status = BNX2_LINK_STATUS_2500FULL;
591 fw_link_status |= BNX2_LINK_STATUS_LINK_UP;
594 fw_link_status |= BNX2_LINK_STATUS_AN_ENABLED;
596 bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
597 bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
599 if (!(bmsr & BMSR_ANEGCOMPLETE) ||
600 bp->phy_flags & PHY_PARALLEL_DETECT_FLAG)
601 fw_link_status |= BNX2_LINK_STATUS_PARALLEL_DET;
603 fw_link_status |= BNX2_LINK_STATUS_AN_COMPLETE;
607 fw_link_status = BNX2_LINK_STATUS_LINK_DOWN;
609 REG_WR_IND(bp, bp->shmem_base + BNX2_LINK_STATUS, fw_link_status);
613 bnx2_xceiver_str(struct bnx2 *bp)
615 return ((bp->phy_port == PORT_FIBRE) ? "SerDes" :
616 ((bp->phy_flags & PHY_SERDES_FLAG) ? "Remote Copper" :
621 bnx2_report_link(struct bnx2 *bp)
624 netif_carrier_on(bp->dev);
625 printk(KERN_INFO PFX "%s NIC %s Link is Up, ", bp->dev->name,
626 bnx2_xceiver_str(bp));
628 printk("%d Mbps ", bp->line_speed);
630 if (bp->duplex == DUPLEX_FULL)
631 printk("full duplex");
633 printk("half duplex");
636 if (bp->flow_ctrl & FLOW_CTRL_RX) {
637 printk(", receive ");
638 if (bp->flow_ctrl & FLOW_CTRL_TX)
639 printk("& transmit ");
642 printk(", transmit ");
644 printk("flow control ON");
649 netif_carrier_off(bp->dev);
650 printk(KERN_ERR PFX "%s NIC %s Link is Down\n", bp->dev->name,
651 bnx2_xceiver_str(bp));
654 bnx2_report_fw_link(bp);
658 bnx2_resolve_flow_ctrl(struct bnx2 *bp)
660 u32 local_adv, remote_adv;
663 if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
664 (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {
666 if (bp->duplex == DUPLEX_FULL) {
667 bp->flow_ctrl = bp->req_flow_ctrl;
672 if (bp->duplex != DUPLEX_FULL) {
676 if ((bp->phy_flags & PHY_SERDES_FLAG) &&
677 (CHIP_NUM(bp) == CHIP_NUM_5708)) {
680 bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
681 if (val & BCM5708S_1000X_STAT1_TX_PAUSE)
682 bp->flow_ctrl |= FLOW_CTRL_TX;
683 if (val & BCM5708S_1000X_STAT1_RX_PAUSE)
684 bp->flow_ctrl |= FLOW_CTRL_RX;
688 bnx2_read_phy(bp, bp->mii_adv, &local_adv);
689 bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);
691 if (bp->phy_flags & PHY_SERDES_FLAG) {
692 u32 new_local_adv = 0;
693 u32 new_remote_adv = 0;
695 if (local_adv & ADVERTISE_1000XPAUSE)
696 new_local_adv |= ADVERTISE_PAUSE_CAP;
697 if (local_adv & ADVERTISE_1000XPSE_ASYM)
698 new_local_adv |= ADVERTISE_PAUSE_ASYM;
699 if (remote_adv & ADVERTISE_1000XPAUSE)
700 new_remote_adv |= ADVERTISE_PAUSE_CAP;
701 if (remote_adv & ADVERTISE_1000XPSE_ASYM)
702 new_remote_adv |= ADVERTISE_PAUSE_ASYM;
704 local_adv = new_local_adv;
705 remote_adv = new_remote_adv;
708 /* See Table 28B-3 of 802.3ab-1999 spec. */
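	/* Pause resolution when both sides autonegotiate:
	 *   local PAUSE,      remote PAUSE        -> TX and RX pause
	 *   local PAUSE+ASYM, remote ASYM only    -> RX pause only
	 *   local ASYM only,  remote PAUSE+ASYM   -> TX pause only
	 */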
709 if (local_adv & ADVERTISE_PAUSE_CAP) {
710 if (local_adv & ADVERTISE_PAUSE_ASYM) {
711 if (remote_adv & ADVERTISE_PAUSE_CAP) {
712 bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
714 else if (remote_adv & ADVERTISE_PAUSE_ASYM) {
715 bp->flow_ctrl = FLOW_CTRL_RX;
719 if (remote_adv & ADVERTISE_PAUSE_CAP) {
720 bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
724 else if (local_adv & ADVERTISE_PAUSE_ASYM) {
725 if ((remote_adv & ADVERTISE_PAUSE_CAP) &&
726 (remote_adv & ADVERTISE_PAUSE_ASYM)) {
728 bp->flow_ctrl = FLOW_CTRL_TX;
734 bnx2_5709s_linkup(struct bnx2 *bp)
740 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_GP_STATUS);
741 bnx2_read_phy(bp, MII_BNX2_GP_TOP_AN_STATUS1, &val);
742 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
744 if ((bp->autoneg & AUTONEG_SPEED) == 0) {
745 bp->line_speed = bp->req_line_speed;
746 bp->duplex = bp->req_duplex;
749 speed = val & MII_BNX2_GP_TOP_AN_SPEED_MSK;
751 case MII_BNX2_GP_TOP_AN_SPEED_10:
752 bp->line_speed = SPEED_10;
754 case MII_BNX2_GP_TOP_AN_SPEED_100:
755 bp->line_speed = SPEED_100;
757 case MII_BNX2_GP_TOP_AN_SPEED_1G:
758 case MII_BNX2_GP_TOP_AN_SPEED_1GKV:
759 bp->line_speed = SPEED_1000;
761 case MII_BNX2_GP_TOP_AN_SPEED_2_5G:
762 bp->line_speed = SPEED_2500;
765 if (val & MII_BNX2_GP_TOP_AN_FD)
766 bp->duplex = DUPLEX_FULL;
768 bp->duplex = DUPLEX_HALF;
773 bnx2_5708s_linkup(struct bnx2 *bp)
778 bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
779 switch (val & BCM5708S_1000X_STAT1_SPEED_MASK) {
780 case BCM5708S_1000X_STAT1_SPEED_10:
781 bp->line_speed = SPEED_10;
783 case BCM5708S_1000X_STAT1_SPEED_100:
784 bp->line_speed = SPEED_100;
786 case BCM5708S_1000X_STAT1_SPEED_1G:
787 bp->line_speed = SPEED_1000;
789 case BCM5708S_1000X_STAT1_SPEED_2G5:
790 bp->line_speed = SPEED_2500;
793 if (val & BCM5708S_1000X_STAT1_FD)
794 bp->duplex = DUPLEX_FULL;
796 bp->duplex = DUPLEX_HALF;
802 bnx2_5706s_linkup(struct bnx2 *bp)
804 u32 bmcr, local_adv, remote_adv, common;
807 bp->line_speed = SPEED_1000;
809 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
810 if (bmcr & BMCR_FULLDPLX) {
811 bp->duplex = DUPLEX_FULL;
814 bp->duplex = DUPLEX_HALF;
817 if (!(bmcr & BMCR_ANENABLE)) {
821 bnx2_read_phy(bp, bp->mii_adv, &local_adv);
822 bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);
824 common = local_adv & remote_adv;
825 if (common & (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL)) {
827 if (common & ADVERTISE_1000XFULL) {
828 bp->duplex = DUPLEX_FULL;
831 bp->duplex = DUPLEX_HALF;
839 bnx2_copper_linkup(struct bnx2 *bp)
843 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
844 if (bmcr & BMCR_ANENABLE) {
845 u32 local_adv, remote_adv, common;
847 bnx2_read_phy(bp, MII_CTRL1000, &local_adv);
848 bnx2_read_phy(bp, MII_STAT1000, &remote_adv);
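		/* The link partner's 1000BASE-T ability bits in MII_STAT1000
		 * sit two bit positions to the left of the corresponding
		 * MII_CTRL1000 advertisement bits, so shift to align them.
		 */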
850 common = local_adv & (remote_adv >> 2);
851 if (common & ADVERTISE_1000FULL) {
852 bp->line_speed = SPEED_1000;
853 bp->duplex = DUPLEX_FULL;
855 else if (common & ADVERTISE_1000HALF) {
856 bp->line_speed = SPEED_1000;
857 bp->duplex = DUPLEX_HALF;
860 bnx2_read_phy(bp, bp->mii_adv, &local_adv);
861 bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);
863 common = local_adv & remote_adv;
864 if (common & ADVERTISE_100FULL) {
865 bp->line_speed = SPEED_100;
866 bp->duplex = DUPLEX_FULL;
868 else if (common & ADVERTISE_100HALF) {
869 bp->line_speed = SPEED_100;
870 bp->duplex = DUPLEX_HALF;
872 else if (common & ADVERTISE_10FULL) {
873 bp->line_speed = SPEED_10;
874 bp->duplex = DUPLEX_FULL;
876 else if (common & ADVERTISE_10HALF) {
877 bp->line_speed = SPEED_10;
878 bp->duplex = DUPLEX_HALF;
887 if (bmcr & BMCR_SPEED100) {
888 bp->line_speed = SPEED_100;
891 bp->line_speed = SPEED_10;
893 if (bmcr & BMCR_FULLDPLX) {
894 bp->duplex = DUPLEX_FULL;
897 bp->duplex = DUPLEX_HALF;
905 bnx2_set_mac_link(struct bnx2 *bp)
909 REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x2620);
910 if (bp->link_up && (bp->line_speed == SPEED_1000) &&
911 (bp->duplex == DUPLEX_HALF)) {
912 REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x26ff);
915 /* Configure the EMAC mode register. */
916 val = REG_RD(bp, BNX2_EMAC_MODE);
918 val &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
919 BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
920 BNX2_EMAC_MODE_25G_MODE);
923 switch (bp->line_speed) {
925 if (CHIP_NUM(bp) != CHIP_NUM_5706) {
926 val |= BNX2_EMAC_MODE_PORT_MII_10M;
931 val |= BNX2_EMAC_MODE_PORT_MII;
934 val |= BNX2_EMAC_MODE_25G_MODE;
937 val |= BNX2_EMAC_MODE_PORT_GMII;
942 val |= BNX2_EMAC_MODE_PORT_GMII;
945 /* Set the MAC to operate in the appropriate duplex mode. */
946 if (bp->duplex == DUPLEX_HALF)
947 val |= BNX2_EMAC_MODE_HALF_DUPLEX;
948 REG_WR(bp, BNX2_EMAC_MODE, val);
950 /* Enable/disable rx PAUSE. */
951 bp->rx_mode &= ~BNX2_EMAC_RX_MODE_FLOW_EN;
953 if (bp->flow_ctrl & FLOW_CTRL_RX)
954 bp->rx_mode |= BNX2_EMAC_RX_MODE_FLOW_EN;
955 REG_WR(bp, BNX2_EMAC_RX_MODE, bp->rx_mode);
957 /* Enable/disable tx PAUSE. */
958 val = REG_RD(bp, BNX2_EMAC_TX_MODE);
959 val &= ~BNX2_EMAC_TX_MODE_FLOW_EN;
961 if (bp->flow_ctrl & FLOW_CTRL_TX)
962 val |= BNX2_EMAC_TX_MODE_FLOW_EN;
963 REG_WR(bp, BNX2_EMAC_TX_MODE, val);
965 /* Acknowledge the interrupt. */
966 REG_WR(bp, BNX2_EMAC_STATUS, BNX2_EMAC_STATUS_LINK_CHANGE);
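/* On 5709 SerDes the autoneg status lives in the GP_STATUS block; these
 * helpers switch the PHY block address so that reads of mii_bmsr1 hit that
 * block, and restore the COMBO_IEEEB0 block afterwards.
 */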
972 bnx2_enable_bmsr1(struct bnx2 *bp)
974 if ((bp->phy_flags & PHY_SERDES_FLAG) &&
975 (CHIP_NUM(bp) == CHIP_NUM_5709))
976 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
977 MII_BNX2_BLK_ADDR_GP_STATUS);
981 bnx2_disable_bmsr1(struct bnx2 *bp)
983 if ((bp->phy_flags & PHY_SERDES_FLAG) &&
984 (CHIP_NUM(bp) == CHIP_NUM_5709))
985 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
986 MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
990 bnx2_test_and_enable_2g5(struct bnx2 *bp)
995 if (!(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
998 if (bp->autoneg & AUTONEG_SPEED)
999 bp->advertising |= ADVERTISED_2500baseX_Full;
1001 if (CHIP_NUM(bp) == CHIP_NUM_5709)
1002 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
1004 bnx2_read_phy(bp, bp->mii_up1, &up1);
1005 if (!(up1 & BCM5708S_UP1_2G5)) {
1006 up1 |= BCM5708S_UP1_2G5;
1007 bnx2_write_phy(bp, bp->mii_up1, up1);
1011 if (CHIP_NUM(bp) == CHIP_NUM_5709)
1012 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1013 MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1019 bnx2_test_and_disable_2g5(struct bnx2 *bp)
1024 if (!(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
1027 if (CHIP_NUM(bp) == CHIP_NUM_5709)
1028 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
1030 bnx2_read_phy(bp, bp->mii_up1, &up1);
1031 if (up1 & BCM5708S_UP1_2G5) {
1032 up1 &= ~BCM5708S_UP1_2G5;
1033 bnx2_write_phy(bp, bp->mii_up1, up1);
1037 if (CHIP_NUM(bp) == CHIP_NUM_5709)
1038 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1039 MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1045 bnx2_enable_forced_2g5(struct bnx2 *bp)
1049 if (!(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
1052 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
1055 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1056 MII_BNX2_BLK_ADDR_SERDES_DIG);
1057 bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val);
1058 val &= ~MII_BNX2_SD_MISC1_FORCE_MSK;
1059 val |= MII_BNX2_SD_MISC1_FORCE | MII_BNX2_SD_MISC1_FORCE_2_5G;
1060 bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);
1062 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1063 MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1064 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1066 } else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
1067 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1068 bmcr |= BCM5708S_BMCR_FORCE_2500;
1071 if (bp->autoneg & AUTONEG_SPEED) {
1072 bmcr &= ~BMCR_ANENABLE;
1073 if (bp->req_duplex == DUPLEX_FULL)
1074 bmcr |= BMCR_FULLDPLX;
1076 bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
1080 bnx2_disable_forced_2g5(struct bnx2 *bp)
1084 if (!(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
1087 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
1090 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1091 MII_BNX2_BLK_ADDR_SERDES_DIG);
1092 bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val);
1093 val &= ~MII_BNX2_SD_MISC1_FORCE;
1094 bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);
1096 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1097 MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1098 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1100 } else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
1101 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1102 bmcr &= ~BCM5708S_BMCR_FORCE_2500;
1105 if (bp->autoneg & AUTONEG_SPEED)
1106 bmcr |= BMCR_SPEED1000 | BMCR_ANENABLE | BMCR_ANRESTART;
1107 bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
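/* bnx2_set_link() re-reads the link status register (twice, since it latches
 * link transitions), dispatches to the per-chip SerDes or copper link-up
 * handler, resolves flow control and finally programs the MAC to match the
 * negotiated link parameters.
 */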
1111 bnx2_set_link(struct bnx2 *bp)
1116 if (bp->loopback == MAC_LOOPBACK || bp->loopback == PHY_LOOPBACK) {
1121 if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
1124 link_up = bp->link_up;
1126 bnx2_enable_bmsr1(bp);
1127 bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
1128 bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
1129 bnx2_disable_bmsr1(bp);
1131 if ((bp->phy_flags & PHY_SERDES_FLAG) &&
1132 (CHIP_NUM(bp) == CHIP_NUM_5706)) {
1135 val = REG_RD(bp, BNX2_EMAC_STATUS);
1136 if (val & BNX2_EMAC_STATUS_LINK)
1137 bmsr |= BMSR_LSTATUS;
1139 bmsr &= ~BMSR_LSTATUS;
1142 if (bmsr & BMSR_LSTATUS) {
1145 if (bp->phy_flags & PHY_SERDES_FLAG) {
1146 if (CHIP_NUM(bp) == CHIP_NUM_5706)
1147 bnx2_5706s_linkup(bp);
1148 else if (CHIP_NUM(bp) == CHIP_NUM_5708)
1149 bnx2_5708s_linkup(bp);
1150 else if (CHIP_NUM(bp) == CHIP_NUM_5709)
1151 bnx2_5709s_linkup(bp);
1154 bnx2_copper_linkup(bp);
1156 bnx2_resolve_flow_ctrl(bp);
1159 if ((bp->phy_flags & PHY_SERDES_FLAG) &&
1160 (bp->autoneg & AUTONEG_SPEED))
1161 bnx2_disable_forced_2g5(bp);
1163 bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG;
1167 if (bp->link_up != link_up) {
1168 bnx2_report_link(bp);
1171 bnx2_set_mac_link(bp);
1177 bnx2_reset_phy(struct bnx2 *bp)
1182 bnx2_write_phy(bp, bp->mii_bmcr, BMCR_RESET);
1184 #define PHY_RESET_MAX_WAIT 100
1185 for (i = 0; i < PHY_RESET_MAX_WAIT; i++) {
1188 bnx2_read_phy(bp, bp->mii_bmcr, &reg);
1189 if (!(reg & BMCR_RESET)) {
1194 if (i == PHY_RESET_MAX_WAIT) {
1201 bnx2_phy_get_pause_adv(struct bnx2 *bp)
1205 if ((bp->req_flow_ctrl & (FLOW_CTRL_RX | FLOW_CTRL_TX)) ==
1206 (FLOW_CTRL_RX | FLOW_CTRL_TX)) {
1208 if (bp->phy_flags & PHY_SERDES_FLAG) {
1209 adv = ADVERTISE_1000XPAUSE;
1212 adv = ADVERTISE_PAUSE_CAP;
1215 else if (bp->req_flow_ctrl & FLOW_CTRL_TX) {
1216 if (bp->phy_flags & PHY_SERDES_FLAG) {
1217 adv = ADVERTISE_1000XPSE_ASYM;
1220 adv = ADVERTISE_PAUSE_ASYM;
1223 else if (bp->req_flow_ctrl & FLOW_CTRL_RX) {
1224 if (bp->phy_flags & PHY_SERDES_FLAG) {
1225 adv = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
1228 adv = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
1234 static int bnx2_fw_sync(struct bnx2 *, u32, int);
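/* Remote PHY: when the PHY is managed by firmware (REMOTE_PHY_CAP_FLAG),
 * link settings are not written to MII registers directly.  Instead they are
 * encoded into a speed/flow-control word, placed in shared memory and handed
 * to the firmware with a SET_LINK command.
 */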
1237 bnx2_setup_remote_phy(struct bnx2 *bp, u8 port)
1239 u32 speed_arg = 0, pause_adv;
1241 pause_adv = bnx2_phy_get_pause_adv(bp);
1243 if (bp->autoneg & AUTONEG_SPEED) {
1244 speed_arg |= BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG;
1245 if (bp->advertising & ADVERTISED_10baseT_Half)
1246 speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_10HALF;
1247 if (bp->advertising & ADVERTISED_10baseT_Full)
1248 speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_10FULL;
1249 if (bp->advertising & ADVERTISED_100baseT_Half)
1250 speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_100HALF;
1251 if (bp->advertising & ADVERTISED_100baseT_Full)
1252 speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_100FULL;
1253 if (bp->advertising & ADVERTISED_1000baseT_Full)
1254 speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_1GFULL;
1255 if (bp->advertising & ADVERTISED_2500baseX_Full)
1256 speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_2G5FULL;
1258 if (bp->req_line_speed == SPEED_2500)
1259 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_2G5FULL;
1260 else if (bp->req_line_speed == SPEED_1000)
1261 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_1GFULL;
1262 else if (bp->req_line_speed == SPEED_100) {
1263 if (bp->req_duplex == DUPLEX_FULL)
1264 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_100FULL;
1266 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_100HALF;
1267 } else if (bp->req_line_speed == SPEED_10) {
1268 if (bp->req_duplex == DUPLEX_FULL)
1269 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_10FULL;
1271 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_10HALF;
1275 if (pause_adv & (ADVERTISE_1000XPAUSE | ADVERTISE_PAUSE_CAP))
1276 speed_arg |= BNX2_NETLINK_SET_LINK_FC_SYM_PAUSE;
1277 if (pause_adv & (ADVERTISE_1000XPSE_ASYM | ADVERTISE_PAUSE_ASYM))
1278 speed_arg |= BNX2_NETLINK_SET_LINK_FC_ASYM_PAUSE;
1280 if (port == PORT_TP)
1281 speed_arg |= BNX2_NETLINK_SET_LINK_PHY_APP_REMOTE |
1282 BNX2_NETLINK_SET_LINK_ETH_AT_WIRESPEED;
1284 REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_MB_ARG0, speed_arg);
1286 spin_unlock_bh(&bp->phy_lock);
1287 bnx2_fw_sync(bp, BNX2_DRV_MSG_CODE_CMD_SET_LINK, 0);
1288 spin_lock_bh(&bp->phy_lock);
1294 bnx2_setup_serdes_phy(struct bnx2 *bp, u8 port)
1299 if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
1300 return (bnx2_setup_remote_phy(bp, port));
1302 if (!(bp->autoneg & AUTONEG_SPEED)) {
1304 int force_link_down = 0;
1306 if (bp->req_line_speed == SPEED_2500) {
1307 if (!bnx2_test_and_enable_2g5(bp))
1308 force_link_down = 1;
1309 } else if (bp->req_line_speed == SPEED_1000) {
1310 if (bnx2_test_and_disable_2g5(bp))
1311 force_link_down = 1;
1313 bnx2_read_phy(bp, bp->mii_adv, &adv);
1314 adv &= ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF);
1316 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1317 new_bmcr = bmcr & ~BMCR_ANENABLE;
1318 new_bmcr |= BMCR_SPEED1000;
1320 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
1321 if (bp->req_line_speed == SPEED_2500)
1322 bnx2_enable_forced_2g5(bp);
1323 else if (bp->req_line_speed == SPEED_1000) {
1324 bnx2_disable_forced_2g5(bp);
1325 new_bmcr &= ~0x2000;
1328 } else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
1329 if (bp->req_line_speed == SPEED_2500)
1330 new_bmcr |= BCM5708S_BMCR_FORCE_2500;
1332 new_bmcr = bmcr & ~BCM5708S_BMCR_FORCE_2500;
1335 if (bp->req_duplex == DUPLEX_FULL) {
1336 adv |= ADVERTISE_1000XFULL;
1337 new_bmcr |= BMCR_FULLDPLX;
1340 adv |= ADVERTISE_1000XHALF;
1341 new_bmcr &= ~BMCR_FULLDPLX;
1343 if ((new_bmcr != bmcr) || (force_link_down)) {
1344 /* Force a link down visible on the other side */
1346 bnx2_write_phy(bp, bp->mii_adv, adv &
1347 ~(ADVERTISE_1000XFULL |
1348 ADVERTISE_1000XHALF));
1349 bnx2_write_phy(bp, bp->mii_bmcr, bmcr |
1350 BMCR_ANRESTART | BMCR_ANENABLE);
1353 netif_carrier_off(bp->dev);
1354 bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
1355 bnx2_report_link(bp);
1357 bnx2_write_phy(bp, bp->mii_adv, adv);
1358 bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
1360 bnx2_resolve_flow_ctrl(bp);
1361 bnx2_set_mac_link(bp);
1366 bnx2_test_and_enable_2g5(bp);
1368 if (bp->advertising & ADVERTISED_1000baseT_Full)
1369 new_adv |= ADVERTISE_1000XFULL;
1371 new_adv |= bnx2_phy_get_pause_adv(bp);
1373 bnx2_read_phy(bp, bp->mii_adv, &adv);
1374 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1376 bp->serdes_an_pending = 0;
1377 if ((adv != new_adv) || ((bmcr & BMCR_ANENABLE) == 0)) {
1378 /* Force a link down visible on the other side */
1380 bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
1381 spin_unlock_bh(&bp->phy_lock);
1383 spin_lock_bh(&bp->phy_lock);
1386 bnx2_write_phy(bp, bp->mii_adv, new_adv);
1387 bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART |
1389 /* Speed up link-up time when the link partner
1390 * does not autonegotiate, which is very common
1391 * in blade servers. Some blade servers use
1392 * IPMI for keyboard input and it's important
1393 * to minimize link disruptions. Autoneg. involves
1394 * exchanging base pages plus 3 next pages and
1395 * normally completes in about 120 msec.
1397 bp->current_interval = SERDES_AN_TIMEOUT;
1398 bp->serdes_an_pending = 1;
1399 mod_timer(&bp->timer, jiffies + bp->current_interval);
1401 bnx2_resolve_flow_ctrl(bp);
1402 bnx2_set_mac_link(bp);
1408 #define ETHTOOL_ALL_FIBRE_SPEED \
1409 (bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) ? \
1410 (ADVERTISED_2500baseX_Full | ADVERTISED_1000baseT_Full) :\
1411 (ADVERTISED_1000baseT_Full)
1413 #define ETHTOOL_ALL_COPPER_SPEED \
1414 (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | \
1415 ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full | \
1416 ADVERTISED_1000baseT_Full)
1418 #define PHY_ALL_10_100_SPEED (ADVERTISE_10HALF | ADVERTISE_10FULL | \
1419 ADVERTISE_100HALF | ADVERTISE_100FULL | ADVERTISE_CSMA)
1421 #define PHY_ALL_1000_SPEED (ADVERTISE_1000HALF | ADVERTISE_1000FULL)
1424 bnx2_set_default_remote_link(struct bnx2 *bp)
1428 if (bp->phy_port == PORT_TP)
1429 link = REG_RD_IND(bp, bp->shmem_base + BNX2_RPHY_COPPER_LINK);
1431 link = REG_RD_IND(bp, bp->shmem_base + BNX2_RPHY_SERDES_LINK);
1433 if (link & BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG) {
1434 bp->req_line_speed = 0;
1435 bp->autoneg |= AUTONEG_SPEED;
1436 bp->advertising = ADVERTISED_Autoneg;
1437 if (link & BNX2_NETLINK_SET_LINK_SPEED_10HALF)
1438 bp->advertising |= ADVERTISED_10baseT_Half;
1439 if (link & BNX2_NETLINK_SET_LINK_SPEED_10FULL)
1440 bp->advertising |= ADVERTISED_10baseT_Full;
1441 if (link & BNX2_NETLINK_SET_LINK_SPEED_100HALF)
1442 bp->advertising |= ADVERTISED_100baseT_Half;
1443 if (link & BNX2_NETLINK_SET_LINK_SPEED_100FULL)
1444 bp->advertising |= ADVERTISED_100baseT_Full;
1445 if (link & BNX2_NETLINK_SET_LINK_SPEED_1GFULL)
1446 bp->advertising |= ADVERTISED_1000baseT_Full;
1447 if (link & BNX2_NETLINK_SET_LINK_SPEED_2G5FULL)
1448 bp->advertising |= ADVERTISED_2500baseX_Full;
1451 bp->advertising = 0;
1452 bp->req_duplex = DUPLEX_FULL;
1453 if (link & BNX2_NETLINK_SET_LINK_SPEED_10) {
1454 bp->req_line_speed = SPEED_10;
1455 if (link & BNX2_NETLINK_SET_LINK_SPEED_10HALF)
1456 bp->req_duplex = DUPLEX_HALF;
1458 if (link & BNX2_NETLINK_SET_LINK_SPEED_100) {
1459 bp->req_line_speed = SPEED_100;
1460 if (link & BNX2_NETLINK_SET_LINK_SPEED_100HALF)
1461 bp->req_duplex = DUPLEX_HALF;
1463 if (link & BNX2_NETLINK_SET_LINK_SPEED_1GFULL)
1464 bp->req_line_speed = SPEED_1000;
1465 if (link & BNX2_NETLINK_SET_LINK_SPEED_2G5FULL)
1466 bp->req_line_speed = SPEED_2500;
1471 bnx2_set_default_link(struct bnx2 *bp)
1473 if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
1474 return bnx2_set_default_remote_link(bp);
1476 bp->autoneg = AUTONEG_SPEED | AUTONEG_FLOW_CTRL;
1477 bp->req_line_speed = 0;
1478 if (bp->phy_flags & PHY_SERDES_FLAG) {
1481 bp->advertising = ETHTOOL_ALL_FIBRE_SPEED | ADVERTISED_Autoneg;
1483 reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_CONFIG);
1484 reg &= BNX2_PORT_HW_CFG_CFG_DFLT_LINK_MASK;
1485 if (reg == BNX2_PORT_HW_CFG_CFG_DFLT_LINK_1G) {
1487 bp->req_line_speed = bp->line_speed = SPEED_1000;
1488 bp->req_duplex = DUPLEX_FULL;
1491 bp->advertising = ETHTOOL_ALL_COPPER_SPEED | ADVERTISED_Autoneg;
1495 bnx2_send_heart_beat(struct bnx2 *bp)
1500 spin_lock(&bp->indirect_lock);
1501 msg = (u32) (++bp->fw_drv_pulse_wr_seq & BNX2_DRV_PULSE_SEQ_MASK);
1502 addr = bp->shmem_base + BNX2_DRV_PULSE_MB;
1503 REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, addr);
1504 REG_WR(bp, BNX2_PCICFG_REG_WINDOW, msg);
1505 spin_unlock(&bp->indirect_lock);
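/* bnx2_remote_phy_event() decodes the link status word the firmware posts in
 * shared memory: heartbeat requests, link up/down, speed and duplex (the
 * ...HALF cases fall through to the matching ...FULL cases to pick up the
 * common speed), flow control and the active port type.
 */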
1509 bnx2_remote_phy_event(struct bnx2 *bp)
1512 u8 link_up = bp->link_up;
1515 msg = REG_RD_IND(bp, bp->shmem_base + BNX2_LINK_STATUS);
1517 if (msg & BNX2_LINK_STATUS_HEART_BEAT_EXPIRED)
1518 bnx2_send_heart_beat(bp);
1520 msg &= ~BNX2_LINK_STATUS_HEART_BEAT_EXPIRED;
1522 if ((msg & BNX2_LINK_STATUS_LINK_UP) == BNX2_LINK_STATUS_LINK_DOWN)
1528 speed = msg & BNX2_LINK_STATUS_SPEED_MASK;
1529 bp->duplex = DUPLEX_FULL;
1531 case BNX2_LINK_STATUS_10HALF:
1532 bp->duplex = DUPLEX_HALF;
1533 case BNX2_LINK_STATUS_10FULL:
1534 bp->line_speed = SPEED_10;
1536 case BNX2_LINK_STATUS_100HALF:
1537 bp->duplex = DUPLEX_HALF;
1538 case BNX2_LINK_STATUS_100BASE_T4:
1539 case BNX2_LINK_STATUS_100FULL:
1540 bp->line_speed = SPEED_100;
1542 case BNX2_LINK_STATUS_1000HALF:
1543 bp->duplex = DUPLEX_HALF;
1544 case BNX2_LINK_STATUS_1000FULL:
1545 bp->line_speed = SPEED_1000;
1547 case BNX2_LINK_STATUS_2500HALF:
1548 bp->duplex = DUPLEX_HALF;
1549 case BNX2_LINK_STATUS_2500FULL:
1550 bp->line_speed = SPEED_2500;
1557 spin_lock(&bp->phy_lock);
1559 if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
1560 (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {
1561 if (bp->duplex == DUPLEX_FULL)
1562 bp->flow_ctrl = bp->req_flow_ctrl;
1564 if (msg & BNX2_LINK_STATUS_TX_FC_ENABLED)
1565 bp->flow_ctrl |= FLOW_CTRL_TX;
1566 if (msg & BNX2_LINK_STATUS_RX_FC_ENABLED)
1567 bp->flow_ctrl |= FLOW_CTRL_RX;
1570 old_port = bp->phy_port;
1571 if (msg & BNX2_LINK_STATUS_SERDES_LINK)
1572 bp->phy_port = PORT_FIBRE;
1574 bp->phy_port = PORT_TP;
1576 if (old_port != bp->phy_port)
1577 bnx2_set_default_link(bp);
1579 spin_unlock(&bp->phy_lock);
1581 if (bp->link_up != link_up)
1582 bnx2_report_link(bp);
1584 bnx2_set_mac_link(bp);
1588 bnx2_set_remote_link(struct bnx2 *bp)
1592 evt_code = REG_RD_IND(bp, bp->shmem_base + BNX2_FW_EVT_CODE_MB);
1594 case BNX2_FW_EVT_CODE_LINK_EVENT:
1595 bnx2_remote_phy_event(bp);
1597 case BNX2_FW_EVT_CODE_SW_TIMER_EXPIRATION_EVENT:
1599 bnx2_send_heart_beat(bp);
1606 bnx2_setup_copper_phy(struct bnx2 *bp)
1611 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1613 if (bp->autoneg & AUTONEG_SPEED) {
1614 u32 adv_reg, adv1000_reg;
1615 u32 new_adv_reg = 0;
1616 u32 new_adv1000_reg = 0;
1618 bnx2_read_phy(bp, bp->mii_adv, &adv_reg);
1619 adv_reg &= (PHY_ALL_10_100_SPEED | ADVERTISE_PAUSE_CAP |
1620 ADVERTISE_PAUSE_ASYM);
1622 bnx2_read_phy(bp, MII_CTRL1000, &adv1000_reg);
1623 adv1000_reg &= PHY_ALL_1000_SPEED;
1625 if (bp->advertising & ADVERTISED_10baseT_Half)
1626 new_adv_reg |= ADVERTISE_10HALF;
1627 if (bp->advertising & ADVERTISED_10baseT_Full)
1628 new_adv_reg |= ADVERTISE_10FULL;
1629 if (bp->advertising & ADVERTISED_100baseT_Half)
1630 new_adv_reg |= ADVERTISE_100HALF;
1631 if (bp->advertising & ADVERTISED_100baseT_Full)
1632 new_adv_reg |= ADVERTISE_100FULL;
1633 if (bp->advertising & ADVERTISED_1000baseT_Full)
1634 new_adv1000_reg |= ADVERTISE_1000FULL;
1636 new_adv_reg |= ADVERTISE_CSMA;
1638 new_adv_reg |= bnx2_phy_get_pause_adv(bp);
1640 if ((adv1000_reg != new_adv1000_reg) ||
1641 (adv_reg != new_adv_reg) ||
1642 ((bmcr & BMCR_ANENABLE) == 0)) {
1644 bnx2_write_phy(bp, bp->mii_adv, new_adv_reg);
1645 bnx2_write_phy(bp, MII_CTRL1000, new_adv1000_reg);
1646 bnx2_write_phy(bp, bp->mii_bmcr, BMCR_ANRESTART |
1649 else if (bp->link_up) {
1650 /* Flow ctrl may have changed from auto to forced */
1651 /* or vice-versa. */
1653 bnx2_resolve_flow_ctrl(bp);
1654 bnx2_set_mac_link(bp);
1660 if (bp->req_line_speed == SPEED_100) {
1661 new_bmcr |= BMCR_SPEED100;
1663 if (bp->req_duplex == DUPLEX_FULL) {
1664 new_bmcr |= BMCR_FULLDPLX;
1666 if (new_bmcr != bmcr) {
1669 bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
1670 bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
1672 if (bmsr & BMSR_LSTATUS) {
1673 /* Force link down */
1674 bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
1675 spin_unlock_bh(&bp->phy_lock);
1677 spin_lock_bh(&bp->phy_lock);
1679 bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
1680 bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
1683 bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
1685 /* Normally, the new speed is setup after the link has
1686 * gone down and up again. In some cases, link will not go
1687 * down so we need to set up the new speed here.
1689 if (bmsr & BMSR_LSTATUS) {
1690 bp->line_speed = bp->req_line_speed;
1691 bp->duplex = bp->req_duplex;
1692 bnx2_resolve_flow_ctrl(bp);
1693 bnx2_set_mac_link(bp);
1696 bnx2_resolve_flow_ctrl(bp);
1697 bnx2_set_mac_link(bp);
1703 bnx2_setup_phy(struct bnx2 *bp, u8 port)
1705 if (bp->loopback == MAC_LOOPBACK)
1708 if (bp->phy_flags & PHY_SERDES_FLAG) {
1709 return (bnx2_setup_serdes_phy(bp, port));
1712 return (bnx2_setup_copper_phy(bp));
1717 bnx2_init_5709s_phy(struct bnx2 *bp)
1721 bp->mii_bmcr = MII_BMCR + 0x10;
1722 bp->mii_bmsr = MII_BMSR + 0x10;
1723 bp->mii_bmsr1 = MII_BNX2_GP_TOP_AN_STATUS1;
1724 bp->mii_adv = MII_ADVERTISE + 0x10;
1725 bp->mii_lpa = MII_LPA + 0x10;
1726 bp->mii_up1 = MII_BNX2_OVER1G_UP1;
1728 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_AER);
1729 bnx2_write_phy(bp, MII_BNX2_AER_AER, MII_BNX2_AER_AER_AN_MMD);
1731 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1734 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_SERDES_DIG);
1736 bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, &val);
1737 val &= ~MII_BNX2_SD_1000XCTL1_AUTODET;
1738 val |= MII_BNX2_SD_1000XCTL1_FIBER;
1739 bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, val);
1741 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
1742 bnx2_read_phy(bp, MII_BNX2_OVER1G_UP1, &val);
1743 if (bp->phy_flags & PHY_2_5G_CAPABLE_FLAG)
1744 val |= BCM5708S_UP1_2G5;
1746 val &= ~BCM5708S_UP1_2G5;
1747 bnx2_write_phy(bp, MII_BNX2_OVER1G_UP1, val);
1749 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_BAM_NXTPG);
1750 bnx2_read_phy(bp, MII_BNX2_BAM_NXTPG_CTL, &val);
1751 val |= MII_BNX2_NXTPG_CTL_T2 | MII_BNX2_NXTPG_CTL_BAM;
1752 bnx2_write_phy(bp, MII_BNX2_BAM_NXTPG_CTL, val);
1754 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_CL73_USERB0);
1756 val = MII_BNX2_CL73_BAM_EN | MII_BNX2_CL73_BAM_STA_MGR_EN |
1757 MII_BNX2_CL73_BAM_NP_AFT_BP_EN;
1758 bnx2_write_phy(bp, MII_BNX2_CL73_BAM_CTL1, val);
1760 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1766 bnx2_init_5708s_phy(struct bnx2 *bp)
1772 bp->mii_up1 = BCM5708S_UP1;
1774 bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG3);
1775 bnx2_write_phy(bp, BCM5708S_DIG_3_0, BCM5708S_DIG_3_0_USE_IEEE);
1776 bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);
1778 bnx2_read_phy(bp, BCM5708S_1000X_CTL1, &val);
1779 val |= BCM5708S_1000X_CTL1_FIBER_MODE | BCM5708S_1000X_CTL1_AUTODET_EN;
1780 bnx2_write_phy(bp, BCM5708S_1000X_CTL1, val);
1782 bnx2_read_phy(bp, BCM5708S_1000X_CTL2, &val);
1783 val |= BCM5708S_1000X_CTL2_PLLEL_DET_EN;
1784 bnx2_write_phy(bp, BCM5708S_1000X_CTL2, val);
1786 if (bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) {
1787 bnx2_read_phy(bp, BCM5708S_UP1, &val);
1788 val |= BCM5708S_UP1_2G5;
1789 bnx2_write_phy(bp, BCM5708S_UP1, val);
1792 if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
1793 (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
1794 (CHIP_ID(bp) == CHIP_ID_5708_B1)) {
1795 /* increase tx signal amplitude */
1796 bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
1797 BCM5708S_BLK_ADDR_TX_MISC);
1798 bnx2_read_phy(bp, BCM5708S_TX_ACTL1, &val);
1799 val &= ~BCM5708S_TX_ACTL1_DRIVER_VCM;
1800 bnx2_write_phy(bp, BCM5708S_TX_ACTL1, val);
1801 bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);
1804 val = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_CONFIG) &
1805 BNX2_PORT_HW_CFG_CFG_TXCTL3_MASK;
1810 is_backplane = REG_RD_IND(bp, bp->shmem_base +
1811 BNX2_SHARED_HW_CFG_CONFIG);
1812 if (is_backplane & BNX2_SHARED_HW_CFG_PHY_BACKPLANE) {
1813 bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
1814 BCM5708S_BLK_ADDR_TX_MISC);
1815 bnx2_write_phy(bp, BCM5708S_TX_ACTL3, val);
1816 bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
1817 BCM5708S_BLK_ADDR_DIG);
1824 bnx2_init_5706s_phy(struct bnx2 *bp)
1828 bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG;
1830 if (CHIP_NUM(bp) == CHIP_NUM_5706)
1831 REG_WR(bp, BNX2_MISC_GP_HW_CTL0, 0x300);
1833 if (bp->dev->mtu > 1500) {
1836 /* Set extended packet length bit */
1837 bnx2_write_phy(bp, 0x18, 0x7);
1838 bnx2_read_phy(bp, 0x18, &val);
1839 bnx2_write_phy(bp, 0x18, (val & 0xfff8) | 0x4000);
1841 bnx2_write_phy(bp, 0x1c, 0x6c00);
1842 bnx2_read_phy(bp, 0x1c, &val);
1843 bnx2_write_phy(bp, 0x1c, (val & 0x3ff) | 0xec02);
1848 bnx2_write_phy(bp, 0x18, 0x7);
1849 bnx2_read_phy(bp, 0x18, &val);
1850 bnx2_write_phy(bp, 0x18, val & ~0x4007);
1852 bnx2_write_phy(bp, 0x1c, 0x6c00);
1853 bnx2_read_phy(bp, 0x1c, &val);
1854 bnx2_write_phy(bp, 0x1c, (val & 0x3fd) | 0xec00);
1861 bnx2_init_copper_phy(struct bnx2 *bp)
1867 if (bp->phy_flags & PHY_CRC_FIX_FLAG) {
1868 bnx2_write_phy(bp, 0x18, 0x0c00);
1869 bnx2_write_phy(bp, 0x17, 0x000a);
1870 bnx2_write_phy(bp, 0x15, 0x310b);
1871 bnx2_write_phy(bp, 0x17, 0x201f);
1872 bnx2_write_phy(bp, 0x15, 0x9506);
1873 bnx2_write_phy(bp, 0x17, 0x401f);
1874 bnx2_write_phy(bp, 0x15, 0x14e2);
1875 bnx2_write_phy(bp, 0x18, 0x0400);
1878 if (bp->phy_flags & PHY_DIS_EARLY_DAC_FLAG) {
1879 bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS,
1880 MII_BNX2_DSP_EXPAND_REG | 0x8);
1881 bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &val);
1883 bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val);
1886 if (bp->dev->mtu > 1500) {
1887 /* Set extended packet length bit */
1888 bnx2_write_phy(bp, 0x18, 0x7);
1889 bnx2_read_phy(bp, 0x18, &val);
1890 bnx2_write_phy(bp, 0x18, val | 0x4000);
1892 bnx2_read_phy(bp, 0x10, &val);
1893 bnx2_write_phy(bp, 0x10, val | 0x1);
1896 bnx2_write_phy(bp, 0x18, 0x7);
1897 bnx2_read_phy(bp, 0x18, &val);
1898 bnx2_write_phy(bp, 0x18, val & ~0x4007);
1900 bnx2_read_phy(bp, 0x10, &val);
1901 bnx2_write_phy(bp, 0x10, val & ~0x1);
1904 /* ethernet@wirespeed */
1905 bnx2_write_phy(bp, 0x18, 0x7007);
1906 bnx2_read_phy(bp, 0x18, &val);
1907 bnx2_write_phy(bp, 0x18, val | (1 << 15) | (1 << 4));
1913 bnx2_init_phy(struct bnx2 *bp)
1918 bp->phy_flags &= ~PHY_INT_MODE_MASK_FLAG;
1919 bp->phy_flags |= PHY_INT_MODE_LINK_READY_FLAG;
1921 bp->mii_bmcr = MII_BMCR;
1922 bp->mii_bmsr = MII_BMSR;
1923 bp->mii_bmsr1 = MII_BMSR;
1924 bp->mii_adv = MII_ADVERTISE;
1925 bp->mii_lpa = MII_LPA;
1927 REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);
1929 if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
1932 bnx2_read_phy(bp, MII_PHYSID1, &val);
1933 bp->phy_id = val << 16;
1934 bnx2_read_phy(bp, MII_PHYSID2, &val);
1935 bp->phy_id |= val & 0xffff;
1937 if (bp->phy_flags & PHY_SERDES_FLAG) {
1938 if (CHIP_NUM(bp) == CHIP_NUM_5706)
1939 rc = bnx2_init_5706s_phy(bp);
1940 else if (CHIP_NUM(bp) == CHIP_NUM_5708)
1941 rc = bnx2_init_5708s_phy(bp);
1942 else if (CHIP_NUM(bp) == CHIP_NUM_5709)
1943 rc = bnx2_init_5709s_phy(bp);
1946 rc = bnx2_init_copper_phy(bp);
1951 rc = bnx2_setup_phy(bp, bp->phy_port);
1957 bnx2_set_mac_loopback(struct bnx2 *bp)
1961 mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
1962 mac_mode &= ~BNX2_EMAC_MODE_PORT;
1963 mac_mode |= BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK;
1964 REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
1969 static int bnx2_test_link(struct bnx2 *);
1972 bnx2_set_phy_loopback(struct bnx2 *bp)
1977 spin_lock_bh(&bp->phy_lock);
1978 rc = bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK | BMCR_FULLDPLX |
1980 spin_unlock_bh(&bp->phy_lock);
1984 for (i = 0; i < 10; i++) {
1985 if (bnx2_test_link(bp) == 0)
1990 mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
1991 mac_mode &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
1992 BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
1993 BNX2_EMAC_MODE_25G_MODE);
1995 mac_mode |= BNX2_EMAC_MODE_PORT_GMII;
1996 REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
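/* bnx2_fw_sync() is the driver/firmware mailbox handshake: a sequence number
 * is attached to the message, the firmware echoes it back in its ACK word,
 * and on timeout the driver rewrites the mailbox with the FW_TIMEOUT code so
 * the firmware can recover.
 */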
2002 bnx2_fw_sync(struct bnx2 *bp, u32 msg_data, int silent)
2008 msg_data |= bp->fw_wr_seq;
2010 REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_MB, msg_data);
2012 /* wait for an acknowledgement. */
2013 for (i = 0; i < (FW_ACK_TIME_OUT_MS / 10); i++) {
2016 val = REG_RD_IND(bp, bp->shmem_base + BNX2_FW_MB);
2018 if ((val & BNX2_FW_MSG_ACK) == (msg_data & BNX2_DRV_MSG_SEQ))
2021 if ((msg_data & BNX2_DRV_MSG_DATA) == BNX2_DRV_MSG_DATA_WAIT0)
2024 /* If we timed out, inform the firmware that this is the case. */
2025 if ((val & BNX2_FW_MSG_ACK) != (msg_data & BNX2_DRV_MSG_SEQ)) {
2027 printk(KERN_ERR PFX "fw sync timeout, reset code = "
2030 msg_data &= ~BNX2_DRV_MSG_CODE;
2031 msg_data |= BNX2_DRV_MSG_CODE_FW_TIMEOUT;
2033 REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_MB, msg_data);
2038 if ((val & BNX2_FW_MSG_STATUS_MASK) != BNX2_FW_MSG_STATUS_OK)
2045 bnx2_init_5709_context(struct bnx2 *bp)
2050 val = BNX2_CTX_COMMAND_ENABLED | BNX2_CTX_COMMAND_MEM_INIT | (1 << 12);
2051 val |= (BCM_PAGE_BITS - 8) << 16;
2052 REG_WR(bp, BNX2_CTX_COMMAND, val);
2053 for (i = 0; i < 10; i++) {
2054 val = REG_RD(bp, BNX2_CTX_COMMAND);
2055 if (!(val & BNX2_CTX_COMMAND_MEM_INIT))
2059 if (val & BNX2_CTX_COMMAND_MEM_INIT)
2062 for (i = 0; i < bp->ctx_pages; i++) {
2065 REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA0,
2066 (bp->ctx_blk_mapping[i] & 0xffffffff) |
2067 BNX2_CTX_HOST_PAGE_TBL_DATA0_VALID);
2068 REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA1,
2069 (u64) bp->ctx_blk_mapping[i] >> 32);
2070 REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL, i |
2071 BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ);
2072 for (j = 0; j < 10; j++) {
2074 val = REG_RD(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL);
2075 if (!(val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ))
2079 if (val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ) {
2088 bnx2_init_context(struct bnx2 *bp)
2094 u32 vcid_addr, pcid_addr, offset;
2099 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
2102 vcid_addr = GET_PCID_ADDR(vcid);
2104 new_vcid = 0x60 + (vcid & 0xf0) + (vcid & 0x7);
2109 pcid_addr = GET_PCID_ADDR(new_vcid);
2112 vcid_addr = GET_CID_ADDR(vcid);
2113 pcid_addr = vcid_addr;
2116 for (i = 0; i < (CTX_SIZE / PHY_CTX_SIZE); i++) {
2117 vcid_addr += (i << PHY_CTX_SHIFT);
2118 pcid_addr += (i << PHY_CTX_SHIFT);
2120 REG_WR(bp, BNX2_CTX_VIRT_ADDR, 0x00);
2121 REG_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr);
2123 /* Zero out the context. */
2124 for (offset = 0; offset < PHY_CTX_SIZE; offset += 4)
2125 CTX_WR(bp, 0x00, offset, 0);
2127 REG_WR(bp, BNX2_CTX_VIRT_ADDR, vcid_addr);
2128 REG_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr);
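/* Some chips can ship with defective internal RX buffer memory.  This routine
 * allocates every buffer from the firmware pool, records the good ones (bit 9
 * clear) and frees only those back, leaving the bad buffers permanently
 * allocated so the hardware never hands them out.  (Callers, not shown in
 * this excerpt, appear to restrict this to early 5706 revisions.)
 */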
2134 bnx2_alloc_bad_rbuf(struct bnx2 *bp)
2140 good_mbuf = kmalloc(512 * sizeof(u16), GFP_KERNEL);
2141 if (good_mbuf == NULL) {
2142 printk(KERN_ERR PFX "Failed to allocate memory in "
2143 "bnx2_alloc_bad_rbuf\n");
2147 REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
2148 BNX2_MISC_ENABLE_SET_BITS_RX_MBUF_ENABLE);
2152 /* Allocate a bunch of mbufs and save the good ones in an array. */
2153 val = REG_RD_IND(bp, BNX2_RBUF_STATUS1);
2154 while (val & BNX2_RBUF_STATUS1_FREE_COUNT) {
2155 REG_WR_IND(bp, BNX2_RBUF_COMMAND, BNX2_RBUF_COMMAND_ALLOC_REQ);
2157 val = REG_RD_IND(bp, BNX2_RBUF_FW_BUF_ALLOC);
2159 val &= BNX2_RBUF_FW_BUF_ALLOC_VALUE;
2161 /* The addresses with Bit 9 set are bad memory blocks. */
2162 if (!(val & (1 << 9))) {
2163 good_mbuf[good_mbuf_cnt] = (u16) val;
2167 val = REG_RD_IND(bp, BNX2_RBUF_STATUS1);
2170 /* Free the good ones back to the mbuf pool thus discarding
2171 * all the bad ones. */
2172 while (good_mbuf_cnt) {
2175 val = good_mbuf[good_mbuf_cnt];
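		/* Example: buffer 0x25 becomes (0x25 << 9) | 0x25 | 1 = 0x4a25,
		 * i.e. the buffer number packed into both fields of the free
		 * command word (the exact field layout is assumed from the
		 * encoding below).
		 */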
2176 val = (val << 9) | val | 1;
2178 REG_WR_IND(bp, BNX2_RBUF_FW_BUF_FREE, val);
2185 bnx2_set_mac_addr(struct bnx2 *bp)
2188 u8 *mac_addr = bp->dev->dev_addr;
2190 val = (mac_addr[0] << 8) | mac_addr[1];
2192 REG_WR(bp, BNX2_EMAC_MAC_MATCH0, val);
2194 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
2195 (mac_addr[4] << 8) | mac_addr[5];
2197 REG_WR(bp, BNX2_EMAC_MAC_MATCH1, val);
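/* RX buffer setup: each ring slot gets a freshly allocated skb whose data
 * pointer is aligned to BNX2_RX_ALIGN, DMA-mapped for the device, and whose
 * bus address is split into the hi/lo halves of the rx_bd descriptor.
 */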
2201 bnx2_alloc_rx_skb(struct bnx2 *bp, u16 index)
2203 struct sk_buff *skb;
2204 struct sw_bd *rx_buf = &bp->rx_buf_ring[index];
2206 struct rx_bd *rxbd = &bp->rx_desc_ring[RX_RING(index)][RX_IDX(index)];
2207 unsigned long align;
2209 skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
2214 if (unlikely((align = (unsigned long) skb->data & (BNX2_RX_ALIGN - 1))))
2215 skb_reserve(skb, BNX2_RX_ALIGN - align);
2217 mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_use_size,
2218 PCI_DMA_FROMDEVICE);
2221 pci_unmap_addr_set(rx_buf, mapping, mapping);
2223 rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
2224 rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;
2226 bp->rx_prod_bseq += bp->rx_buf_use_size;
2232 bnx2_phy_event_is_set(struct bnx2 *bp, u32 event)
2234 struct status_block *sblk = bp->status_blk;
2235 u32 new_link_state, old_link_state;
2238 new_link_state = sblk->status_attn_bits & event;
2239 old_link_state = sblk->status_attn_bits_ack & event;
2240 if (new_link_state != old_link_state) {
2242 REG_WR(bp, BNX2_PCICFG_STATUS_BIT_SET_CMD, event);
2244 REG_WR(bp, BNX2_PCICFG_STATUS_BIT_CLEAR_CMD, event);
2252 bnx2_phy_int(struct bnx2 *bp)
2254 if (bnx2_phy_event_is_set(bp, STATUS_ATTN_BITS_LINK_STATE)) {
2255 spin_lock(&bp->phy_lock);
2257 spin_unlock(&bp->phy_lock);
2259 if (bnx2_phy_event_is_set(bp, STATUS_ATTN_BITS_TIMER_ABORT))
2260 bnx2_set_remote_link(bp);
2265 bnx2_tx_int(struct bnx2 *bp)
2267 struct status_block *sblk = bp->status_blk;
2268 u16 hw_cons, sw_cons, sw_ring_cons;
2271 hw_cons = bp->hw_tx_cons = sblk->status_tx_quick_consumer_index0;
2272 if ((hw_cons & MAX_TX_DESC_CNT) == MAX_TX_DESC_CNT) {
2275 sw_cons = bp->tx_cons;
2277 while (sw_cons != hw_cons) {
2278 struct sw_bd *tx_buf;
2279 struct sk_buff *skb;
2282 sw_ring_cons = TX_RING_IDX(sw_cons);
2284 tx_buf = &bp->tx_buf_ring[sw_ring_cons];
2287 /* partial BD completions possible with TSO packets */
2288 if (skb_is_gso(skb)) {
2289 u16 last_idx, last_ring_idx;
2291 last_idx = sw_cons +
2292 skb_shinfo(skb)->nr_frags + 1;
2293 last_ring_idx = sw_ring_cons +
2294 skb_shinfo(skb)->nr_frags + 1;
2295 if (unlikely(last_ring_idx >= MAX_TX_DESC_CNT)) {
2298 if (((s16) ((s16) last_idx - (s16) hw_cons)) > 0) {
2303 pci_unmap_single(bp->pdev, pci_unmap_addr(tx_buf, mapping),
2304 skb_headlen(skb), PCI_DMA_TODEVICE);
2307 last = skb_shinfo(skb)->nr_frags;
2309 for (i = 0; i < last; i++) {
2310 sw_cons = NEXT_TX_BD(sw_cons);
2312 pci_unmap_page(bp->pdev,
2314 &bp->tx_buf_ring[TX_RING_IDX(sw_cons)],
2316 skb_shinfo(skb)->frags[i].size,
2320 sw_cons = NEXT_TX_BD(sw_cons);
2322 tx_free_bd += last + 1;
2326 hw_cons = bp->hw_tx_cons =
2327 sblk->status_tx_quick_consumer_index0;
2329 if ((hw_cons & MAX_TX_DESC_CNT) == MAX_TX_DESC_CNT) {
2334 bp->tx_cons = sw_cons;
2335 /* Need to make the tx_cons update visible to bnx2_start_xmit()
2336 * before checking for netif_queue_stopped(). Without the
2337 * memory barrier, there is a small possibility that bnx2_start_xmit()
2338 * will miss it and cause the queue to be stopped forever.
2342 if (unlikely(netif_queue_stopped(bp->dev)) &&
2343 (bnx2_tx_avail(bp) > bp->tx_wake_thresh)) {
2344 netif_tx_lock(bp->dev);
2345 if ((netif_queue_stopped(bp->dev)) &&
2346 (bnx2_tx_avail(bp) > bp->tx_wake_thresh))
2347 netif_wake_queue(bp->dev);
2348 netif_tx_unlock(bp->dev);
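/* bnx2_reuse_rx_skb() recycles an RX buffer in place: the skb and its DMA
 * mapping are moved from the consumer slot to the producer slot and the
 * descriptor address is copied across, avoiding a new allocation when the
 * packet was copied out or dropped.
 */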
2353 bnx2_reuse_rx_skb(struct bnx2 *bp, struct sk_buff *skb,
2356 struct sw_bd *cons_rx_buf, *prod_rx_buf;
2357 struct rx_bd *cons_bd, *prod_bd;
2359 cons_rx_buf = &bp->rx_buf_ring[cons];
2360 prod_rx_buf = &bp->rx_buf_ring[prod];
2362 pci_dma_sync_single_for_device(bp->pdev,
2363 pci_unmap_addr(cons_rx_buf, mapping),
2364 bp->rx_offset + RX_COPY_THRESH, PCI_DMA_FROMDEVICE);
2366 bp->rx_prod_bseq += bp->rx_buf_use_size;
2368 prod_rx_buf->skb = skb;
2373 pci_unmap_addr_set(prod_rx_buf, mapping,
2374 pci_unmap_addr(cons_rx_buf, mapping));
2376 cons_bd = &bp->rx_desc_ring[RX_RING(cons)][RX_IDX(cons)];
2377 prod_bd = &bp->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
2378 prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
2379 prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;
2383 bnx2_get_hw_rx_cons(struct bnx2 *bp)
2385 u16 cons = bp->status_blk->status_rx_quick_consumer_index0;
2387 if (unlikely((cons & MAX_RX_DESC_CNT) == MAX_RX_DESC_CNT))
2393 bnx2_rx_int(struct bnx2 *bp, int budget)
2395 u16 hw_cons, sw_cons, sw_ring_cons, sw_prod, sw_ring_prod;
2396 struct l2_fhdr *rx_hdr;
2399 hw_cons = bnx2_get_hw_rx_cons(bp);
2400 sw_cons = bp->rx_cons;
2401 sw_prod = bp->rx_prod;
2403 /* Memory barrier necessary as speculative reads of the rx
2404 * buffer can be ahead of the index in the status block
2407 while (sw_cons != hw_cons) {
2410 struct sw_bd *rx_buf;
2411 struct sk_buff *skb;
2412 dma_addr_t dma_addr;
2414 sw_ring_cons = RX_RING_IDX(sw_cons);
2415 sw_ring_prod = RX_RING_IDX(sw_prod);
2417 rx_buf = &bp->rx_buf_ring[sw_ring_cons];
2422 dma_addr = pci_unmap_addr(rx_buf, mapping);
2424 pci_dma_sync_single_for_cpu(bp->pdev, dma_addr,
2425 bp->rx_offset + RX_COPY_THRESH, PCI_DMA_FROMDEVICE);
2427 rx_hdr = (struct l2_fhdr *) skb->data;
2428 len = rx_hdr->l2_fhdr_pkt_len - 4;
2430 if ((status = rx_hdr->l2_fhdr_status) &
2431 (L2_FHDR_ERRORS_BAD_CRC |
2432 L2_FHDR_ERRORS_PHY_DECODE |
2433 L2_FHDR_ERRORS_ALIGNMENT |
2434 L2_FHDR_ERRORS_TOO_SHORT |
2435 L2_FHDR_ERRORS_GIANT_FRAME)) {
2440 /* Since we don't have a jumbo ring, copy small packets if the MTU is over 1500. */
2443 if ((bp->dev->mtu > 1500) && (len <= RX_COPY_THRESH)) {
2444 struct sk_buff *new_skb;
2446 new_skb = netdev_alloc_skb(bp->dev, len + 2);
2447 if (new_skb == NULL)
2451 skb_copy_from_linear_data_offset(skb, bp->rx_offset - 2,
2452 new_skb->data, len + 2);
2453 skb_reserve(new_skb, 2);
2454 skb_put(new_skb, len);
2456 bnx2_reuse_rx_skb(bp, skb,
2457 sw_ring_cons, sw_ring_prod);
2461 else if (bnx2_alloc_rx_skb(bp, sw_ring_prod) == 0) {
2462 pci_unmap_single(bp->pdev, dma_addr,
2463 bp->rx_buf_use_size, PCI_DMA_FROMDEVICE);
2465 skb_reserve(skb, bp->rx_offset);
2470 bnx2_reuse_rx_skb(bp, skb,
2471 sw_ring_cons, sw_ring_prod);
2475 skb->protocol = eth_type_trans(skb, bp->dev);
2477 if ((len > (bp->dev->mtu + ETH_HLEN)) &&
2478 (ntohs(skb->protocol) != 0x8100)) {
2485 skb->ip_summed = CHECKSUM_NONE;
2487 (status & (L2_FHDR_STATUS_TCP_SEGMENT |
2488 L2_FHDR_STATUS_UDP_DATAGRAM))) {
2490 if (likely((status & (L2_FHDR_ERRORS_TCP_XSUM |
2491 L2_FHDR_ERRORS_UDP_XSUM)) == 0))
2492 skb->ip_summed = CHECKSUM_UNNECESSARY;
2496 if ((status & L2_FHDR_STATUS_L2_VLAN_TAG) && (bp->vlgrp != 0)) {
2497 vlan_hwaccel_receive_skb(skb, bp->vlgrp,
2498 rx_hdr->l2_fhdr_vlan_tag);
2502 netif_receive_skb(skb);
2504 bp->dev->last_rx = jiffies;
2508 sw_cons = NEXT_RX_BD(sw_cons);
2509 sw_prod = NEXT_RX_BD(sw_prod);
2511 if (rx_pkt == budget)
2514 /* Refresh hw_cons to see if there is new work */
2515 if (sw_cons == hw_cons) {
2516 hw_cons = bnx2_get_hw_rx_cons(bp);
2520 bp->rx_cons = sw_cons;
2521 bp->rx_prod = sw_prod;
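/* Publish the new host rx producer index and byte sequence through the rx
 * mailbox so the chip can start using the buffers posted above. */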
2523 REG_WR16(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BDIDX, sw_prod);
2525 REG_WR(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BSEQ, bp->rx_prod_bseq);
2533 /* MSI ISR - The only difference between this and the INTx ISR
2534 * is that the MSI interrupt is always serviced. */
2537 bnx2_msi(int irq, void *dev_instance)
2539 struct net_device *dev = dev_instance;
2540 struct bnx2 *bp = netdev_priv(dev);
2542 prefetch(bp->status_blk);
2543 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
2544 BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
2545 BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
2547 /* Return here if interrupt is disabled. */
2548 if (unlikely(atomic_read(&bp->intr_sem) != 0))
2551 netif_rx_schedule(dev, &bp->napi);
2557 bnx2_msi_1shot(int irq, void *dev_instance)
2559 struct net_device *dev = dev_instance;
2560 struct bnx2 *bp = netdev_priv(dev);
2562 prefetch(bp->status_blk);
2564 /* Return here if interrupt is disabled. */
2565 if (unlikely(atomic_read(&bp->intr_sem) != 0))
2568 netif_rx_schedule(dev, &bp->napi);
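/* In one-shot MSI mode the chip masks further interrupts on its own after
 * signalling, which is presumably why this handler can skip the explicit
 * BNX2_PCICFG_INT_ACK_CMD mask write done in bnx2_msi() above. */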
2574 bnx2_interrupt(int irq, void *dev_instance)
2576 struct net_device *dev = dev_instance;
2577 struct bnx2 *bp = netdev_priv(dev);
2578 struct status_block *sblk = bp->status_blk;
2580 /* When using INTx, it is possible for the interrupt to arrive
2581 * at the CPU before the status block that was posted prior to the
2582 * interrupt is visible in memory. Reading a register will flush the
2583 * status block. When using MSI, the MSI message will always complete
2584 * after the status block write. */
2586 if ((sblk->status_idx == bp->last_status_idx) &&
2587 (REG_RD(bp, BNX2_PCICFG_MISC_STATUS) &
2588 BNX2_PCICFG_MISC_STATUS_INTA_VALUE))
2591 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
2592 BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
2593 BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
2595 /* Read back to deassert IRQ immediately to avoid too many
2596 * spurious interrupts. */
2598 REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD);
2600 /* Return here if interrupt is shared and is disabled. */
2601 if (unlikely(atomic_read(&bp->intr_sem) != 0))
2604 if (netif_rx_schedule_prep(dev, &bp->napi)) {
2605 bp->last_status_idx = sblk->status_idx;
2606 __netif_rx_schedule(dev, &bp->napi);
2612 #define STATUS_ATTN_EVENTS (STATUS_ATTN_BITS_LINK_STATE | \
2613 STATUS_ATTN_BITS_TIMER_ABORT)
2616 bnx2_has_work(struct bnx2 *bp)
2618 struct status_block *sblk = bp->status_blk;
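/* Work is pending if the rx or tx consumer indexes in the status block have
 * moved past what the driver has processed, or if an attention event has
 * been asserted but not yet acknowledged. */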
2620 if ((bnx2_get_hw_rx_cons(bp) != bp->rx_cons) ||
2621 (sblk->status_tx_quick_consumer_index0 != bp->hw_tx_cons))
2624 if ((sblk->status_attn_bits & STATUS_ATTN_EVENTS) !=
2625 (sblk->status_attn_bits_ack & STATUS_ATTN_EVENTS))
2631 static int bnx2_poll_work(struct bnx2 *bp, int work_done, int budget)
2633 struct status_block *sblk = bp->status_blk;
2634 u32 status_attn_bits = sblk->status_attn_bits;
2635 u32 status_attn_bits_ack = sblk->status_attn_bits_ack;
2637 if ((status_attn_bits & STATUS_ATTN_EVENTS) !=
2638 (status_attn_bits_ack & STATUS_ATTN_EVENTS)) {
2642 /* This is needed to take care of transient status
2643 * during link changes. */
2645 REG_WR(bp, BNX2_HC_COMMAND,
2646 bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
2647 REG_RD(bp, BNX2_HC_COMMAND);
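/* The read back flushes the posted write so the coalesce-now command
 * reaches the chip before the rings are serviced below. */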
2650 if (sblk->status_tx_quick_consumer_index0 != bp->hw_tx_cons)
2653 if (bnx2_get_hw_rx_cons(bp) != bp->rx_cons)
2654 work_done += bnx2_rx_int(bp, budget - work_done);
2659 static int bnx2_poll(struct napi_struct *napi, int budget)
2661 struct bnx2 *bp = container_of(napi, struct bnx2, napi);
2663 struct status_block *sblk = bp->status_blk;
2666 work_done = bnx2_poll_work(bp, work_done, budget);
2668 if (unlikely(work_done >= budget))
2671 /* bp->last_status_idx is used below to tell the hw how
2672 * much work has been processed, so we must read it before
2673 * checking for more work. */
2675 bp->last_status_idx = sblk->status_idx;
2677 if (likely(!bnx2_has_work(bp))) {
2678 netif_rx_complete(bp->dev, napi);
2679 if (likely(bp->flags & USING_MSI_FLAG)) {
2680 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
2681 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
2682 bp->last_status_idx);
2685 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
2686 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
2687 BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
2688 bp->last_status_idx);
2690 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
2691 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
2692 bp->last_status_idx);
2700 /* Called with rtnl_lock from vlan functions and also netif_tx_lock
2701 * from set_multicast. */
2704 bnx2_set_rx_mode(struct net_device *dev)
2706 struct bnx2 *bp = netdev_priv(dev);
2707 u32 rx_mode, sort_mode;
2710 spin_lock_bh(&bp->phy_lock);
2712 rx_mode = bp->rx_mode & ~(BNX2_EMAC_RX_MODE_PROMISCUOUS |
2713 BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG);
2714 sort_mode = 1 | BNX2_RPM_SORT_USER0_BC_EN;
2716 if (!bp->vlgrp && !(bp->flags & ASF_ENABLE_FLAG))
2717 rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
2719 if (!(bp->flags & ASF_ENABLE_FLAG))
2720 rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
2722 if (dev->flags & IFF_PROMISC) {
2723 /* Promiscuous mode. */
2724 rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
2725 sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN |
2726 BNX2_RPM_SORT_USER0_PROM_VLAN;
2728 else if (dev->flags & IFF_ALLMULTI) {
2729 for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
2730 REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
2733 sort_mode |= BNX2_RPM_SORT_USER0_MC_EN;
2736 /* Accept one or more multicast addresses. */
2737 struct dev_mc_list *mclist;
2738 u32 mc_filter[NUM_MC_HASH_REGISTERS];
2743 memset(mc_filter, 0, 4 * NUM_MC_HASH_REGISTERS);
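/* Each multicast address is hashed with CRC32; a byte of the CRC selects one
 * of 256 filter bits, with bits 7:5 choosing one of the eight 32-bit hash
 * registers and the remaining bits the position within it. */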
2745 for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
2746 i++, mclist = mclist->next) {
2748 crc = ether_crc_le(ETH_ALEN, mclist->dmi_addr);
2750 regidx = (bit & 0xe0) >> 5;
2752 mc_filter[regidx] |= (1 << bit);
2755 for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
2756 REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
2760 sort_mode |= BNX2_RPM_SORT_USER0_MC_HSH_EN;
2763 if (rx_mode != bp->rx_mode) {
2764 bp->rx_mode = rx_mode;
2765 REG_WR(bp, BNX2_EMAC_RX_MODE, rx_mode);
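/* Reprogram the sort-user0 rules: clear the register, load the new sort
 * mode, then set the enable bit. */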
2768 REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
2769 REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode);
2770 REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode | BNX2_RPM_SORT_USER0_ENA);
2772 spin_unlock_bh(&bp->phy_lock);
2776 load_rv2p_fw(struct bnx2 *bp, u32 *rv2p_code, u32 rv2p_code_len,
2783 for (i = 0; i < rv2p_code_len; i += 8) {
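/* Each 8-byte firmware word is written as a high/low instruction pair and
 * then committed to the selected RV2P processor at word index i / 8. */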
2784 REG_WR(bp, BNX2_RV2P_INSTR_HIGH, cpu_to_le32(*rv2p_code));
2786 REG_WR(bp, BNX2_RV2P_INSTR_LOW, cpu_to_le32(*rv2p_code));
2789 if (rv2p_proc == RV2P_PROC1) {
2790 val = (i / 8) | BNX2_RV2P_PROC1_ADDR_CMD_RDWR;
2791 REG_WR(bp, BNX2_RV2P_PROC1_ADDR_CMD, val);
2794 val = (i / 8) | BNX2_RV2P_PROC2_ADDR_CMD_RDWR;
2795 REG_WR(bp, BNX2_RV2P_PROC2_ADDR_CMD, val);
2799 /* Reset the processor, un-stall is done later. */
2800 if (rv2p_proc == RV2P_PROC1) {
2801 REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC1_RESET);
2804 REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC2_RESET);
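/* Halt the target CPU, copy each firmware section (text, data, sbss, bss,
 * rodata) into its scratchpad, load the program counter and restart it. */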
2809 load_cpu_fw(struct bnx2 *bp, struct cpu_reg *cpu_reg, struct fw_info *fw)
2816 val = REG_RD_IND(bp, cpu_reg->mode);
2817 val |= cpu_reg->mode_value_halt;
2818 REG_WR_IND(bp, cpu_reg->mode, val);
2819 REG_WR_IND(bp, cpu_reg->state, cpu_reg->state_value_clear);
2821 /* Load the Text area. */
2822 offset = cpu_reg->spad_base + (fw->text_addr - cpu_reg->mips_view_base);
2826 rc = zlib_inflate_blob(fw->text, FW_BUF_SIZE, fw->gz_text,
2831 for (j = 0; j < (fw->text_len / 4); j++, offset += 4) {
2832 REG_WR_IND(bp, offset, cpu_to_le32(fw->text[j]));
2836 /* Load the Data area. */
2837 offset = cpu_reg->spad_base + (fw->data_addr - cpu_reg->mips_view_base);
2841 for (j = 0; j < (fw->data_len / 4); j++, offset += 4) {
2842 REG_WR_IND(bp, offset, fw->data[j]);
2846 /* Load the SBSS area. */
2847 offset = cpu_reg->spad_base + (fw->sbss_addr - cpu_reg->mips_view_base);
2851 for (j = 0; j < (fw->sbss_len / 4); j++, offset += 4) {
2852 REG_WR_IND(bp, offset, 0);
2856 /* Load the BSS area. */
2857 offset = cpu_reg->spad_base + (fw->bss_addr - cpu_reg->mips_view_base);
2861 for (j = 0; j < (fw->bss_len/4); j++, offset += 4) {
2862 REG_WR_IND(bp, offset, 0);
2866 /* Load the Read-Only area. */
2867 offset = cpu_reg->spad_base +
2868 (fw->rodata_addr - cpu_reg->mips_view_base);
2872 for (j = 0; j < (fw->rodata_len / 4); j++, offset += 4) {
2873 REG_WR_IND(bp, offset, fw->rodata[j]);
2877 /* Clear the pre-fetch instruction. */
2878 REG_WR_IND(bp, cpu_reg->inst, 0);
2879 REG_WR_IND(bp, cpu_reg->pc, fw->start_addr);
2881 /* Start the CPU. */
2882 val = REG_RD_IND(bp, cpu_reg->mode);
2883 val &= ~cpu_reg->mode_value_halt;
2884 REG_WR_IND(bp, cpu_reg->state, cpu_reg->state_value_clear);
2885 REG_WR_IND(bp, cpu_reg->mode, val);
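/* Load firmware into each on-chip processor in turn: the two RV2P engines,
 * then the RX, TX, TX patch-up, completion and command processors. */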
2891 bnx2_init_cpus(struct bnx2 *bp)
2893 struct cpu_reg cpu_reg;
2898 /* Initialize the RV2P processor. */
2899 text = vmalloc(FW_BUF_SIZE);
2902 rc = zlib_inflate_blob(text, FW_BUF_SIZE, bnx2_rv2p_proc1, sizeof(bnx2_rv2p_proc1));
2906 load_rv2p_fw(bp, text, rc /* == len */, RV2P_PROC1);
2908 rc = zlib_inflate_blob(text, FW_BUF_SIZE, bnx2_rv2p_proc2, sizeof(bnx2_rv2p_proc2));
2912 load_rv2p_fw(bp, text, rc /* == len */, RV2P_PROC2);
2914 /* Initialize the RX Processor. */
2915 cpu_reg.mode = BNX2_RXP_CPU_MODE;
2916 cpu_reg.mode_value_halt = BNX2_RXP_CPU_MODE_SOFT_HALT;
2917 cpu_reg.mode_value_sstep = BNX2_RXP_CPU_MODE_STEP_ENA;
2918 cpu_reg.state = BNX2_RXP_CPU_STATE;
2919 cpu_reg.state_value_clear = 0xffffff;
2920 cpu_reg.gpr0 = BNX2_RXP_CPU_REG_FILE;
2921 cpu_reg.evmask = BNX2_RXP_CPU_EVENT_MASK;
2922 cpu_reg.pc = BNX2_RXP_CPU_PROGRAM_COUNTER;
2923 cpu_reg.inst = BNX2_RXP_CPU_INSTRUCTION;
2924 cpu_reg.bp = BNX2_RXP_CPU_HW_BREAKPOINT;
2925 cpu_reg.spad_base = BNX2_RXP_SCRATCH;
2926 cpu_reg.mips_view_base = 0x8000000;
2928 if (CHIP_NUM(bp) == CHIP_NUM_5709)
2929 fw = &bnx2_rxp_fw_09;
2931 fw = &bnx2_rxp_fw_06;
2934 rc = load_cpu_fw(bp, &cpu_reg, fw);
2938 /* Initialize the TX Processor. */
2939 cpu_reg.mode = BNX2_TXP_CPU_MODE;
2940 cpu_reg.mode_value_halt = BNX2_TXP_CPU_MODE_SOFT_HALT;
2941 cpu_reg.mode_value_sstep = BNX2_TXP_CPU_MODE_STEP_ENA;
2942 cpu_reg.state = BNX2_TXP_CPU_STATE;
2943 cpu_reg.state_value_clear = 0xffffff;
2944 cpu_reg.gpr0 = BNX2_TXP_CPU_REG_FILE;
2945 cpu_reg.evmask = BNX2_TXP_CPU_EVENT_MASK;
2946 cpu_reg.pc = BNX2_TXP_CPU_PROGRAM_COUNTER;
2947 cpu_reg.inst = BNX2_TXP_CPU_INSTRUCTION;
2948 cpu_reg.bp = BNX2_TXP_CPU_HW_BREAKPOINT;
2949 cpu_reg.spad_base = BNX2_TXP_SCRATCH;
2950 cpu_reg.mips_view_base = 0x8000000;
2952 if (CHIP_NUM(bp) == CHIP_NUM_5709)
2953 fw = &bnx2_txp_fw_09;
2955 fw = &bnx2_txp_fw_06;
2958 rc = load_cpu_fw(bp, &cpu_reg, fw);
2962 /* Initialize the TX Patch-up Processor. */
2963 cpu_reg.mode = BNX2_TPAT_CPU_MODE;
2964 cpu_reg.mode_value_halt = BNX2_TPAT_CPU_MODE_SOFT_HALT;
2965 cpu_reg.mode_value_sstep = BNX2_TPAT_CPU_MODE_STEP_ENA;
2966 cpu_reg.state = BNX2_TPAT_CPU_STATE;
2967 cpu_reg.state_value_clear = 0xffffff;
2968 cpu_reg.gpr0 = BNX2_TPAT_CPU_REG_FILE;
2969 cpu_reg.evmask = BNX2_TPAT_CPU_EVENT_MASK;
2970 cpu_reg.pc = BNX2_TPAT_CPU_PROGRAM_COUNTER;
2971 cpu_reg.inst = BNX2_TPAT_CPU_INSTRUCTION;
2972 cpu_reg.bp = BNX2_TPAT_CPU_HW_BREAKPOINT;
2973 cpu_reg.spad_base = BNX2_TPAT_SCRATCH;
2974 cpu_reg.mips_view_base = 0x8000000;
2976 if (CHIP_NUM(bp) == CHIP_NUM_5709)
2977 fw = &bnx2_tpat_fw_09;
2979 fw = &bnx2_tpat_fw_06;
2982 rc = load_cpu_fw(bp, &cpu_reg, fw);
2986 /* Initialize the Completion Processor. */
2987 cpu_reg.mode = BNX2_COM_CPU_MODE;
2988 cpu_reg.mode_value_halt = BNX2_COM_CPU_MODE_SOFT_HALT;
2989 cpu_reg.mode_value_sstep = BNX2_COM_CPU_MODE_STEP_ENA;
2990 cpu_reg.state = BNX2_COM_CPU_STATE;
2991 cpu_reg.state_value_clear = 0xffffff;
2992 cpu_reg.gpr0 = BNX2_COM_CPU_REG_FILE;
2993 cpu_reg.evmask = BNX2_COM_CPU_EVENT_MASK;
2994 cpu_reg.pc = BNX2_COM_CPU_PROGRAM_COUNTER;
2995 cpu_reg.inst = BNX2_COM_CPU_INSTRUCTION;
2996 cpu_reg.bp = BNX2_COM_CPU_HW_BREAKPOINT;
2997 cpu_reg.spad_base = BNX2_COM_SCRATCH;
2998 cpu_reg.mips_view_base = 0x8000000;
3000 if (CHIP_NUM(bp) == CHIP_NUM_5709)
3001 fw = &bnx2_com_fw_09;
3003 fw = &bnx2_com_fw_06;
3006 rc = load_cpu_fw(bp, &cpu_reg, fw);
3010 /* Initialize the Command Processor. */
3011 cpu_reg.mode = BNX2_CP_CPU_MODE;
3012 cpu_reg.mode_value_halt = BNX2_CP_CPU_MODE_SOFT_HALT;
3013 cpu_reg.mode_value_sstep = BNX2_CP_CPU_MODE_STEP_ENA;
3014 cpu_reg.state = BNX2_CP_CPU_STATE;
3015 cpu_reg.state_value_clear = 0xffffff;
3016 cpu_reg.gpr0 = BNX2_CP_CPU_REG_FILE;
3017 cpu_reg.evmask = BNX2_CP_CPU_EVENT_MASK;
3018 cpu_reg.pc = BNX2_CP_CPU_PROGRAM_COUNTER;
3019 cpu_reg.inst = BNX2_CP_CPU_INSTRUCTION;
3020 cpu_reg.bp = BNX2_CP_CPU_HW_BREAKPOINT;
3021 cpu_reg.spad_base = BNX2_CP_SCRATCH;
3022 cpu_reg.mips_view_base = 0x8000000;
3024 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
3025 fw = &bnx2_cp_fw_09;
3028 rc = load_cpu_fw(bp, &cpu_reg, fw);
3038 bnx2_set_power_state(struct bnx2 *bp, pci_power_t state)
3042 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);
3048 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
3049 (pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
3050 PCI_PM_CTRL_PME_STATUS);
3052 if (pmcsr & PCI_PM_CTRL_STATE_MASK)
3053 /* delay required during transition out of D3hot */
3056 val = REG_RD(bp, BNX2_EMAC_MODE);
3057 val |= BNX2_EMAC_MODE_MPKT_RCVD | BNX2_EMAC_MODE_ACPI_RCVD;
3058 val &= ~BNX2_EMAC_MODE_MPKT;
3059 REG_WR(bp, BNX2_EMAC_MODE, val);
3061 val = REG_RD(bp, BNX2_RPM_CONFIG);
3062 val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
3063 REG_WR(bp, BNX2_RPM_CONFIG, val);
3074 autoneg = bp->autoneg;
3075 advertising = bp->advertising;
3077 if (bp->phy_port == PORT_TP) {
3078 bp->autoneg = AUTONEG_SPEED;
3079 bp->advertising = ADVERTISED_10baseT_Half |
3080 ADVERTISED_10baseT_Full |
3081 ADVERTISED_100baseT_Half |
3082 ADVERTISED_100baseT_Full |
3086 spin_lock_bh(&bp->phy_lock);
3087 bnx2_setup_phy(bp, bp->phy_port);
3088 spin_unlock_bh(&bp->phy_lock);
3090 bp->autoneg = autoneg;
3091 bp->advertising = advertising;
3093 bnx2_set_mac_addr(bp);
3095 val = REG_RD(bp, BNX2_EMAC_MODE);
3097 /* Enable port mode. */
3098 val &= ~BNX2_EMAC_MODE_PORT;
3099 val |= BNX2_EMAC_MODE_MPKT_RCVD |
3100 BNX2_EMAC_MODE_ACPI_RCVD |
3101 BNX2_EMAC_MODE_MPKT;
3102 if (bp->phy_port == PORT_TP)
3103 val |= BNX2_EMAC_MODE_PORT_MII;
3105 val |= BNX2_EMAC_MODE_PORT_GMII;
3106 if (bp->line_speed == SPEED_2500)
3107 val |= BNX2_EMAC_MODE_25G_MODE;
3110 REG_WR(bp, BNX2_EMAC_MODE, val);
3112 /* receive all multicast */
3113 for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
3114 REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
3117 REG_WR(bp, BNX2_EMAC_RX_MODE,
3118 BNX2_EMAC_RX_MODE_SORT_MODE);
3120 val = 1 | BNX2_RPM_SORT_USER0_BC_EN |
3121 BNX2_RPM_SORT_USER0_MC_EN;
3122 REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
3123 REG_WR(bp, BNX2_RPM_SORT_USER0, val);
3124 REG_WR(bp, BNX2_RPM_SORT_USER0, val |
3125 BNX2_RPM_SORT_USER0_ENA);
3127 /* Need to enable EMAC and RPM for WOL. */
3128 REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
3129 BNX2_MISC_ENABLE_SET_BITS_RX_PARSER_MAC_ENABLE |
3130 BNX2_MISC_ENABLE_SET_BITS_TX_HEADER_Q_ENABLE |
3131 BNX2_MISC_ENABLE_SET_BITS_EMAC_ENABLE);
3133 val = REG_RD(bp, BNX2_RPM_CONFIG);
3134 val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
3135 REG_WR(bp, BNX2_RPM_CONFIG, val);
3137 wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
3140 wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
3143 if (!(bp->flags & NO_WOL_FLAG))
3144 bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT3 | wol_msg, 0);
3146 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
3147 if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
3148 (CHIP_ID(bp) == CHIP_ID_5706_A1)) {
3157 pmcsr |= PCI_PM_CTRL_PME_ENABLE;
3159 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
3162 /* No more memory access after this point until
3163 * device is brought back to D0. */
3175 bnx2_acquire_nvram_lock(struct bnx2 *bp)
3180 /* Request access to the flash interface. */
3181 REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_SET2);
3182 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3183 val = REG_RD(bp, BNX2_NVM_SW_ARB);
3184 if (val & BNX2_NVM_SW_ARB_ARB_ARB2)
3190 if (j >= NVRAM_TIMEOUT_COUNT)
3197 bnx2_release_nvram_lock(struct bnx2 *bp)
3202 /* Relinquish nvram interface. */
3203 REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_CLR2);
3205 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3206 val = REG_RD(bp, BNX2_NVM_SW_ARB);
3207 if (!(val & BNX2_NVM_SW_ARB_ARB_ARB2))
3213 if (j >= NVRAM_TIMEOUT_COUNT)
3221 bnx2_enable_nvram_write(struct bnx2 *bp)
3225 val = REG_RD(bp, BNX2_MISC_CFG);
3226 REG_WR(bp, BNX2_MISC_CFG, val | BNX2_MISC_CFG_NVM_WR_EN_PCI);
3228 if (bp->flash_info->flags & BNX2_NV_WREN) {
3231 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
3232 REG_WR(bp, BNX2_NVM_COMMAND,
3233 BNX2_NVM_COMMAND_WREN | BNX2_NVM_COMMAND_DOIT);
3235 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3238 val = REG_RD(bp, BNX2_NVM_COMMAND);
3239 if (val & BNX2_NVM_COMMAND_DONE)
3243 if (j >= NVRAM_TIMEOUT_COUNT)
3250 bnx2_disable_nvram_write(struct bnx2 *bp)
3254 val = REG_RD(bp, BNX2_MISC_CFG);
3255 REG_WR(bp, BNX2_MISC_CFG, val & ~BNX2_MISC_CFG_NVM_WR_EN);
3260 bnx2_enable_nvram_access(struct bnx2 *bp)
3264 val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
3265 /* Enable both bits, even on read. */
3266 REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
3267 val | BNX2_NVM_ACCESS_ENABLE_EN | BNX2_NVM_ACCESS_ENABLE_WR_EN);
3271 bnx2_disable_nvram_access(struct bnx2 *bp)
3275 val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
3276 /* Disable both bits, even after read. */
3277 REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
3278 val & ~(BNX2_NVM_ACCESS_ENABLE_EN |
3279 BNX2_NVM_ACCESS_ENABLE_WR_EN));
3283 bnx2_nvram_erase_page(struct bnx2 *bp, u32 offset)
3288 if (bp->flash_info->flags & BNX2_NV_BUFFERED)
3289 /* Buffered flash, no erase needed */
3292 /* Build an erase command */
3293 cmd = BNX2_NVM_COMMAND_ERASE | BNX2_NVM_COMMAND_WR |
3294 BNX2_NVM_COMMAND_DOIT;
3296 /* Need to clear DONE bit separately. */
3297 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
3299 /* Address of the NVRAM page to erase. */
3300 REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
3302 /* Issue an erase command. */
3303 REG_WR(bp, BNX2_NVM_COMMAND, cmd);
3305 /* Wait for completion. */
3306 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3311 val = REG_RD(bp, BNX2_NVM_COMMAND);
3312 if (val & BNX2_NVM_COMMAND_DONE)
3316 if (j >= NVRAM_TIMEOUT_COUNT)
3323 bnx2_nvram_read_dword(struct bnx2 *bp, u32 offset, u8 *ret_val, u32 cmd_flags)
3328 /* Build the command word. */
3329 cmd = BNX2_NVM_COMMAND_DOIT | cmd_flags;
3331 /* Calculate the offset within a buffered flash; not needed on the 5709. */
3332 if (bp->flash_info->flags & BNX2_NV_TRANSLATE) {
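/* Illustrative example, assuming a buffered part with 264-byte pages
 * addressed with 9 page bits: byte offset 300 maps to
 * ((300 / 264) << 9) + (300 % 264) = 512 + 36 = 548. */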
3333 offset = ((offset / bp->flash_info->page_size) <<
3334 bp->flash_info->page_bits) +
3335 (offset % bp->flash_info->page_size);
3338 /* Need to clear DONE bit separately. */
3339 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
3341 /* Address of the NVRAM to read from. */
3342 REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
3344 /* Issue a read command. */
3345 REG_WR(bp, BNX2_NVM_COMMAND, cmd);
3347 /* Wait for completion. */
3348 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3353 val = REG_RD(bp, BNX2_NVM_COMMAND);
3354 if (val & BNX2_NVM_COMMAND_DONE) {
3355 val = REG_RD(bp, BNX2_NVM_READ);
3357 val = be32_to_cpu(val);
3358 memcpy(ret_val, &val, 4);
3362 if (j >= NVRAM_TIMEOUT_COUNT)
3370 bnx2_nvram_write_dword(struct bnx2 *bp, u32 offset, u8 *val, u32 cmd_flags)
3375 /* Build the command word. */
3376 cmd = BNX2_NVM_COMMAND_DOIT | BNX2_NVM_COMMAND_WR | cmd_flags;
3378 /* Calculate the offset within a buffered flash; not needed on the 5709. */
3379 if (bp->flash_info->flags & BNX2_NV_TRANSLATE) {
3380 offset = ((offset / bp->flash_info->page_size) <<
3381 bp->flash_info->page_bits) +
3382 (offset % bp->flash_info->page_size);
3385 /* Need to clear DONE bit separately. */
3386 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
3388 memcpy(&val32, val, 4);
3389 val32 = cpu_to_be32(val32);
3391 /* Write the data. */
3392 REG_WR(bp, BNX2_NVM_WRITE, val32);
3394 /* Address of the NVRAM to write to. */
3395 REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
3397 /* Issue the write command. */
3398 REG_WR(bp, BNX2_NVM_COMMAND, cmd);
3400 /* Wait for completion. */
3401 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3404 if (REG_RD(bp, BNX2_NVM_COMMAND) & BNX2_NVM_COMMAND_DONE)
3407 if (j >= NVRAM_TIMEOUT_COUNT)
3414 bnx2_init_nvram(struct bnx2 *bp)
3417 int j, entry_count, rc = 0;
3418 struct flash_spec *flash;
3420 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
3421 bp->flash_info = &flash_5709;
3422 goto get_flash_size;
3425 /* Determine the selected interface. */
3426 val = REG_RD(bp, BNX2_NVM_CFG1);
3428 entry_count = ARRAY_SIZE(flash_table);
3430 if (val & 0x40000000) {
3432 /* Flash interface has been reconfigured */
3433 for (j = 0, flash = &flash_table[0]; j < entry_count;
3435 if ((val & FLASH_BACKUP_STRAP_MASK) ==
3436 (flash->config1 & FLASH_BACKUP_STRAP_MASK)) {
3437 bp->flash_info = flash;
3444 /* Not yet reconfigured */
3446 if (val & (1 << 23))
3447 mask = FLASH_BACKUP_STRAP_MASK;
3449 mask = FLASH_STRAP_MASK;
3451 for (j = 0, flash = &flash_table[0]; j < entry_count;
3454 if ((val & mask) == (flash->strapping & mask)) {
3455 bp->flash_info = flash;
3457 /* Request access to the flash interface. */
3458 if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
3461 /* Enable access to flash interface */
3462 bnx2_enable_nvram_access(bp);
3464 /* Reconfigure the flash interface */
3465 REG_WR(bp, BNX2_NVM_CFG1, flash->config1);
3466 REG_WR(bp, BNX2_NVM_CFG2, flash->config2);
3467 REG_WR(bp, BNX2_NVM_CFG3, flash->config3);
3468 REG_WR(bp, BNX2_NVM_WRITE1, flash->write1);
3470 /* Disable access to flash interface */
3471 bnx2_disable_nvram_access(bp);
3472 bnx2_release_nvram_lock(bp);
3477 } /* if (val & 0x40000000) */
3479 if (j == entry_count) {
3480 bp->flash_info = NULL;
3481 printk(KERN_ALERT PFX "Unknown flash/EEPROM type.\n");
3486 val = REG_RD_IND(bp, bp->shmem_base + BNX2_SHARED_HW_CFG_CONFIG2);
3487 val &= BNX2_SHARED_HW_CFG2_NVM_SIZE_MASK;
3489 bp->flash_size = val;
3491 bp->flash_size = bp->flash_info->total_size;
3497 bnx2_nvram_read(struct bnx2 *bp, u32 offset, u8 *ret_buf,
3501 u32 cmd_flags, offset32, len32, extra;
3506 /* Request access to the flash interface. */
3507 if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
3510 /* Enable access to flash interface */
3511 bnx2_enable_nvram_access(bp);
3524 pre_len = 4 - (offset & 3);
3526 if (pre_len >= len32) {
3528 cmd_flags = BNX2_NVM_COMMAND_FIRST |
3529 BNX2_NVM_COMMAND_LAST;
3532 cmd_flags = BNX2_NVM_COMMAND_FIRST;
3535 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
3540 memcpy(ret_buf, buf + (offset & 3), pre_len);
3547 extra = 4 - (len32 & 3);
3548 len32 = (len32 + 4) & ~3;
3555 cmd_flags = BNX2_NVM_COMMAND_LAST;
3557 cmd_flags = BNX2_NVM_COMMAND_FIRST |
3558 BNX2_NVM_COMMAND_LAST;
3560 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
3562 memcpy(ret_buf, buf, 4 - extra);
3564 else if (len32 > 0) {
3567 /* Read the first word. */
3571 cmd_flags = BNX2_NVM_COMMAND_FIRST;
3573 rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, cmd_flags);
3575 /* Advance to the next dword. */
3580 while (len32 > 4 && rc == 0) {
3581 rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, 0);
3583 /* Advance to the next dword. */
3592 cmd_flags = BNX2_NVM_COMMAND_LAST;
3593 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
3595 memcpy(ret_buf, buf, 4 - extra);
3598 /* Disable access to flash interface */
3599 bnx2_disable_nvram_access(bp);
3601 bnx2_release_nvram_lock(bp);
3607 bnx2_nvram_write(struct bnx2 *bp, u32 offset, u8 *data_buf,
3610 u32 written, offset32, len32;
3611 u8 *buf, start[4], end[4], *align_buf = NULL, *flash_buffer = NULL;
3613 int align_start, align_end;
3618 align_start = align_end = 0;
3620 if ((align_start = (offset32 & 3))) {
3622 len32 += align_start;
3625 if ((rc = bnx2_nvram_read(bp, offset32, start, 4)))
3630 align_end = 4 - (len32 & 3);
3632 if ((rc = bnx2_nvram_read(bp, offset32 + len32 - 4, end, 4)))
3636 if (align_start || align_end) {
3637 align_buf = kmalloc(len32, GFP_KERNEL);
3638 if (align_buf == NULL)
3641 memcpy(align_buf, start, 4);
3644 memcpy(align_buf + len32 - 4, end, 4);
3646 memcpy(align_buf + align_start, data_buf, buf_size);
3650 if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
3651 flash_buffer = kmalloc(264, GFP_KERNEL);
3652 if (flash_buffer == NULL) {
3654 goto nvram_write_end;
3659 while ((written < len32) && (rc == 0)) {
3660 u32 page_start, page_end, data_start, data_end;
3661 u32 addr, cmd_flags;
3664 /* Find the page_start addr */
3665 page_start = offset32 + written;
3666 page_start -= (page_start % bp->flash_info->page_size);
3667 /* Find the page_end addr */
3668 page_end = page_start + bp->flash_info->page_size;
3669 /* Find the data_start addr */
3670 data_start = (written == 0) ? offset32 : page_start;
3671 /* Find the data_end addr */
3672 data_end = (page_end > offset32 + len32) ?
3673 (offset32 + len32) : page_end;
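/* For each flash page touched: read the whole page (non-buffered parts),
 * erase it, then write back the preserved head, the new data, and the
 * preserved tail before moving on to the next page. */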
3675 /* Request access to the flash interface. */
3676 if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
3677 goto nvram_write_end;
3679 /* Enable access to flash interface */
3680 bnx2_enable_nvram_access(bp);
3682 cmd_flags = BNX2_NVM_COMMAND_FIRST;
3683 if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
3686 /* Read the whole page into the buffer
3687 * (non-buffered flash only) */
3688 for (j = 0; j < bp->flash_info->page_size; j += 4) {
3689 if (j == (bp->flash_info->page_size - 4)) {
3690 cmd_flags |= BNX2_NVM_COMMAND_LAST;
3692 rc = bnx2_nvram_read_dword(bp,
3698 goto nvram_write_end;
3704 /* Enable writes to flash interface (unlock write-protect) */
3705 if ((rc = bnx2_enable_nvram_write(bp)) != 0)
3706 goto nvram_write_end;
3708 /* Loop to write back the buffer data from page_start to data_start. */
3711 if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
3712 /* Erase the page */
3713 if ((rc = bnx2_nvram_erase_page(bp, page_start)) != 0)
3714 goto nvram_write_end;
3716 /* Re-enable the write again for the actual write */
3717 bnx2_enable_nvram_write(bp);
3719 for (addr = page_start; addr < data_start;
3720 addr += 4, i += 4) {
3722 rc = bnx2_nvram_write_dword(bp, addr,
3723 &flash_buffer[i], cmd_flags);
3726 goto nvram_write_end;
3732 /* Loop to write the new data from data_start to data_end */
3733 for (addr = data_start; addr < data_end; addr += 4, i += 4) {
3734 if ((addr == page_end - 4) ||
3735 ((bp->flash_info->flags & BNX2_NV_BUFFERED) &&
3736 (addr == data_end - 4))) {
3738 cmd_flags |= BNX2_NVM_COMMAND_LAST;
3740 rc = bnx2_nvram_write_dword(bp, addr, buf,
3744 goto nvram_write_end;
3750 /* Loop to write back the buffer data from data_end to page_end. */
3752 if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
3753 for (addr = data_end; addr < page_end;
3754 addr += 4, i += 4) {
3756 if (addr == page_end-4) {
3757 cmd_flags = BNX2_NVM_COMMAND_LAST;
3759 rc = bnx2_nvram_write_dword(bp, addr,
3760 &flash_buffer[i], cmd_flags);
3763 goto nvram_write_end;
3769 /* Disable writes to flash interface (lock write-protect) */
3770 bnx2_disable_nvram_write(bp);
3772 /* Disable access to flash interface */
3773 bnx2_disable_nvram_access(bp);
3774 bnx2_release_nvram_lock(bp);
3776 /* Increment written */
3777 written += data_end - data_start;
3781 kfree(flash_buffer);
3787 bnx2_init_remote_phy(struct bnx2 *bp)
3791 bp->phy_flags &= ~REMOTE_PHY_CAP_FLAG;
3792 if (!(bp->phy_flags & PHY_SERDES_FLAG))
3795 val = REG_RD_IND(bp, bp->shmem_base + BNX2_FW_CAP_MB);
3796 if ((val & BNX2_FW_CAP_SIGNATURE_MASK) != BNX2_FW_CAP_SIGNATURE)
3799 if (val & BNX2_FW_CAP_REMOTE_PHY_CAPABLE) {
3800 bp->phy_flags |= REMOTE_PHY_CAP_FLAG;
3802 val = REG_RD_IND(bp, bp->shmem_base + BNX2_LINK_STATUS);
3803 if (val & BNX2_LINK_STATUS_SERDES_LINK)
3804 bp->phy_port = PORT_FIBRE;
3806 bp->phy_port = PORT_TP;
3808 if (netif_running(bp->dev)) {
3811 if (val & BNX2_LINK_STATUS_LINK_UP) {
3813 netif_carrier_on(bp->dev);
3816 netif_carrier_off(bp->dev);
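/* Acknowledge to the firmware that the driver supports the remote PHY
 * capability. */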
3818 sig = BNX2_DRV_ACK_CAP_SIGNATURE |
3819 BNX2_FW_CAP_REMOTE_PHY_CAPABLE;
3820 REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_ACK_CAP_MB,
3827 bnx2_reset_chip(struct bnx2 *bp, u32 reset_code)
3833 /* Wait for the current PCI transaction to complete before
3834 * issuing a reset. */
3835 REG_WR(bp, BNX2_MISC_ENABLE_CLR_BITS,
3836 BNX2_MISC_ENABLE_CLR_BITS_TX_DMA_ENABLE |
3837 BNX2_MISC_ENABLE_CLR_BITS_DMA_ENGINE_ENABLE |
3838 BNX2_MISC_ENABLE_CLR_BITS_RX_DMA_ENABLE |
3839 BNX2_MISC_ENABLE_CLR_BITS_HOST_COALESCE_ENABLE);
3840 val = REG_RD(bp, BNX2_MISC_ENABLE_CLR_BITS);
3843 /* Wait for the firmware to tell us it is ok to issue a reset. */
3844 bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT0 | reset_code, 1);
3846 /* Deposit a driver reset signature so the firmware knows that
3847 * this is a soft reset. */
3848 REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_RESET_SIGNATURE,
3849 BNX2_DRV_RESET_SIGNATURE_MAGIC);
3851 /* Do a dummy read to force the chip to complete all current transactions
3852 * before we issue a reset. */
3853 val = REG_RD(bp, BNX2_MISC_ID);
3855 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
3856 REG_WR(bp, BNX2_MISC_COMMAND, BNX2_MISC_COMMAND_SW_RESET);
3857 REG_RD(bp, BNX2_MISC_COMMAND);
3860 val = BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
3861 BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;
3863 pci_write_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG, val);
3866 val = BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
3867 BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
3868 BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;
3871 REG_WR(bp, BNX2_PCICFG_MISC_CONFIG, val);
3873 /* Reading back any register after chip reset will hang the
3874 * bus on 5706 A0 and A1. The msleep below provides plenty
3875 * of margin for write posting. */
3877 if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
3878 (CHIP_ID(bp) == CHIP_ID_5706_A1))
3881 /* Reset takes approximately 30 usec. */
3882 for (i = 0; i < 10; i++) {
3883 val = REG_RD(bp, BNX2_PCICFG_MISC_CONFIG);
3884 if ((val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
3885 BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) == 0)
3890 if (val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
3891 BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) {
3892 printk(KERN_ERR PFX "Chip reset did not complete\n");
3897 /* Make sure byte swapping is properly configured. */
3898 val = REG_RD(bp, BNX2_PCI_SWAP_DIAG0);
3899 if (val != 0x01020304) {
3900 printk(KERN_ERR PFX "Chip not in correct endian mode\n");
3904 /* Wait for the firmware to finish its initialization. */
3905 rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT1 | reset_code, 0);
3909 spin_lock_bh(&bp->phy_lock);
3910 old_port = bp->phy_port;
3911 bnx2_init_remote_phy(bp);
3912 if ((bp->phy_flags & REMOTE_PHY_CAP_FLAG) && old_port != bp->phy_port)
3913 bnx2_set_default_remote_link(bp);
3914 spin_unlock_bh(&bp->phy_lock);
3916 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
3917 /* Adjust the voltage regulator two steps lower. The default
3918 * of this register is 0x0000000e. */
3919 REG_WR(bp, BNX2_MISC_VREG_CONTROL, 0x000000fa);
3921 /* Remove bad rbuf memory from the free pool. */
3922 rc = bnx2_alloc_bad_rbuf(bp);
3929 bnx2_init_chip(struct bnx2 *bp)
3934 /* Make sure the interrupt is not active. */
3935 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
3937 val = BNX2_DMA_CONFIG_DATA_BYTE_SWAP |
3938 BNX2_DMA_CONFIG_DATA_WORD_SWAP |
3940 BNX2_DMA_CONFIG_CNTL_BYTE_SWAP |
3942 BNX2_DMA_CONFIG_CNTL_WORD_SWAP |
3943 DMA_READ_CHANS << 12 |
3944 DMA_WRITE_CHANS << 16;
3946 val |= (0x2 << 20) | (1 << 11);
3948 if ((bp->flags & PCIX_FLAG) && (bp->bus_speed_mhz == 133))
3951 if ((CHIP_NUM(bp) == CHIP_NUM_5706) &&
3952 (CHIP_ID(bp) != CHIP_ID_5706_A0) && !(bp->flags & PCIX_FLAG))
3953 val |= BNX2_DMA_CONFIG_CNTL_PING_PONG_DMA;
3955 REG_WR(bp, BNX2_DMA_CONFIG, val);
3957 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
3958 val = REG_RD(bp, BNX2_TDMA_CONFIG);
3959 val |= BNX2_TDMA_CONFIG_ONE_DMA;
3960 REG_WR(bp, BNX2_TDMA_CONFIG, val);
3963 if (bp->flags & PCIX_FLAG) {
3966 pci_read_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
3968 pci_write_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
3969 val16 & ~PCI_X_CMD_ERO);
3972 REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
3973 BNX2_MISC_ENABLE_SET_BITS_HOST_COALESCE_ENABLE |
3974 BNX2_MISC_ENABLE_STATUS_BITS_RX_V2P_ENABLE |
3975 BNX2_MISC_ENABLE_STATUS_BITS_CONTEXT_ENABLE);
3977 /* Initialize context mapping and zero out the quick contexts. The
3978 * context block must have already been enabled. */
3979 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
3980 rc = bnx2_init_5709_context(bp);
3984 bnx2_init_context(bp);
3986 if ((rc = bnx2_init_cpus(bp)) != 0)
3989 bnx2_init_nvram(bp);
3991 bnx2_set_mac_addr(bp);
3993 val = REG_RD(bp, BNX2_MQ_CONFIG);
3994 val &= ~BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE;
3995 val |= BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE_256;
3996 if (CHIP_ID(bp) == CHIP_ID_5709_A0 || CHIP_ID(bp) == CHIP_ID_5709_A1)
3997 val |= BNX2_MQ_CONFIG_HALT_DIS;
3999 REG_WR(bp, BNX2_MQ_CONFIG, val);
4001 val = 0x10000 + (MAX_CID_CNT * MB_KERNEL_CTX_SIZE);
4002 REG_WR(bp, BNX2_MQ_KNL_BYP_WIND_START, val);
4003 REG_WR(bp, BNX2_MQ_KNL_WIND_END, val);
4005 val = (BCM_PAGE_BITS - 8) << 24;
4006 REG_WR(bp, BNX2_RV2P_CONFIG, val);
4008 /* Configure page size. */
4009 val = REG_RD(bp, BNX2_TBDR_CONFIG);
4010 val &= ~BNX2_TBDR_CONFIG_PAGE_SIZE;
4011 val |= (BCM_PAGE_BITS - 8) << 24 | 0x40;
4012 REG_WR(bp, BNX2_TBDR_CONFIG, val);
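/* Seed the transmit backoff with a value derived from the MAC address,
 * presumably so that different ports do not back off in lock-step. */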
4014 val = bp->mac_addr[0] +
4015 (bp->mac_addr[1] << 8) +
4016 (bp->mac_addr[2] << 16) +
4018 (bp->mac_addr[4] << 8) +
4019 (bp->mac_addr[5] << 16);
4020 REG_WR(bp, BNX2_EMAC_BACKOFF_SEED, val);
4022 /* Program the MTU. Also include 4 bytes for CRC32. */
4023 val = bp->dev->mtu + ETH_HLEN + 4;
4024 if (val > (MAX_ETHERNET_PACKET_SIZE + 4))
4025 val |= BNX2_EMAC_RX_MTU_SIZE_JUMBO_ENA;
4026 REG_WR(bp, BNX2_EMAC_RX_MTU_SIZE, val);
4028 bp->last_status_idx = 0;
4029 bp->rx_mode = BNX2_EMAC_RX_MODE_SORT_MODE;
4031 /* Set up how to generate a link change interrupt. */
4032 REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);
4034 REG_WR(bp, BNX2_HC_STATUS_ADDR_L,
4035 (u64) bp->status_blk_mapping & 0xffffffff);
4036 REG_WR(bp, BNX2_HC_STATUS_ADDR_H, (u64) bp->status_blk_mapping >> 32);
4038 REG_WR(bp, BNX2_HC_STATISTICS_ADDR_L,
4039 (u64) bp->stats_blk_mapping & 0xffffffff);
4040 REG_WR(bp, BNX2_HC_STATISTICS_ADDR_H,
4041 (u64) bp->stats_blk_mapping >> 32);
4043 REG_WR(bp, BNX2_HC_TX_QUICK_CONS_TRIP,
4044 (bp->tx_quick_cons_trip_int << 16) | bp->tx_quick_cons_trip);
4046 REG_WR(bp, BNX2_HC_RX_QUICK_CONS_TRIP,
4047 (bp->rx_quick_cons_trip_int << 16) | bp->rx_quick_cons_trip);
4049 REG_WR(bp, BNX2_HC_COMP_PROD_TRIP,
4050 (bp->comp_prod_trip_int << 16) | bp->comp_prod_trip);
4052 REG_WR(bp, BNX2_HC_TX_TICKS, (bp->tx_ticks_int << 16) | bp->tx_ticks);
4054 REG_WR(bp, BNX2_HC_RX_TICKS, (bp->rx_ticks_int << 16) | bp->rx_ticks);
4056 REG_WR(bp, BNX2_HC_COM_TICKS,
4057 (bp->com_ticks_int << 16) | bp->com_ticks);
4059 REG_WR(bp, BNX2_HC_CMD_TICKS,
4060 (bp->cmd_ticks_int << 16) | bp->cmd_ticks);
4062 if (CHIP_NUM(bp) == CHIP_NUM_5708)
4063 REG_WR(bp, BNX2_HC_STATS_TICKS, 0);
4065 REG_WR(bp, BNX2_HC_STATS_TICKS, bp->stats_ticks);
4066 REG_WR(bp, BNX2_HC_STAT_COLLECT_TICKS, 0xbb8); /* 3ms */
4068 if (CHIP_ID(bp) == CHIP_ID_5706_A1)
4069 val = BNX2_HC_CONFIG_COLLECT_STATS;
4071 val = BNX2_HC_CONFIG_RX_TMR_MODE | BNX2_HC_CONFIG_TX_TMR_MODE |
4072 BNX2_HC_CONFIG_COLLECT_STATS;
4075 if (bp->flags & ONE_SHOT_MSI_FLAG)
4076 val |= BNX2_HC_CONFIG_ONE_SHOT;
4078 REG_WR(bp, BNX2_HC_CONFIG, val);
4080 /* Clear internal stats counters. */
4081 REG_WR(bp, BNX2_HC_COMMAND, BNX2_HC_COMMAND_CLR_STAT_NOW);
4083 REG_WR(bp, BNX2_HC_ATTN_BITS_ENABLE, STATUS_ATTN_EVENTS);
4085 /* Initialize the receive filter. */
4086 bnx2_set_rx_mode(bp->dev);
4088 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
4089 val = REG_RD(bp, BNX2_MISC_NEW_CORE_CTL);
4090 val |= BNX2_MISC_NEW_CORE_CTL_DMA_ENABLE;
4091 REG_WR(bp, BNX2_MISC_NEW_CORE_CTL, val);
4093 rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT2 | BNX2_DRV_MSG_CODE_RESET,
4096 REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS, BNX2_MISC_ENABLE_DEFAULT);
4097 REG_RD(bp, BNX2_MISC_ENABLE_SET_BITS);
4101 bp->hc_cmd = REG_RD(bp, BNX2_HC_COMMAND);
4107 bnx2_init_tx_context(struct bnx2 *bp, u32 cid)
4109 u32 val, offset0, offset1, offset2, offset3;
4111 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
4112 offset0 = BNX2_L2CTX_TYPE_XI;
4113 offset1 = BNX2_L2CTX_CMD_TYPE_XI;
4114 offset2 = BNX2_L2CTX_TBDR_BHADDR_HI_XI;
4115 offset3 = BNX2_L2CTX_TBDR_BHADDR_LO_XI;
4117 offset0 = BNX2_L2CTX_TYPE;
4118 offset1 = BNX2_L2CTX_CMD_TYPE;
4119 offset2 = BNX2_L2CTX_TBDR_BHADDR_HI;
4120 offset3 = BNX2_L2CTX_TBDR_BHADDR_LO;
4122 val = BNX2_L2CTX_TYPE_TYPE_L2 | BNX2_L2CTX_TYPE_SIZE_L2;
4123 CTX_WR(bp, GET_CID_ADDR(cid), offset0, val);
4125 val = BNX2_L2CTX_CMD_TYPE_TYPE_L2 | (8 << 16);
4126 CTX_WR(bp, GET_CID_ADDR(cid), offset1, val);
4128 val = (u64) bp->tx_desc_mapping >> 32;
4129 CTX_WR(bp, GET_CID_ADDR(cid), offset2, val);
4131 val = (u64) bp->tx_desc_mapping & 0xffffffff;
4132 CTX_WR(bp, GET_CID_ADDR(cid), offset3, val);
4136 bnx2_init_tx_ring(struct bnx2 *bp)
4141 bp->tx_wake_thresh = bp->tx_ring_size / 2;
4143 txbd = &bp->tx_desc_ring[MAX_TX_DESC_CNT];
4145 txbd->tx_bd_haddr_hi = (u64) bp->tx_desc_mapping >> 32;
4146 txbd->tx_bd_haddr_lo = (u64) bp->tx_desc_mapping & 0xffffffff;
4151 bp->tx_prod_bseq = 0;
4154 bp->tx_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BIDX;
4155 bp->tx_bseq_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BSEQ;
4157 bnx2_init_tx_context(bp, cid);
4161 bnx2_init_rx_ring(struct bnx2 *bp)
4165 u16 prod, ring_prod;
4168 /* 8 for CRC and VLAN */
4169 bp->rx_buf_use_size = bp->dev->mtu + ETH_HLEN + bp->rx_offset + 8;
4171 bp->rx_buf_size = bp->rx_buf_use_size + BNX2_RX_ALIGN;
4173 ring_prod = prod = bp->rx_prod = 0;
4175 bp->rx_prod_bseq = 0;
4177 for (i = 0; i < bp->rx_max_ring; i++) {
4180 rxbd = &bp->rx_desc_ring[i][0];
4181 for (j = 0; j < MAX_RX_DESC_CNT; j++, rxbd++) {
4182 rxbd->rx_bd_len = bp->rx_buf_use_size;
4183 rxbd->rx_bd_flags = RX_BD_FLAGS_START | RX_BD_FLAGS_END;
4185 if (i == (bp->rx_max_ring - 1))
4189 rxbd->rx_bd_haddr_hi = (u64) bp->rx_desc_mapping[j] >> 32;
4190 rxbd->rx_bd_haddr_lo = (u64) bp->rx_desc_mapping[j] &
4194 val = BNX2_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE;
4195 val |= BNX2_L2CTX_CTX_TYPE_SIZE_L2;
4197 CTX_WR(bp, GET_CID_ADDR(RX_CID), BNX2_L2CTX_CTX_TYPE, val);
4199 val = (u64) bp->rx_desc_mapping[0] >> 32;
4200 CTX_WR(bp, GET_CID_ADDR(RX_CID), BNX2_L2CTX_NX_BDHADDR_HI, val);
4202 val = (u64) bp->rx_desc_mapping[0] & 0xffffffff;
4203 CTX_WR(bp, GET_CID_ADDR(RX_CID), BNX2_L2CTX_NX_BDHADDR_LO, val);
4205 for (i = 0; i < bp->rx_ring_size; i++) {
4206 if (bnx2_alloc_rx_skb(bp, ring_prod) < 0) {
4209 prod = NEXT_RX_BD(prod);
4210 ring_prod = RX_RING_IDX(prod);
4214 REG_WR16(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BDIDX, prod);
4216 REG_WR(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BSEQ, bp->rx_prod_bseq);
4220 bnx2_set_rx_ring_size(struct bnx2 *bp, u32 size)
4224 bp->rx_ring_size = size;
4226 while (size > MAX_RX_DESC_CNT) {
4227 size -= MAX_RX_DESC_CNT;
4230 /* round to next power of 2 */
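/* Illustrative sizing, assuming each pass of the loop above consumes one ring
 * page: a request for 600 buffers spans three pages of MAX_RX_DESC_CNT usable
 * BDs each, which rounds up to four ring pages. */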
4232 while ((max & num_rings) == 0)
4235 if (num_rings != max)
4238 bp->rx_max_ring = max;
4239 bp->rx_max_ring_idx = (bp->rx_max_ring * RX_DESC_CNT) - 1;
4243 bnx2_free_tx_skbs(struct bnx2 *bp)
4247 if (bp->tx_buf_ring == NULL)
4250 for (i = 0; i < TX_DESC_CNT; ) {
4251 struct sw_bd *tx_buf = &bp->tx_buf_ring[i];
4252 struct sk_buff *skb = tx_buf->skb;
4260 pci_unmap_single(bp->pdev, pci_unmap_addr(tx_buf, mapping),
4261 skb_headlen(skb), PCI_DMA_TODEVICE);
4265 last = skb_shinfo(skb)->nr_frags;
4266 for (j = 0; j < last; j++) {
4267 tx_buf = &bp->tx_buf_ring[i + j + 1];
4268 pci_unmap_page(bp->pdev,
4269 pci_unmap_addr(tx_buf, mapping),
4270 skb_shinfo(skb)->frags[j].size,
4280 bnx2_free_rx_skbs(struct bnx2 *bp)
4284 if (bp->rx_buf_ring == NULL)
4287 for (i = 0; i < bp->rx_max_ring_idx; i++) {
4288 struct sw_bd *rx_buf = &bp->rx_buf_ring[i];
4289 struct sk_buff *skb = rx_buf->skb;
4294 pci_unmap_single(bp->pdev, pci_unmap_addr(rx_buf, mapping),
4295 bp->rx_buf_use_size, PCI_DMA_FROMDEVICE);
4304 bnx2_free_skbs(struct bnx2 *bp)
4306 bnx2_free_tx_skbs(bp);
4307 bnx2_free_rx_skbs(bp);
4311 bnx2_reset_nic(struct bnx2 *bp, u32 reset_code)
4315 rc = bnx2_reset_chip(bp, reset_code);
4320 if ((rc = bnx2_init_chip(bp)) != 0)
4323 bnx2_init_tx_ring(bp);
4324 bnx2_init_rx_ring(bp);
4329 bnx2_init_nic(struct bnx2 *bp)
4333 if ((rc = bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET)) != 0)
4336 spin_lock_bh(&bp->phy_lock);
4339 spin_unlock_bh(&bp->phy_lock);
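/* Register self-test: for each entry in the table below, write all-zeros and
 * all-ones, check that read/write bits toggle while read-only bits keep their
 * saved value, then restore the original contents. */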
4344 bnx2_test_registers(struct bnx2 *bp)
4348 static const struct {
4351 #define BNX2_FL_NOT_5709 1
4355 { 0x006c, 0, 0x00000000, 0x0000003f },
4356 { 0x0090, 0, 0xffffffff, 0x00000000 },
4357 { 0x0094, 0, 0x00000000, 0x00000000 },
4359 { 0x0404, BNX2_FL_NOT_5709, 0x00003f00, 0x00000000 },
4360 { 0x0418, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
4361 { 0x041c, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
4362 { 0x0420, BNX2_FL_NOT_5709, 0x00000000, 0x80ffffff },
4363 { 0x0424, BNX2_FL_NOT_5709, 0x00000000, 0x00000000 },
4364 { 0x0428, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
4365 { 0x0450, BNX2_FL_NOT_5709, 0x00000000, 0x0000ffff },
4366 { 0x0454, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
4367 { 0x0458, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
4369 { 0x0808, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
4370 { 0x0854, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
4371 { 0x0868, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
4372 { 0x086c, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
4373 { 0x0870, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
4374 { 0x0874, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
4376 { 0x0c00, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
4377 { 0x0c04, BNX2_FL_NOT_5709, 0x00000000, 0x03ff0001 },
4378 { 0x0c08, BNX2_FL_NOT_5709, 0x0f0ff073, 0x00000000 },
4380 { 0x1000, 0, 0x00000000, 0x00000001 },
4381 { 0x1004, 0, 0x00000000, 0x000f0001 },
4383 { 0x1408, 0, 0x01c00800, 0x00000000 },
4384 { 0x149c, 0, 0x8000ffff, 0x00000000 },
4385 { 0x14a8, 0, 0x00000000, 0x000001ff },
4386 { 0x14ac, 0, 0x0fffffff, 0x10000000 },
4387 { 0x14b0, 0, 0x00000002, 0x00000001 },
4388 { 0x14b8, 0, 0x00000000, 0x00000000 },
4389 { 0x14c0, 0, 0x00000000, 0x00000009 },
4390 { 0x14c4, 0, 0x00003fff, 0x00000000 },
4391 { 0x14cc, 0, 0x00000000, 0x00000001 },
4392 { 0x14d0, 0, 0xffffffff, 0x00000000 },
4394 { 0x1800, 0, 0x00000000, 0x00000001 },
4395 { 0x1804, 0, 0x00000000, 0x00000003 },
4397 { 0x2800, 0, 0x00000000, 0x00000001 },
4398 { 0x2804, 0, 0x00000000, 0x00003f01 },
4399 { 0x2808, 0, 0x0f3f3f03, 0x00000000 },
4400 { 0x2810, 0, 0xffff0000, 0x00000000 },
4401 { 0x2814, 0, 0xffff0000, 0x00000000 },
4402 { 0x2818, 0, 0xffff0000, 0x00000000 },
4403 { 0x281c, 0, 0xffff0000, 0x00000000 },
4404 { 0x2834, 0, 0xffffffff, 0x00000000 },
4405 { 0x2840, 0, 0x00000000, 0xffffffff },
4406 { 0x2844, 0, 0x00000000, 0xffffffff },
4407 { 0x2848, 0, 0xffffffff, 0x00000000 },
4408 { 0x284c, 0, 0xf800f800, 0x07ff07ff },
4410 { 0x2c00, 0, 0x00000000, 0x00000011 },
4411 { 0x2c04, 0, 0x00000000, 0x00030007 },
4413 { 0x3c00, 0, 0x00000000, 0x00000001 },
4414 { 0x3c04, 0, 0x00000000, 0x00070000 },
4415 { 0x3c08, 0, 0x00007f71, 0x07f00000 },
4416 { 0x3c0c, 0, 0x1f3ffffc, 0x00000000 },
4417 { 0x3c10, 0, 0xffffffff, 0x00000000 },
4418 { 0x3c14, 0, 0x00000000, 0xffffffff },
4419 { 0x3c18, 0, 0x00000000, 0xffffffff },
4420 { 0x3c1c, 0, 0xfffff000, 0x00000000 },
4421 { 0x3c20, 0, 0xffffff00, 0x00000000 },
4423 { 0x5004, 0, 0x00000000, 0x0000007f },
4424 { 0x5008, 0, 0x0f0007ff, 0x00000000 },
4426 { 0x5c00, 0, 0x00000000, 0x00000001 },
4427 { 0x5c04, 0, 0x00000000, 0x0003000f },
4428 { 0x5c08, 0, 0x00000003, 0x00000000 },
4429 { 0x5c0c, 0, 0x0000fff8, 0x00000000 },
4430 { 0x5c10, 0, 0x00000000, 0xffffffff },
4431 { 0x5c80, 0, 0x00000000, 0x0f7113f1 },
4432 { 0x5c84, 0, 0x00000000, 0x0000f333 },
4433 { 0x5c88, 0, 0x00000000, 0x00077373 },
4434 { 0x5c8c, 0, 0x00000000, 0x0007f737 },
4436 { 0x6808, 0, 0x0000ff7f, 0x00000000 },
4437 { 0x680c, 0, 0xffffffff, 0x00000000 },
4438 { 0x6810, 0, 0xffffffff, 0x00000000 },
4439 { 0x6814, 0, 0xffffffff, 0x00000000 },
4440 { 0x6818, 0, 0xffffffff, 0x00000000 },
4441 { 0x681c, 0, 0xffffffff, 0x00000000 },
4442 { 0x6820, 0, 0x00ff00ff, 0x00000000 },
4443 { 0x6824, 0, 0x00ff00ff, 0x00000000 },
4444 { 0x6828, 0, 0x00ff00ff, 0x00000000 },
4445 { 0x682c, 0, 0x03ff03ff, 0x00000000 },
4446 { 0x6830, 0, 0x03ff03ff, 0x00000000 },
4447 { 0x6834, 0, 0x03ff03ff, 0x00000000 },
4448 { 0x6838, 0, 0x03ff03ff, 0x00000000 },
4449 { 0x683c, 0, 0x0000ffff, 0x00000000 },
4450 { 0x6840, 0, 0x00000ff0, 0x00000000 },
4451 { 0x6844, 0, 0x00ffff00, 0x00000000 },
4452 { 0x684c, 0, 0xffffffff, 0x00000000 },
4453 { 0x6850, 0, 0x7f7f7f7f, 0x00000000 },
4454 { 0x6854, 0, 0x7f7f7f7f, 0x00000000 },
4455 { 0x6858, 0, 0x7f7f7f7f, 0x00000000 },
4456 { 0x685c, 0, 0x7f7f7f7f, 0x00000000 },
4457 { 0x6908, 0, 0x00000000, 0x0001ff0f },
4458 { 0x690c, 0, 0x00000000, 0x0ffe00f0 },
4460 { 0xffff, 0, 0x00000000, 0x00000000 },
4465 if (CHIP_NUM(bp) == CHIP_NUM_5709)
4468 for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
4469 u32 offset, rw_mask, ro_mask, save_val, val;
4470 u16 flags = reg_tbl[i].flags;
4472 if (is_5709 && (flags & BNX2_FL_NOT_5709))
4475 offset = (u32) reg_tbl[i].offset;
4476 rw_mask = reg_tbl[i].rw_mask;
4477 ro_mask = reg_tbl[i].ro_mask;
4479 save_val = readl(bp->regview + offset);
4481 writel(0, bp->regview + offset);
4483 val = readl(bp->regview + offset);
4484 if ((val & rw_mask) != 0) {
4488 if ((val & ro_mask) != (save_val & ro_mask)) {
4492 writel(0xffffffff, bp->regview + offset);
4494 val = readl(bp->regview + offset);
4495 if ((val & rw_mask) != rw_mask) {
4499 if ((val & ro_mask) != (save_val & ro_mask)) {
4503 writel(save_val, bp->regview + offset);
4507 writel(save_val, bp->regview + offset);
4515 bnx2_do_mem_test(struct bnx2 *bp, u32 start, u32 size)
4517 static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0x55555555,
4518 0xaaaaaaaa , 0xaa55aa55, 0x55aa55aa };
4521 for (i = 0; i < sizeof(test_pattern) / 4; i++) {
4524 for (offset = 0; offset < size; offset += 4) {
4526 REG_WR_IND(bp, start + offset, test_pattern[i]);
4528 if (REG_RD_IND(bp, start + offset) !=
4538 bnx2_test_memory(struct bnx2 *bp)
4542 static struct mem_entry {
4545 } mem_tbl_5706[] = {
4546 { 0x60000, 0x4000 },
4547 { 0xa0000, 0x3000 },
4548 { 0xe0000, 0x4000 },
4549 { 0x120000, 0x4000 },
4550 { 0x1a0000, 0x4000 },
4551 { 0x160000, 0x4000 },
4555 { 0x60000, 0x4000 },
4556 { 0xa0000, 0x3000 },
4557 { 0xe0000, 0x4000 },
4558 { 0x120000, 0x4000 },
4559 { 0x1a0000, 0x4000 },
4562 struct mem_entry *mem_tbl;
4564 if (CHIP_NUM(bp) == CHIP_NUM_5709)
4565 mem_tbl = mem_tbl_5709;
4567 mem_tbl = mem_tbl_5706;
4569 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
4570 if ((ret = bnx2_do_mem_test(bp, mem_tbl[i].offset,
4571 mem_tbl[i].len)) != 0) {
4579 #define BNX2_MAC_LOOPBACK 0
4580 #define BNX2_PHY_LOOPBACK 1
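/* Loopback self-test: put the MAC or PHY into loopback, transmit a single
 * crafted frame and verify that it is received back intact on the rx ring. */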
4583 bnx2_run_loopback(struct bnx2 *bp, int loopback_mode)
4585 unsigned int pkt_size, num_pkts, i;
4586 struct sk_buff *skb, *rx_skb;
4587 unsigned char *packet;
4588 u16 rx_start_idx, rx_idx;
4591 struct sw_bd *rx_buf;
4592 struct l2_fhdr *rx_hdr;
4595 if (loopback_mode == BNX2_MAC_LOOPBACK) {
4596 bp->loopback = MAC_LOOPBACK;
4597 bnx2_set_mac_loopback(bp);
4599 else if (loopback_mode == BNX2_PHY_LOOPBACK) {
4600 if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
4603 bp->loopback = PHY_LOOPBACK;
4604 bnx2_set_phy_loopback(bp);
4610 skb = netdev_alloc_skb(bp->dev, pkt_size);
4613 packet = skb_put(skb, pkt_size);
4614 memcpy(packet, bp->dev->dev_addr, 6);
4615 memset(packet + 6, 0x0, 8);
4616 for (i = 14; i < pkt_size; i++)
4617 packet[i] = (unsigned char) (i & 0xff);
4619 map = pci_map_single(bp->pdev, skb->data, pkt_size,
4622 REG_WR(bp, BNX2_HC_COMMAND,
4623 bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
4625 REG_RD(bp, BNX2_HC_COMMAND);
4628 rx_start_idx = bp->status_blk->status_rx_quick_consumer_index0;
4632 txbd = &bp->tx_desc_ring[TX_RING_IDX(bp->tx_prod)];
4634 txbd->tx_bd_haddr_hi = (u64) map >> 32;
4635 txbd->tx_bd_haddr_lo = (u64) map & 0xffffffff;
4636 txbd->tx_bd_mss_nbytes = pkt_size;
4637 txbd->tx_bd_vlan_tag_flags = TX_BD_FLAGS_START | TX_BD_FLAGS_END;
4640 bp->tx_prod = NEXT_TX_BD(bp->tx_prod);
4641 bp->tx_prod_bseq += pkt_size;
4643 REG_WR16(bp, bp->tx_bidx_addr, bp->tx_prod);
4644 REG_WR(bp, bp->tx_bseq_addr, bp->tx_prod_bseq);
4648 REG_WR(bp, BNX2_HC_COMMAND,
4649 bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
4651 REG_RD(bp, BNX2_HC_COMMAND);
4655 pci_unmap_single(bp->pdev, map, pkt_size, PCI_DMA_TODEVICE);
4658 if (bp->status_blk->status_tx_quick_consumer_index0 != bp->tx_prod) {
4659 goto loopback_test_done;
4662 rx_idx = bp->status_blk->status_rx_quick_consumer_index0;
4663 if (rx_idx != rx_start_idx + num_pkts) {
4664 goto loopback_test_done;
4667 rx_buf = &bp->rx_buf_ring[rx_start_idx];
4668 rx_skb = rx_buf->skb;
4670 rx_hdr = (struct l2_fhdr *) rx_skb->data;
4671 skb_reserve(rx_skb, bp->rx_offset);
4673 pci_dma_sync_single_for_cpu(bp->pdev,
4674 pci_unmap_addr(rx_buf, mapping),
4675 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
4677 if (rx_hdr->l2_fhdr_status &
4678 (L2_FHDR_ERRORS_BAD_CRC |
4679 L2_FHDR_ERRORS_PHY_DECODE |
4680 L2_FHDR_ERRORS_ALIGNMENT |
4681 L2_FHDR_ERRORS_TOO_SHORT |
4682 L2_FHDR_ERRORS_GIANT_FRAME)) {
4684 goto loopback_test_done;
4687 if ((rx_hdr->l2_fhdr_pkt_len - 4) != pkt_size) {
4688 goto loopback_test_done;
4691 for (i = 14; i < pkt_size; i++) {
4692 if (*(rx_skb->data + i) != (unsigned char) (i & 0xff)) {
4693 goto loopback_test_done;
4704 #define BNX2_MAC_LOOPBACK_FAILED 1
4705 #define BNX2_PHY_LOOPBACK_FAILED 2
4706 #define BNX2_LOOPBACK_FAILED (BNX2_MAC_LOOPBACK_FAILED | \
4707 BNX2_PHY_LOOPBACK_FAILED)
4710 bnx2_test_loopback(struct bnx2 *bp)
4714 if (!netif_running(bp->dev))
4715 return BNX2_LOOPBACK_FAILED;
4717 bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET);
4718 spin_lock_bh(&bp->phy_lock);
4720 spin_unlock_bh(&bp->phy_lock);
4721 if (bnx2_run_loopback(bp, BNX2_MAC_LOOPBACK))
4722 rc |= BNX2_MAC_LOOPBACK_FAILED;
4723 if (bnx2_run_loopback(bp, BNX2_PHY_LOOPBACK))
4724 rc |= BNX2_PHY_LOOPBACK_FAILED;
4728 #define NVRAM_SIZE 0x200
4729 #define CRC32_RESIDUAL 0xdebb20e3
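/* NVRAM self-test: check the magic word at offset 0, then verify that each
 * 0x100-byte half of the block at offset 0x100 yields the standard CRC32
 * residual. */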
4732 bnx2_test_nvram(struct bnx2 *bp)
4734 u32 buf[NVRAM_SIZE / 4];
4735 u8 *data = (u8 *) buf;
4739 if ((rc = bnx2_nvram_read(bp, 0, data, 4)) != 0)
4740 goto test_nvram_done;
4742 magic = be32_to_cpu(buf[0]);
4743 if (magic != 0x669955aa) {
4745 goto test_nvram_done;
4748 if ((rc = bnx2_nvram_read(bp, 0x100, data, NVRAM_SIZE)) != 0)
4749 goto test_nvram_done;
4751 csum = ether_crc_le(0x100, data);
4752 if (csum != CRC32_RESIDUAL) {
4754 goto test_nvram_done;
4757 csum = ether_crc_le(0x100, data + 0x100);
4758 if (csum != CRC32_RESIDUAL) {
4767 bnx2_test_link(struct bnx2 *bp)
4771 if (bp->phy_flags & REMOTE_PHY_CAP_FLAG) {
4776 spin_lock_bh(&bp->phy_lock);
4777 bnx2_enable_bmsr1(bp);
4778 bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
4779 bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
4780 bnx2_disable_bmsr1(bp);
4781 spin_unlock_bh(&bp->phy_lock);
4783 if (bmsr & BMSR_LSTATUS) {
4790 bnx2_test_intr(struct bnx2 *bp)
4795 if (!netif_running(bp->dev))
4798 status_idx = REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff;
4800 /* This register is not touched during run-time. */
4801 REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
4802 REG_RD(bp, BNX2_HC_COMMAND);
4804 for (i = 0; i < 10; i++) {
4805 if ((REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff) !=
4811 msleep_interruptible(10);
4820 bnx2_5706_serdes_timer(struct bnx2 *bp)
4822 spin_lock(&bp->phy_lock);
4823 if (bp->serdes_an_pending)
4824 bp->serdes_an_pending--;
4825 else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
4828 bp->current_interval = bp->timer_interval;
4830 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
4832 if (bmcr & BMCR_ANENABLE) {
4835 bnx2_write_phy(bp, 0x1c, 0x7c00);
4836 bnx2_read_phy(bp, 0x1c, &phy1);
4838 bnx2_write_phy(bp, 0x17, 0x0f01);
4839 bnx2_read_phy(bp, 0x15, &phy2);
4840 bnx2_write_phy(bp, 0x17, 0x0f01);
4841 bnx2_read_phy(bp, 0x15, &phy2);
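/* Parallel detection: a signal is present but no autoneg config words have
 * been seen, so the link partner is assumed to be forced; force 1G full
 * duplex to match. */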
4843 if ((phy1 & 0x10) && /* SIGNAL DETECT */
4844 !(phy2 & 0x20)) { /* no CONFIG */
4846 bmcr &= ~BMCR_ANENABLE;
4847 bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
4848 bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
4849 bp->phy_flags |= PHY_PARALLEL_DETECT_FLAG;
4853 else if ((bp->link_up) && (bp->autoneg & AUTONEG_SPEED) &&
4854 (bp->phy_flags & PHY_PARALLEL_DETECT_FLAG)) {
4857 bnx2_write_phy(bp, 0x17, 0x0f01);
4858 bnx2_read_phy(bp, 0x15, &phy2);
4862 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
4863 bmcr |= BMCR_ANENABLE;
4864 bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
4866 bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG;
4869 bp->current_interval = bp->timer_interval;
4871 spin_unlock(&bp->phy_lock);
4875 bnx2_5708_serdes_timer(struct bnx2 *bp)
4877 if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
4880 if ((bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) == 0) {
4881 bp->serdes_an_pending = 0;
4885 spin_lock(&bp->phy_lock);
4886 if (bp->serdes_an_pending)
4887 bp->serdes_an_pending--;
4888 else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
4891 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
4892 if (bmcr & BMCR_ANENABLE) {
4893 bnx2_enable_forced_2g5(bp);
4894 bp->current_interval = SERDES_FORCED_TIMEOUT;
4896 bnx2_disable_forced_2g5(bp);
4897 bp->serdes_an_pending = 2;
4898 bp->current_interval = bp->timer_interval;
4902 bp->current_interval = bp->timer_interval;
4904 spin_unlock(&bp->phy_lock);
4908 bnx2_timer(unsigned long data)
4910 struct bnx2 *bp = (struct bnx2 *) data;
4912 if (!netif_running(bp->dev))
4915 if (atomic_read(&bp->intr_sem) != 0)
4916 goto bnx2_restart_timer;
4918 bnx2_send_heart_beat(bp);
4920 bp->stats_blk->stat_FwRxDrop = REG_RD_IND(bp, BNX2_FW_RX_DROP_COUNT);
4922 /* work around occasionally corrupted counters */
4923 if (CHIP_NUM(bp) == CHIP_NUM_5708 && bp->stats_ticks)
4924 REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd |
4925 BNX2_HC_COMMAND_STATS_NOW);
4927 if (bp->phy_flags & PHY_SERDES_FLAG) {
4928 if (CHIP_NUM(bp) == CHIP_NUM_5706)
4929 bnx2_5706_serdes_timer(bp);
4931 bnx2_5708_serdes_timer(bp);
4935 mod_timer(&bp->timer, jiffies + bp->current_interval);
4939 bnx2_request_irq(struct bnx2 *bp)
4941 struct net_device *dev = bp->dev;
4944 if (bp->flags & USING_MSI_FLAG) {
4945 irq_handler_t fn = bnx2_msi;
4947 if (bp->flags & ONE_SHOT_MSI_FLAG)
4948 fn = bnx2_msi_1shot;
4950 rc = request_irq(bp->pdev->irq, fn, 0, dev->name, dev);
4952 rc = request_irq(bp->pdev->irq, bnx2_interrupt,
4953 IRQF_SHARED, dev->name, dev);
4958 bnx2_free_irq(struct bnx2 *bp)
4960 struct net_device *dev = bp->dev;
4962 if (bp->flags & USING_MSI_FLAG) {
4963 free_irq(bp->pdev->irq, dev);
4964 pci_disable_msi(bp->pdev);
4965 bp->flags &= ~(USING_MSI_FLAG | ONE_SHOT_MSI_FLAG);
4967 free_irq(bp->pdev->irq, dev);
4970 /* Called with rtnl_lock */
4972 bnx2_open(struct net_device *dev)
4974 struct bnx2 *bp = netdev_priv(dev);
4977 netif_carrier_off(dev);
4979 bnx2_set_power_state(bp, PCI_D0);
4980 bnx2_disable_int(bp);
4982 rc = bnx2_alloc_mem(bp);
4986 napi_enable(&bp->napi);
4988 if ((bp->flags & MSI_CAP_FLAG) && !disable_msi) {
4989 if (pci_enable_msi(bp->pdev) == 0) {
4990 bp->flags |= USING_MSI_FLAG;
4991 if (CHIP_NUM(bp) == CHIP_NUM_5709)
4992 bp->flags |= ONE_SHOT_MSI_FLAG;
4995 rc = bnx2_request_irq(bp);
4998 napi_disable(&bp->napi);
5003 rc = bnx2_init_nic(bp);
5006 napi_disable(&bp->napi);
5013 mod_timer(&bp->timer, jiffies + bp->current_interval);
5015 atomic_set(&bp->intr_sem, 0);
5017 bnx2_enable_int(bp);
5019 if (bp->flags & USING_MSI_FLAG) {
5020 /* Test MSI to make sure it is working
5021 * If MSI test fails, go back to INTx mode
5023 if (bnx2_test_intr(bp) != 0) {
5024 printk(KERN_WARNING PFX "%s: No interrupt was generated"
5025 " using MSI, switching to INTx mode. Please"
5026 " report this failure to the PCI maintainer"
5027 " and include system chipset information.\n",
5030 bnx2_disable_int(bp);
5033 rc = bnx2_init_nic(bp);
5036 rc = bnx2_request_irq(bp);
5039 napi_disable(&bp->napi);
5042 del_timer_sync(&bp->timer);
5045 bnx2_enable_int(bp);
5048 if (bp->flags & USING_MSI_FLAG) {
5049 printk(KERN_INFO PFX "%s: using MSI\n", dev->name);
5052 netif_start_queue(dev);
5058 bnx2_reset_task(struct work_struct *work)
5060 struct bnx2 *bp = container_of(work, struct bnx2, reset_task);
5062 if (!netif_running(bp->dev))
5065 bp->in_reset_task = 1;
5066 bnx2_netif_stop(bp);
5070 atomic_set(&bp->intr_sem, 1);
5071 bnx2_netif_start(bp);
5072 bp->in_reset_task = 0;
5076 bnx2_tx_timeout(struct net_device *dev)
5078 struct bnx2 *bp = netdev_priv(dev);
5080 /* This allows the netif to be shut down gracefully before resetting */
5081 schedule_work(&bp->reset_task);
5085 /* Called with rtnl_lock */
5087 bnx2_vlan_rx_register(struct net_device *dev, struct vlan_group *vlgrp)
5089 struct bnx2 *bp = netdev_priv(dev);
5091 bnx2_netif_stop(bp);
5094 bnx2_set_rx_mode(dev);
5096 bnx2_netif_start(bp);
5100 /* Called with netif_tx_lock.
5101 * bnx2_tx_int() runs without netif_tx_lock unless it needs to call
5102 * netif_wake_queue().
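 *
 * The linear part of the skb and each page fragment get their own buffer
 * descriptor; the first BD carries the checksum/VLAN/LSO flags and the
 * last one is marked TX_BD_FLAGS_END before the producer index and byte
 * sequence are written to the doorbell registers.
 */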
5105 bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
5107 struct bnx2 *bp = netdev_priv(dev);
5110 struct sw_bd *tx_buf;
5111 u32 len, vlan_tag_flags, last_frag, mss;
5112 u16 prod, ring_prod;
5115 if (unlikely(bnx2_tx_avail(bp) < (skb_shinfo(skb)->nr_frags + 1))) {
5116 netif_stop_queue(dev);
5117 printk(KERN_ERR PFX "%s: BUG! Tx ring full when queue awake!\n",
5120 return NETDEV_TX_BUSY;
5122 len = skb_headlen(skb);
5124 ring_prod = TX_RING_IDX(prod);
5127 if (skb->ip_summed == CHECKSUM_PARTIAL) {
5128 vlan_tag_flags |= TX_BD_FLAGS_TCP_UDP_CKSUM;
5131 if (bp->vlgrp != 0 && vlan_tx_tag_present(skb)) {
5133 vlan_tag_flags |= (TX_BD_FLAGS_VLAN_TAG | (vlan_tx_tag_get(skb) << 16));
5135 if ((mss = skb_shinfo(skb)->gso_size)) {
5136 u32 tcp_opt_len, ip_tcp_len;
5139 vlan_tag_flags |= TX_BD_FLAGS_SW_LSO;
5141 tcp_opt_len = tcp_optlen(skb);
5143 if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) {
5144 u32 tcp_off = skb_transport_offset(skb) -
5145 sizeof(struct ipv6hdr) - ETH_HLEN;
5147 vlan_tag_flags |= ((tcp_opt_len >> 2) << 8) |
5148 TX_BD_FLAGS_SW_FLAGS;
5149 if (likely(tcp_off == 0))
5150 vlan_tag_flags &= ~TX_BD_FLAGS_TCP6_OFF0_MSK;
5153 vlan_tag_flags |= ((tcp_off & 0x3) <<
5154 TX_BD_FLAGS_TCP6_OFF0_SHL) |
5155 ((tcp_off & 0x10) <<
5156 TX_BD_FLAGS_TCP6_OFF4_SHL);
5157 mss |= (tcp_off & 0xc) << TX_BD_TCP6_OFF2_SHL;
5160 if (skb_header_cloned(skb) &&
5161 pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
5163 return NETDEV_TX_OK;
5166 ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);
5170 iph->tot_len = htons(mss + ip_tcp_len + tcp_opt_len);
5171 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr, 0, IPPROTO_TCP, 0);
5175 if (tcp_opt_len || (iph->ihl > 5)) {
5176 vlan_tag_flags |= ((iph->ihl - 5) +
5177 (tcp_opt_len >> 2)) << 8;
5183 mapping = pci_map_single(bp->pdev, skb->data, len, PCI_DMA_TODEVICE);
5185 tx_buf = &bp->tx_buf_ring[ring_prod];
5187 pci_unmap_addr_set(tx_buf, mapping, mapping);
5189 txbd = &bp->tx_desc_ring[ring_prod];
5191 txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
5192 txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
5193 txbd->tx_bd_mss_nbytes = len | (mss << 16);
5194 txbd->tx_bd_vlan_tag_flags = vlan_tag_flags | TX_BD_FLAGS_START;
5196 last_frag = skb_shinfo(skb)->nr_frags;
5198 for (i = 0; i < last_frag; i++) {
5199 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
5201 prod = NEXT_TX_BD(prod);
5202 ring_prod = TX_RING_IDX(prod);
5203 txbd = &bp->tx_desc_ring[ring_prod];
5206 mapping = pci_map_page(bp->pdev, frag->page, frag->page_offset,
5207 len, PCI_DMA_TODEVICE);
5208 pci_unmap_addr_set(&bp->tx_buf_ring[ring_prod], mapping, mapping);
5211 txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
5212 txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
5213 txbd->tx_bd_mss_nbytes = len | (mss << 16);
5214 txbd->tx_bd_vlan_tag_flags = vlan_tag_flags;
5217 txbd->tx_bd_vlan_tag_flags |= TX_BD_FLAGS_END;
5219 prod = NEXT_TX_BD(prod);
5220 bp->tx_prod_bseq += skb->len;
5222 REG_WR16(bp, bp->tx_bidx_addr, prod);
5223 REG_WR(bp, bp->tx_bseq_addr, bp->tx_prod_bseq);
5228 dev->trans_start = jiffies;
5230 if (unlikely(bnx2_tx_avail(bp) <= MAX_SKB_FRAGS)) {
5231 netif_stop_queue(dev);
5232 if (bnx2_tx_avail(bp) > bp->tx_wake_thresh)
5233 netif_wake_queue(dev);
5236 return NETDEV_TX_OK;
5239 /* Called with rtnl_lock */
5241 bnx2_close(struct net_device *dev)
5243 struct bnx2 *bp = netdev_priv(dev);
5246 /* Calling flush_scheduled_work() may deadlock because
5247 * linkwatch_event() may be on the workqueue and it will try to get
5248 * the rtnl_lock which we are holding.
5250 while (bp->in_reset_task)
5253 bnx2_disable_int_sync(bp);
5254 napi_disable(&bp->napi);
5255 del_timer_sync(&bp->timer);
5256 if (bp->flags & NO_WOL_FLAG)
5257 reset_code = BNX2_DRV_MSG_CODE_UNLOAD_LNK_DN;
5259 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
5261 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
5262 bnx2_reset_chip(bp, reset_code);
5267 netif_carrier_off(bp->dev);
5268 bnx2_set_power_state(bp, PCI_D3hot);
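/* Hardware maintains 64-bit counters as hi/lo word pairs in the statistics
 * block.  On 64-bit hosts both halves are combined; on 32-bit hosts only
 * the low word is reported.
 */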
5272 #define GET_NET_STATS64(ctr) \
5273 (unsigned long) ((unsigned long) (ctr##_hi) << 32) + \
5274 (unsigned long) (ctr##_lo)
5276 #define GET_NET_STATS32(ctr) (ctr##_lo)
5279 #if (BITS_PER_LONG == 64)
5280 #define GET_NET_STATS GET_NET_STATS64
5282 #define GET_NET_STATS GET_NET_STATS32
5285 static struct net_device_stats *
5286 bnx2_get_stats(struct net_device *dev)
5288 struct bnx2 *bp = netdev_priv(dev);
5289 struct statistics_block *stats_blk = bp->stats_blk;
5290 struct net_device_stats *net_stats = &bp->net_stats;
5292 if (bp->stats_blk == NULL) {
5295 net_stats->rx_packets =
5296 GET_NET_STATS(stats_blk->stat_IfHCInUcastPkts) +
5297 GET_NET_STATS(stats_blk->stat_IfHCInMulticastPkts) +
5298 GET_NET_STATS(stats_blk->stat_IfHCInBroadcastPkts);
5300 net_stats->tx_packets =
5301 GET_NET_STATS(stats_blk->stat_IfHCOutUcastPkts) +
5302 GET_NET_STATS(stats_blk->stat_IfHCOutMulticastPkts) +
5303 GET_NET_STATS(stats_blk->stat_IfHCOutBroadcastPkts);
5305 net_stats->rx_bytes =
5306 GET_NET_STATS(stats_blk->stat_IfHCInOctets);
5308 net_stats->tx_bytes =
5309 GET_NET_STATS(stats_blk->stat_IfHCOutOctets);
5311 net_stats->multicast =
5312 GET_NET_STATS(stats_blk->stat_IfHCOutMulticastPkts);
5314 net_stats->collisions =
5315 (unsigned long) stats_blk->stat_EtherStatsCollisions;
5317 net_stats->rx_length_errors =
5318 (unsigned long) (stats_blk->stat_EtherStatsUndersizePkts +
5319 stats_blk->stat_EtherStatsOverrsizePkts);
5321 net_stats->rx_over_errors =
5322 (unsigned long) stats_blk->stat_IfInMBUFDiscards;
5324 net_stats->rx_frame_errors =
5325 (unsigned long) stats_blk->stat_Dot3StatsAlignmentErrors;
5327 net_stats->rx_crc_errors =
5328 (unsigned long) stats_blk->stat_Dot3StatsFCSErrors;
5330 net_stats->rx_errors = net_stats->rx_length_errors +
5331 net_stats->rx_over_errors + net_stats->rx_frame_errors +
5332 net_stats->rx_crc_errors;
5334 net_stats->tx_aborted_errors =
5335 (unsigned long) (stats_blk->stat_Dot3StatsExcessiveCollisions +
5336 stats_blk->stat_Dot3StatsLateCollisions);
5338 if ((CHIP_NUM(bp) == CHIP_NUM_5706) ||
5339 (CHIP_ID(bp) == CHIP_ID_5708_A0))
5340 net_stats->tx_carrier_errors = 0;
5342 net_stats->tx_carrier_errors =
5344 stats_blk->stat_Dot3StatsCarrierSenseErrors;
5347 net_stats->tx_errors =
5349 stats_blk->stat_emac_tx_stat_dot3statsinternalmactransmiterrors +
5351 net_stats->tx_aborted_errors +
5352 net_stats->tx_carrier_errors;
5354 net_stats->rx_missed_errors =
5355 (unsigned long) (stats_blk->stat_IfInMBUFDiscards +
5356 stats_blk->stat_FwRxDrop);
5361 /* All ethtool functions called with rtnl_lock */
5364 bnx2_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
5366 struct bnx2 *bp = netdev_priv(dev);
5367 int support_serdes = 0, support_copper = 0;
5369 cmd->supported = SUPPORTED_Autoneg;
5370 if (bp->phy_flags & REMOTE_PHY_CAP_FLAG) {
5373 } else if (bp->phy_port == PORT_FIBRE)
5378 if (support_serdes) {
5379 cmd->supported |= SUPPORTED_1000baseT_Full | SUPPORTED_FIBRE;
5381 if (bp->phy_flags & PHY_2_5G_CAPABLE_FLAG)
5382 cmd->supported |= SUPPORTED_2500baseX_Full;
5385 if (support_copper) {
5386 cmd->supported |= SUPPORTED_10baseT_Half |
5387 SUPPORTED_10baseT_Full |
5388 SUPPORTED_100baseT_Half |
5389 SUPPORTED_100baseT_Full |
5390 SUPPORTED_1000baseT_Full | SUPPORTED_TP;
5395 spin_lock_bh(&bp->phy_lock);
5396 cmd->port = bp->phy_port;
5397 cmd->advertising = bp->advertising;
5399 if (bp->autoneg & AUTONEG_SPEED) {
5400 cmd->autoneg = AUTONEG_ENABLE;
5403 cmd->autoneg = AUTONEG_DISABLE;
5406 if (netif_carrier_ok(dev)) {
5407 cmd->speed = bp->line_speed;
5408 cmd->duplex = bp->duplex;
5414 spin_unlock_bh(&bp->phy_lock);
5416 cmd->transceiver = XCVR_INTERNAL;
5417 cmd->phy_address = bp->phy_addr;
5423 bnx2_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
5425 struct bnx2 *bp = netdev_priv(dev);
5426 u8 autoneg = bp->autoneg;
5427 u8 req_duplex = bp->req_duplex;
5428 u16 req_line_speed = bp->req_line_speed;
5429 u32 advertising = bp->advertising;
5432 spin_lock_bh(&bp->phy_lock);
5434 if (cmd->port != PORT_TP && cmd->port != PORT_FIBRE)
5435 goto err_out_unlock;
5437 if (cmd->port != bp->phy_port && !(bp->phy_flags & REMOTE_PHY_CAP_FLAG))
5438 goto err_out_unlock;
5440 if (cmd->autoneg == AUTONEG_ENABLE) {
5441 autoneg |= AUTONEG_SPEED;
5443 cmd->advertising &= ETHTOOL_ALL_COPPER_SPEED;
5445 /* allow advertising 1 speed */
5446 if ((cmd->advertising == ADVERTISED_10baseT_Half) ||
5447 (cmd->advertising == ADVERTISED_10baseT_Full) ||
5448 (cmd->advertising == ADVERTISED_100baseT_Half) ||
5449 (cmd->advertising == ADVERTISED_100baseT_Full)) {
5451 if (cmd->port == PORT_FIBRE)
5452 goto err_out_unlock;
5454 advertising = cmd->advertising;
5456 } else if (cmd->advertising == ADVERTISED_2500baseX_Full) {
5457 if (!(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) ||
5458 (cmd->port == PORT_TP))
5459 goto err_out_unlock;
5460 } else if (cmd->advertising == ADVERTISED_1000baseT_Full)
5461 advertising = cmd->advertising;
5462 else if (cmd->advertising == ADVERTISED_1000baseT_Half)
5463 goto err_out_unlock;
5465 if (cmd->port == PORT_FIBRE)
5466 advertising = ETHTOOL_ALL_FIBRE_SPEED;
5468 advertising = ETHTOOL_ALL_COPPER_SPEED;
5470 advertising |= ADVERTISED_Autoneg;
5473 if (cmd->port == PORT_FIBRE) {
5474 if ((cmd->speed != SPEED_1000 &&
5475 cmd->speed != SPEED_2500) ||
5476 (cmd->duplex != DUPLEX_FULL))
5477 goto err_out_unlock;
5479 if (cmd->speed == SPEED_2500 &&
5480 !(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
5481 goto err_out_unlock;
5483 else if (cmd->speed == SPEED_1000 || cmd->speed == SPEED_2500)
5484 goto err_out_unlock;
5486 autoneg &= ~AUTONEG_SPEED;
5487 req_line_speed = cmd->speed;
5488 req_duplex = cmd->duplex;
5492 bp->autoneg = autoneg;
5493 bp->advertising = advertising;
5494 bp->req_line_speed = req_line_speed;
5495 bp->req_duplex = req_duplex;
5497 err = bnx2_setup_phy(bp, cmd->port);
5500 spin_unlock_bh(&bp->phy_lock);
5506 bnx2_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
5508 struct bnx2 *bp = netdev_priv(dev);
5510 strcpy(info->driver, DRV_MODULE_NAME);
5511 strcpy(info->version, DRV_MODULE_VERSION);
5512 strcpy(info->bus_info, pci_name(bp->pdev));
5513 strcpy(info->fw_version, bp->fw_version);
5516 #define BNX2_REGDUMP_LEN (32 * 1024)
5519 bnx2_get_regs_len(struct net_device *dev)
5521 return BNX2_REGDUMP_LEN;
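/* Register dump for ethtool -d.  reg_boundaries[] below lists
 * [start, end) pairs of readable register ranges; offsets outside the
 * listed ranges are left zeroed in the 32K dump buffer.
 */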
5525 bnx2_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *_p)
5527 u32 *p = _p, i, offset;
5529 struct bnx2 *bp = netdev_priv(dev);
5530 u32 reg_boundaries[] = { 0x0000, 0x0098, 0x0400, 0x045c,
5531 0x0800, 0x0880, 0x0c00, 0x0c10,
5532 0x0c30, 0x0d08, 0x1000, 0x101c,
5533 0x1040, 0x1048, 0x1080, 0x10a4,
5534 0x1400, 0x1490, 0x1498, 0x14f0,
5535 0x1500, 0x155c, 0x1580, 0x15dc,
5536 0x1600, 0x1658, 0x1680, 0x16d8,
5537 0x1800, 0x1820, 0x1840, 0x1854,
5538 0x1880, 0x1894, 0x1900, 0x1984,
5539 0x1c00, 0x1c0c, 0x1c40, 0x1c54,
5540 0x1c80, 0x1c94, 0x1d00, 0x1d84,
5541 0x2000, 0x2030, 0x23c0, 0x2400,
5542 0x2800, 0x2820, 0x2830, 0x2850,
5543 0x2b40, 0x2c10, 0x2fc0, 0x3058,
5544 0x3c00, 0x3c94, 0x4000, 0x4010,
5545 0x4080, 0x4090, 0x43c0, 0x4458,
5546 0x4c00, 0x4c18, 0x4c40, 0x4c54,
5547 0x4fc0, 0x5010, 0x53c0, 0x5444,
5548 0x5c00, 0x5c18, 0x5c80, 0x5c90,
5549 0x5fc0, 0x6000, 0x6400, 0x6428,
5550 0x6800, 0x6848, 0x684c, 0x6860,
5551 0x6888, 0x6910, 0x8000 };
5555 memset(p, 0, BNX2_REGDUMP_LEN);
5557 if (!netif_running(bp->dev))
5561 offset = reg_boundaries[0];
5563 while (offset < BNX2_REGDUMP_LEN) {
5564 *p++ = REG_RD(bp, offset);
5566 if (offset == reg_boundaries[i + 1]) {
5567 offset = reg_boundaries[i + 2];
5568 p = (u32 *) (orig_p + offset);
5575 bnx2_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
5577 struct bnx2 *bp = netdev_priv(dev);
5579 if (bp->flags & NO_WOL_FLAG) {
5584 wol->supported = WAKE_MAGIC;
5586 wol->wolopts = WAKE_MAGIC;
5590 memset(&wol->sopass, 0, sizeof(wol->sopass));
5594 bnx2_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
5596 struct bnx2 *bp = netdev_priv(dev);
5598 if (wol->wolopts & ~WAKE_MAGIC)
5601 if (wol->wolopts & WAKE_MAGIC) {
5602 if (bp->flags & NO_WOL_FLAG)
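/* Restart autonegotiation.  On remote-PHY configurations this is delegated
 * to the firmware; on local SerDes PHYs the link is first forced down with
 * BMCR_LOOPBACK so the link partner notices the restart.
 */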
5614 bnx2_nway_reset(struct net_device *dev)
5616 struct bnx2 *bp = netdev_priv(dev);
5619 if (!(bp->autoneg & AUTONEG_SPEED)) {
5623 spin_lock_bh(&bp->phy_lock);
5625 if (bp->phy_flags & REMOTE_PHY_CAP_FLAG) {
5628 rc = bnx2_setup_remote_phy(bp, bp->phy_port);
5629 spin_unlock_bh(&bp->phy_lock);
5633 /* Force a link down visible on the other side */
5634 if (bp->phy_flags & PHY_SERDES_FLAG) {
5635 bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
5636 spin_unlock_bh(&bp->phy_lock);
5640 spin_lock_bh(&bp->phy_lock);
5642 bp->current_interval = SERDES_AN_TIMEOUT;
5643 bp->serdes_an_pending = 1;
5644 mod_timer(&bp->timer, jiffies + bp->current_interval);
5647 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
5648 bmcr &= ~BMCR_LOOPBACK;
5649 bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART | BMCR_ANENABLE);
5651 spin_unlock_bh(&bp->phy_lock);
5657 bnx2_get_eeprom_len(struct net_device *dev)
5659 struct bnx2 *bp = netdev_priv(dev);
5661 if (bp->flash_info == NULL)
5664 return (int) bp->flash_size;
5668 bnx2_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
5671 struct bnx2 *bp = netdev_priv(dev);
5674 /* parameters already validated in ethtool_get_eeprom */
5676 rc = bnx2_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
5682 bnx2_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
5685 struct bnx2 *bp = netdev_priv(dev);
5688 /* parameters already validated in ethtool_set_eeprom */
5690 rc = bnx2_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
5696 bnx2_get_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
5698 struct bnx2 *bp = netdev_priv(dev);
5700 memset(coal, 0, sizeof(struct ethtool_coalesce));
5702 coal->rx_coalesce_usecs = bp->rx_ticks;
5703 coal->rx_max_coalesced_frames = bp->rx_quick_cons_trip;
5704 coal->rx_coalesce_usecs_irq = bp->rx_ticks_int;
5705 coal->rx_max_coalesced_frames_irq = bp->rx_quick_cons_trip_int;
5707 coal->tx_coalesce_usecs = bp->tx_ticks;
5708 coal->tx_max_coalesced_frames = bp->tx_quick_cons_trip;
5709 coal->tx_coalesce_usecs_irq = bp->tx_ticks_int;
5710 coal->tx_max_coalesced_frames_irq = bp->tx_quick_cons_trip_int;
5712 coal->stats_block_coalesce_usecs = bp->stats_ticks;
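/* Apply new coalescing parameters.  Tick values are clamped to 0x3ff and
 * frame counts to 0xff; the 5708 only supports a statistics interval of 0
 * or one second.  A running interface is restarted so the host coalescing
 * block picks up the new values.
 */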
5718 bnx2_set_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
5720 struct bnx2 *bp = netdev_priv(dev);
5722 bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
5723 if (bp->rx_ticks > 0x3ff) bp->rx_ticks = 0x3ff;
5725 bp->rx_quick_cons_trip = (u16) coal->rx_max_coalesced_frames;
5726 if (bp->rx_quick_cons_trip > 0xff) bp->rx_quick_cons_trip = 0xff;
5728 bp->rx_ticks_int = (u16) coal->rx_coalesce_usecs_irq;
5729 if (bp->rx_ticks_int > 0x3ff) bp->rx_ticks_int = 0x3ff;
5731 bp->rx_quick_cons_trip_int = (u16) coal->rx_max_coalesced_frames_irq;
5732 if (bp->rx_quick_cons_trip_int > 0xff)
5733 bp->rx_quick_cons_trip_int = 0xff;
5735 bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
5736 if (bp->tx_ticks > 0x3ff) bp->tx_ticks = 0x3ff;
5738 bp->tx_quick_cons_trip = (u16) coal->tx_max_coalesced_frames;
5739 if (bp->tx_quick_cons_trip > 0xff) bp->tx_quick_cons_trip = 0xff;
5741 bp->tx_ticks_int = (u16) coal->tx_coalesce_usecs_irq;
5742 if (bp->tx_ticks_int > 0x3ff) bp->tx_ticks_int = 0x3ff;
5744 bp->tx_quick_cons_trip_int = (u16) coal->tx_max_coalesced_frames_irq;
5745 if (bp->tx_quick_cons_trip_int > 0xff) bp->tx_quick_cons_trip_int = 0xff;
5748 bp->stats_ticks = coal->stats_block_coalesce_usecs;
5749 if (CHIP_NUM(bp) == CHIP_NUM_5708) {
5750 if (bp->stats_ticks != 0 && bp->stats_ticks != USEC_PER_SEC)
5751 bp->stats_ticks = USEC_PER_SEC;
5753 if (bp->stats_ticks > BNX2_HC_STATS_TICKS_HC_STAT_TICKS)
5754 bp->stats_ticks = BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
5755 bp->stats_ticks &= BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
5757 if (netif_running(bp->dev)) {
5758 bnx2_netif_stop(bp);
5760 bnx2_netif_start(bp);
5767 bnx2_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
5769 struct bnx2 *bp = netdev_priv(dev);
5771 ering->rx_max_pending = MAX_TOTAL_RX_DESC_CNT;
5772 ering->rx_mini_max_pending = 0;
5773 ering->rx_jumbo_max_pending = 0;
5775 ering->rx_pending = bp->rx_ring_size;
5776 ering->rx_mini_pending = 0;
5777 ering->rx_jumbo_pending = 0;
5779 ering->tx_max_pending = MAX_TX_DESC_CNT;
5780 ering->tx_pending = bp->tx_ring_size;
5784 bnx2_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
5786 struct bnx2 *bp = netdev_priv(dev);
5788 if ((ering->rx_pending > MAX_TOTAL_RX_DESC_CNT) ||
5789 (ering->tx_pending > MAX_TX_DESC_CNT) ||
5790 (ering->tx_pending <= MAX_SKB_FRAGS)) {
5794 if (netif_running(bp->dev)) {
5795 bnx2_netif_stop(bp);
5796 bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
5801 bnx2_set_rx_ring_size(bp, ering->rx_pending);
5802 bp->tx_ring_size = ering->tx_pending;
5804 if (netif_running(bp->dev)) {
5807 rc = bnx2_alloc_mem(bp);
5811 bnx2_netif_start(bp);
5818 bnx2_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
5820 struct bnx2 *bp = netdev_priv(dev);
5822 epause->autoneg = ((bp->autoneg & AUTONEG_FLOW_CTRL) != 0);
5823 epause->rx_pause = ((bp->flow_ctrl & FLOW_CTRL_RX) != 0);
5824 epause->tx_pause = ((bp->flow_ctrl & FLOW_CTRL_TX) != 0);
5828 bnx2_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
5830 struct bnx2 *bp = netdev_priv(dev);
5832 bp->req_flow_ctrl = 0;
5833 if (epause->rx_pause)
5834 bp->req_flow_ctrl |= FLOW_CTRL_RX;
5835 if (epause->tx_pause)
5836 bp->req_flow_ctrl |= FLOW_CTRL_TX;
5838 if (epause->autoneg) {
5839 bp->autoneg |= AUTONEG_FLOW_CTRL;
5842 bp->autoneg &= ~AUTONEG_FLOW_CTRL;
5845 spin_lock_bh(&bp->phy_lock);
5847 bnx2_setup_phy(bp, bp->phy_port);
5849 spin_unlock_bh(&bp->phy_lock);
5855 bnx2_get_rx_csum(struct net_device *dev)
5857 struct bnx2 *bp = netdev_priv(dev);
5863 bnx2_set_rx_csum(struct net_device *dev, u32 data)
5865 struct bnx2 *bp = netdev_priv(dev);
5872 bnx2_set_tso(struct net_device *dev, u32 data)
5874 struct bnx2 *bp = netdev_priv(dev);
5877 dev->features |= NETIF_F_TSO | NETIF_F_TSO_ECN;
5878 if (CHIP_NUM(bp) == CHIP_NUM_5709)
5879 dev->features |= NETIF_F_TSO6;
5881 dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO_ECN);
5886 #define BNX2_NUM_STATS 46
5889 char string[ETH_GSTRING_LEN];
5890 } bnx2_stats_str_arr[BNX2_NUM_STATS] = {
5892 { "rx_error_bytes" },
5894 { "tx_error_bytes" },
5895 { "rx_ucast_packets" },
5896 { "rx_mcast_packets" },
5897 { "rx_bcast_packets" },
5898 { "tx_ucast_packets" },
5899 { "tx_mcast_packets" },
5900 { "tx_bcast_packets" },
5901 { "tx_mac_errors" },
5902 { "tx_carrier_errors" },
5903 { "rx_crc_errors" },
5904 { "rx_align_errors" },
5905 { "tx_single_collisions" },
5906 { "tx_multi_collisions" },
5908 { "tx_excess_collisions" },
5909 { "tx_late_collisions" },
5910 { "tx_total_collisions" },
5913 { "rx_undersize_packets" },
5914 { "rx_oversize_packets" },
5915 { "rx_64_byte_packets" },
5916 { "rx_65_to_127_byte_packets" },
5917 { "rx_128_to_255_byte_packets" },
5918 { "rx_256_to_511_byte_packets" },
5919 { "rx_512_to_1023_byte_packets" },
5920 { "rx_1024_to_1522_byte_packets" },
5921 { "rx_1523_to_9022_byte_packets" },
5922 { "tx_64_byte_packets" },
5923 { "tx_65_to_127_byte_packets" },
5924 { "tx_128_to_255_byte_packets" },
5925 { "tx_256_to_511_byte_packets" },
5926 { "tx_512_to_1023_byte_packets" },
5927 { "tx_1024_to_1522_byte_packets" },
5928 { "tx_1523_to_9022_byte_packets" },
5929 { "rx_xon_frames" },
5930 { "rx_xoff_frames" },
5931 { "tx_xon_frames" },
5932 { "tx_xoff_frames" },
5933 { "rx_mac_ctrl_frames" },
5934 { "rx_filtered_packets" },
5936 { "rx_fw_discards" },
5939 #define STATS_OFFSET32(offset_name) (offsetof(struct statistics_block, offset_name) / 4)
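/* bnx2_stats_offset_arr[] gives, for each ethtool statistic string above,
 * the 32-bit word offset of its counter in the statistics block.
 */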
5941 static const unsigned long bnx2_stats_offset_arr[BNX2_NUM_STATS] = {
5942 STATS_OFFSET32(stat_IfHCInOctets_hi),
5943 STATS_OFFSET32(stat_IfHCInBadOctets_hi),
5944 STATS_OFFSET32(stat_IfHCOutOctets_hi),
5945 STATS_OFFSET32(stat_IfHCOutBadOctets_hi),
5946 STATS_OFFSET32(stat_IfHCInUcastPkts_hi),
5947 STATS_OFFSET32(stat_IfHCInMulticastPkts_hi),
5948 STATS_OFFSET32(stat_IfHCInBroadcastPkts_hi),
5949 STATS_OFFSET32(stat_IfHCOutUcastPkts_hi),
5950 STATS_OFFSET32(stat_IfHCOutMulticastPkts_hi),
5951 STATS_OFFSET32(stat_IfHCOutBroadcastPkts_hi),
5952 STATS_OFFSET32(stat_emac_tx_stat_dot3statsinternalmactransmiterrors),
5953 STATS_OFFSET32(stat_Dot3StatsCarrierSenseErrors),
5954 STATS_OFFSET32(stat_Dot3StatsFCSErrors),
5955 STATS_OFFSET32(stat_Dot3StatsAlignmentErrors),
5956 STATS_OFFSET32(stat_Dot3StatsSingleCollisionFrames),
5957 STATS_OFFSET32(stat_Dot3StatsMultipleCollisionFrames),
5958 STATS_OFFSET32(stat_Dot3StatsDeferredTransmissions),
5959 STATS_OFFSET32(stat_Dot3StatsExcessiveCollisions),
5960 STATS_OFFSET32(stat_Dot3StatsLateCollisions),
5961 STATS_OFFSET32(stat_EtherStatsCollisions),
5962 STATS_OFFSET32(stat_EtherStatsFragments),
5963 STATS_OFFSET32(stat_EtherStatsJabbers),
5964 STATS_OFFSET32(stat_EtherStatsUndersizePkts),
5965 STATS_OFFSET32(stat_EtherStatsOverrsizePkts),
5966 STATS_OFFSET32(stat_EtherStatsPktsRx64Octets),
5967 STATS_OFFSET32(stat_EtherStatsPktsRx65Octetsto127Octets),
5968 STATS_OFFSET32(stat_EtherStatsPktsRx128Octetsto255Octets),
5969 STATS_OFFSET32(stat_EtherStatsPktsRx256Octetsto511Octets),
5970 STATS_OFFSET32(stat_EtherStatsPktsRx512Octetsto1023Octets),
5971 STATS_OFFSET32(stat_EtherStatsPktsRx1024Octetsto1522Octets),
5972 STATS_OFFSET32(stat_EtherStatsPktsRx1523Octetsto9022Octets),
5973 STATS_OFFSET32(stat_EtherStatsPktsTx64Octets),
5974 STATS_OFFSET32(stat_EtherStatsPktsTx65Octetsto127Octets),
5975 STATS_OFFSET32(stat_EtherStatsPktsTx128Octetsto255Octets),
5976 STATS_OFFSET32(stat_EtherStatsPktsTx256Octetsto511Octets),
5977 STATS_OFFSET32(stat_EtherStatsPktsTx512Octetsto1023Octets),
5978 STATS_OFFSET32(stat_EtherStatsPktsTx1024Octetsto1522Octets),
5979 STATS_OFFSET32(stat_EtherStatsPktsTx1523Octetsto9022Octets),
5980 STATS_OFFSET32(stat_XonPauseFramesReceived),
5981 STATS_OFFSET32(stat_XoffPauseFramesReceived),
5982 STATS_OFFSET32(stat_OutXonSent),
5983 STATS_OFFSET32(stat_OutXoffSent),
5984 STATS_OFFSET32(stat_MacControlFramesReceived),
5985 STATS_OFFSET32(stat_IfInFramesL2FilterDiscards),
5986 STATS_OFFSET32(stat_IfInMBUFDiscards),
5987 STATS_OFFSET32(stat_FwRxDrop),
5990 /* stat_IfHCInBadOctets and stat_Dot3StatsCarrierSenseErrors are
5991 * skipped because of errata.
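 * A length of 0 below means the counter is skipped, 4 means a 32-bit
 * counter and 8 a 64-bit (hi/lo) counter, in the same order as
 * bnx2_stats_str_arr and bnx2_stats_offset_arr.
 */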
5993 static u8 bnx2_5706_stats_len_arr[BNX2_NUM_STATS] = {
5994 8,0,8,8,8,8,8,8,8,8,
5995 4,0,4,4,4,4,4,4,4,4,
5996 4,4,4,4,4,4,4,4,4,4,
5997 4,4,4,4,4,4,4,4,4,4,
4,4,4,4,4,4,
};
6001 static u8 bnx2_5708_stats_len_arr[BNX2_NUM_STATS] = {
6002 8,0,8,8,8,8,8,8,8,8,
6003 4,4,4,4,4,4,4,4,4,4,
6004 4,4,4,4,4,4,4,4,4,4,
6005 4,4,4,4,4,4,4,4,4,4,
4,4,4,4,4,4,
};
6009 #define BNX2_NUM_TESTS 6
6012 char string[ETH_GSTRING_LEN];
6013 } bnx2_tests_str_arr[BNX2_NUM_TESTS] = {
6014 { "register_test (offline)" },
6015 { "memory_test (offline)" },
6016 { "loopback_test (offline)" },
6017 { "nvram_test (online)" },
6018 { "interrupt_test (online)" },
6019 { "link_test (online)" },
6023 bnx2_get_sset_count(struct net_device *dev, int sset)
6027 return BNX2_NUM_TESTS;
6029 return BNX2_NUM_STATS;
6036 bnx2_self_test(struct net_device *dev, struct ethtool_test *etest, u64 *buf)
6038 struct bnx2 *bp = netdev_priv(dev);
6040 memset(buf, 0, sizeof(u64) * BNX2_NUM_TESTS);
6041 if (etest->flags & ETH_TEST_FL_OFFLINE) {
6044 bnx2_netif_stop(bp);
6045 bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_DIAG);
6048 if (bnx2_test_registers(bp) != 0) {
6050 etest->flags |= ETH_TEST_FL_FAILED;
6052 if (bnx2_test_memory(bp) != 0) {
6054 etest->flags |= ETH_TEST_FL_FAILED;
6056 if ((buf[2] = bnx2_test_loopback(bp)) != 0)
6057 etest->flags |= ETH_TEST_FL_FAILED;
6059 if (!netif_running(bp->dev)) {
6060 bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
6064 bnx2_netif_start(bp);
6067 /* wait for link up */
6068 for (i = 0; i < 7; i++) {
6071 msleep_interruptible(1000);
6075 if (bnx2_test_nvram(bp) != 0) {
6077 etest->flags |= ETH_TEST_FL_FAILED;
6079 if (bnx2_test_intr(bp) != 0) {
6081 etest->flags |= ETH_TEST_FL_FAILED;
6084 if (bnx2_test_link(bp) != 0) {
6086 etest->flags |= ETH_TEST_FL_FAILED;
6092 bnx2_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
6094 switch (stringset) {
6096 memcpy(buf, bnx2_stats_str_arr,
6097 sizeof(bnx2_stats_str_arr));
6100 memcpy(buf, bnx2_tests_str_arr,
6101 sizeof(bnx2_tests_str_arr));
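/* Copy the hardware statistics block into the ethtool buffer, widening
 * 32-bit counters and combining the hi/lo halves of 64-bit counters
 * according to the per-chip length array.
 */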
6107 bnx2_get_ethtool_stats(struct net_device *dev,
6108 struct ethtool_stats *stats, u64 *buf)
6110 struct bnx2 *bp = netdev_priv(dev);
6112 u32 *hw_stats = (u32 *) bp->stats_blk;
6113 u8 *stats_len_arr = NULL;
6115 if (hw_stats == NULL) {
6116 memset(buf, 0, sizeof(u64) * BNX2_NUM_STATS);
6120 if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
6121 (CHIP_ID(bp) == CHIP_ID_5706_A1) ||
6122 (CHIP_ID(bp) == CHIP_ID_5706_A2) ||
6123 (CHIP_ID(bp) == CHIP_ID_5708_A0))
6124 stats_len_arr = bnx2_5706_stats_len_arr;
6126 stats_len_arr = bnx2_5708_stats_len_arr;
6128 for (i = 0; i < BNX2_NUM_STATS; i++) {
6129 if (stats_len_arr[i] == 0) {
6130 /* skip this counter */
6134 if (stats_len_arr[i] == 4) {
6135 /* 4-byte counter */
6137 *(hw_stats + bnx2_stats_offset_arr[i]);
6140 /* 8-byte counter */
6141 buf[i] = (((u64) *(hw_stats +
6142 bnx2_stats_offset_arr[i])) << 32) +
6143 *(hw_stats + bnx2_stats_offset_arr[i] + 1);
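/* Blink the port LED for ethtool -p: override the LED controls for `data'
 * one-second cycles, then restore the original BNX2_MISC_CFG setting.
 */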
6148 bnx2_phys_id(struct net_device *dev, u32 data)
6150 struct bnx2 *bp = netdev_priv(dev);
6157 save = REG_RD(bp, BNX2_MISC_CFG);
6158 REG_WR(bp, BNX2_MISC_CFG, BNX2_MISC_CFG_LEDMODE_MAC);
6160 for (i = 0; i < (data * 2); i++) {
6162 REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE);
6165 REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE |
6166 BNX2_EMAC_LED_1000MB_OVERRIDE |
6167 BNX2_EMAC_LED_100MB_OVERRIDE |
6168 BNX2_EMAC_LED_10MB_OVERRIDE |
6169 BNX2_EMAC_LED_TRAFFIC_OVERRIDE |
6170 BNX2_EMAC_LED_TRAFFIC);
6172 msleep_interruptible(500);
6173 if (signal_pending(current))
6176 REG_WR(bp, BNX2_EMAC_LED, 0);
6177 REG_WR(bp, BNX2_MISC_CFG, save);
6182 bnx2_set_tx_csum(struct net_device *dev, u32 data)
6184 struct bnx2 *bp = netdev_priv(dev);
6186 if (CHIP_NUM(bp) == CHIP_NUM_5709)
6187 return (ethtool_op_set_tx_ipv6_csum(dev, data));
6189 return (ethtool_op_set_tx_csum(dev, data));
6192 static const struct ethtool_ops bnx2_ethtool_ops = {
6193 .get_settings = bnx2_get_settings,
6194 .set_settings = bnx2_set_settings,
6195 .get_drvinfo = bnx2_get_drvinfo,
6196 .get_regs_len = bnx2_get_regs_len,
6197 .get_regs = bnx2_get_regs,
6198 .get_wol = bnx2_get_wol,
6199 .set_wol = bnx2_set_wol,
6200 .nway_reset = bnx2_nway_reset,
6201 .get_link = ethtool_op_get_link,
6202 .get_eeprom_len = bnx2_get_eeprom_len,
6203 .get_eeprom = bnx2_get_eeprom,
6204 .set_eeprom = bnx2_set_eeprom,
6205 .get_coalesce = bnx2_get_coalesce,
6206 .set_coalesce = bnx2_set_coalesce,
6207 .get_ringparam = bnx2_get_ringparam,
6208 .set_ringparam = bnx2_set_ringparam,
6209 .get_pauseparam = bnx2_get_pauseparam,
6210 .set_pauseparam = bnx2_set_pauseparam,
6211 .get_rx_csum = bnx2_get_rx_csum,
6212 .set_rx_csum = bnx2_set_rx_csum,
6213 .set_tx_csum = bnx2_set_tx_csum,
6214 .set_sg = ethtool_op_set_sg,
6215 .set_tso = bnx2_set_tso,
6216 .self_test = bnx2_self_test,
6217 .get_strings = bnx2_get_strings,
6218 .phys_id = bnx2_phys_id,
6219 .get_ethtool_stats = bnx2_get_ethtool_stats,
6220 .get_sset_count = bnx2_get_sset_count,
6223 /* Called with rtnl_lock */
6225 bnx2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
6227 struct mii_ioctl_data *data = if_mii(ifr);
6228 struct bnx2 *bp = netdev_priv(dev);
6233 data->phy_id = bp->phy_addr;
6239 if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
6242 if (!netif_running(dev))
6245 spin_lock_bh(&bp->phy_lock);
6246 err = bnx2_read_phy(bp, data->reg_num & 0x1f, &mii_regval);
6247 spin_unlock_bh(&bp->phy_lock);
6249 data->val_out = mii_regval;
6255 if (!capable(CAP_NET_ADMIN))
6258 if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
6261 if (!netif_running(dev))
6264 spin_lock_bh(&bp->phy_lock);
6265 err = bnx2_write_phy(bp, data->reg_num & 0x1f, data->val_in);
6266 spin_unlock_bh(&bp->phy_lock);
6277 /* Called with rtnl_lock */
6279 bnx2_change_mac_addr(struct net_device *dev, void *p)
6281 struct sockaddr *addr = p;
6282 struct bnx2 *bp = netdev_priv(dev);
6284 if (!is_valid_ether_addr(addr->sa_data))
6287 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
6288 if (netif_running(dev))
6289 bnx2_set_mac_addr(bp);
6294 /* Called with rtnl_lock */
6296 bnx2_change_mtu(struct net_device *dev, int new_mtu)
6298 struct bnx2 *bp = netdev_priv(dev);
6300 if (((new_mtu + ETH_HLEN) > MAX_ETHERNET_JUMBO_PACKET_SIZE) ||
6301 ((new_mtu + ETH_HLEN) < MIN_ETHERNET_PACKET_SIZE))
6305 if (netif_running(dev)) {
6306 bnx2_netif_stop(bp);
6310 bnx2_netif_start(bp);
6315 #if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
6317 poll_bnx2(struct net_device *dev)
6319 struct bnx2 *bp = netdev_priv(dev);
6321 disable_irq(bp->pdev->irq);
6322 bnx2_interrupt(bp->pdev->irq, dev);
6323 enable_irq(bp->pdev->irq);
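/* Determine copper vs. SerDes media on the 5709 from the bond id and, for
 * dual-media parts, from the strap (or its software override) and the PCI
 * function number.
 */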
6327 static void __devinit
6328 bnx2_get_5709_media(struct bnx2 *bp)
6330 u32 val = REG_RD(bp, BNX2_MISC_DUAL_MEDIA_CTRL);
6331 u32 bond_id = val & BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID;
6334 if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_C)
6336 else if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_S) {
6337 bp->phy_flags |= PHY_SERDES_FLAG;
6341 if (val & BNX2_MISC_DUAL_MEDIA_CTRL_STRAP_OVERRIDE)
6342 strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL) >> 21;
6344 strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL_STRAP) >> 8;
6346 if (PCI_FUNC(bp->pdev->devfn) == 0) {
6351 bp->phy_flags |= PHY_SERDES_FLAG;
6359 bp->phy_flags |= PHY_SERDES_FLAG;
6365 static void __devinit
6366 bnx2_get_pci_speed(struct bnx2 *bp)
6370 reg = REG_RD(bp, BNX2_PCICFG_MISC_STATUS);
6371 if (reg & BNX2_PCICFG_MISC_STATUS_PCIX_DET) {
6374 bp->flags |= PCIX_FLAG;
6376 clkreg = REG_RD(bp, BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS);
6378 clkreg &= BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET;
6380 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_133MHZ:
6381 bp->bus_speed_mhz = 133;
6384 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_95MHZ:
6385 bp->bus_speed_mhz = 100;
6388 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_66MHZ:
6389 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_80MHZ:
6390 bp->bus_speed_mhz = 66;
6393 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_48MHZ:
6394 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_55MHZ:
6395 bp->bus_speed_mhz = 50;
6398 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_LOW:
6399 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_32MHZ:
6400 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_38MHZ:
6401 bp->bus_speed_mhz = 33;
6406 if (reg & BNX2_PCICFG_MISC_STATUS_M66EN)
6407 bp->bus_speed_mhz = 66;
6409 bp->bus_speed_mhz = 33;
6412 if (reg & BNX2_PCICFG_MISC_STATUS_32BIT_DET)
6413 bp->flags |= PCI_32BIT_FLAG;
6417 static int __devinit
6418 bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
6421 unsigned long mem_len;
6424 u64 dma_mask, persist_dma_mask;
6426 SET_NETDEV_DEV(dev, &pdev->dev);
6427 bp = netdev_priv(dev);
6432 /* enable device (incl. PCI PM wakeup), and bus-mastering */
6433 rc = pci_enable_device(pdev);
6435 dev_err(&pdev->dev, "Cannot enable PCI device, aborting.\n");
6439 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
6441 "Cannot find PCI device base address, aborting.\n");
6443 goto err_out_disable;
6446 rc = pci_request_regions(pdev, DRV_MODULE_NAME);
6448 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting.\n");
6449 goto err_out_disable;
6452 pci_set_master(pdev);
6454 bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
6455 if (bp->pm_cap == 0) {
6457 "Cannot find power management capability, aborting.\n");
6459 goto err_out_release;
6465 spin_lock_init(&bp->phy_lock);
6466 spin_lock_init(&bp->indirect_lock);
6467 INIT_WORK(&bp->reset_task, bnx2_reset_task);
6469 dev->base_addr = dev->mem_start = pci_resource_start(pdev, 0);
6470 mem_len = MB_GET_CID_ADDR(TX_TSS_CID + 1);
6471 dev->mem_end = dev->mem_start + mem_len;
6472 dev->irq = pdev->irq;
6474 bp->regview = ioremap_nocache(dev->base_addr, mem_len);
6477 dev_err(&pdev->dev, "Cannot map register space, aborting.\n");
6479 goto err_out_release;
6482 /* Configure byte swap and enable write to the reg_window registers.
6483 * Rely on CPU to do target byte swapping on big endian systems
6484 * The chip's target access swapping will not swap all accesses
6486 pci_write_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG,
6487 BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
6488 BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP);
6490 bnx2_set_power_state(bp, PCI_D0);
6492 bp->chip_id = REG_RD(bp, BNX2_MISC_ID);
6494 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
6495 if (pci_find_capability(pdev, PCI_CAP_ID_EXP) == 0) {
6497 "Cannot find PCIE capability, aborting.\n");
6501 bp->flags |= PCIE_FLAG;
6503 bp->pcix_cap = pci_find_capability(pdev, PCI_CAP_ID_PCIX);
6504 if (bp->pcix_cap == 0) {
6506 "Cannot find PCIX capability, aborting.\n");
6512 if (CHIP_ID(bp) != CHIP_ID_5706_A0 && CHIP_ID(bp) != CHIP_ID_5706_A1) {
6513 if (pci_find_capability(pdev, PCI_CAP_ID_MSI))
6514 bp->flags |= MSI_CAP_FLAG;
6517 /* 5708 cannot support DMA addresses > 40-bit. */
6518 if (CHIP_NUM(bp) == CHIP_NUM_5708)
6519 persist_dma_mask = dma_mask = DMA_40BIT_MASK;
6521 persist_dma_mask = dma_mask = DMA_64BIT_MASK;
6523 /* Configure DMA attributes. */
6524 if (pci_set_dma_mask(pdev, dma_mask) == 0) {
6525 dev->features |= NETIF_F_HIGHDMA;
6526 rc = pci_set_consistent_dma_mask(pdev, persist_dma_mask);
6529 "pci_set_consistent_dma_mask failed, aborting.\n");
6532 } else if ((rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK)) != 0) {
6533 dev_err(&pdev->dev, "System does not support DMA, aborting.\n");
6537 if (!(bp->flags & PCIE_FLAG))
6538 bnx2_get_pci_speed(bp);
6540 /* 5706A0 may falsely detect SERR and PERR. */
6541 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
6542 reg = REG_RD(bp, PCI_COMMAND);
6543 reg &= ~(PCI_COMMAND_SERR | PCI_COMMAND_PARITY);
6544 REG_WR(bp, PCI_COMMAND, reg);
6546 else if ((CHIP_ID(bp) == CHIP_ID_5706_A1) &&
6547 !(bp->flags & PCIX_FLAG)) {
6550 "5706 A1 can only be used in a PCIX bus, aborting.\n");
6554 bnx2_init_nvram(bp);
6556 reg = REG_RD_IND(bp, BNX2_SHM_HDR_SIGNATURE);
6558 if ((reg & BNX2_SHM_HDR_SIGNATURE_SIG_MASK) ==
6559 BNX2_SHM_HDR_SIGNATURE_SIG) {
6560 u32 off = PCI_FUNC(pdev->devfn) << 2;
6562 bp->shmem_base = REG_RD_IND(bp, BNX2_SHM_HDR_ADDR_0 + off);
6564 bp->shmem_base = HOST_VIEW_SHMEM_BASE;
6566 /* Get the permanent MAC address. First we need to make sure the
6567 * firmware is actually running.
6569 reg = REG_RD_IND(bp, bp->shmem_base + BNX2_DEV_INFO_SIGNATURE);
6571 if ((reg & BNX2_DEV_INFO_SIGNATURE_MAGIC_MASK) !=
6572 BNX2_DEV_INFO_SIGNATURE_MAGIC) {
6573 dev_err(&pdev->dev, "Firmware not running, aborting.\n");
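/* Decode the bootcode revision word into a dotted version string,
 * dropping leading zeroes in each of the three fields.
 */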
6578 reg = REG_RD_IND(bp, bp->shmem_base + BNX2_DEV_INFO_BC_REV);
6579 for (i = 0, j = 0; i < 3; i++) {
6582 num = (u8) (reg >> (24 - (i * 8)));
6583 for (k = 100, skip0 = 1; k >= 1; num %= k, k /= 10) {
6584 if (num >= k || !skip0 || k == 1) {
6585 bp->fw_version[j++] = (num / k) + '0';
6590 bp->fw_version[j++] = '.';
6592 reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_FEATURE);
6593 if (reg & BNX2_PORT_FEATURE_WOL_ENABLED)
6596 if (reg & BNX2_PORT_FEATURE_ASF_ENABLED) {
6597 bp->flags |= ASF_ENABLE_FLAG;
6599 for (i = 0; i < 30; i++) {
6600 reg = REG_RD_IND(bp, bp->shmem_base +
6601 BNX2_BC_STATE_CONDITION);
6602 if (reg & BNX2_CONDITION_MFW_RUN_MASK)
6607 reg = REG_RD_IND(bp, bp->shmem_base + BNX2_BC_STATE_CONDITION);
6608 reg &= BNX2_CONDITION_MFW_RUN_MASK;
6609 if (reg != BNX2_CONDITION_MFW_RUN_UNKNOWN &&
6610 reg != BNX2_CONDITION_MFW_RUN_NONE) {
6612 u32 addr = REG_RD_IND(bp, bp->shmem_base + BNX2_MFW_VER_PTR);
6614 bp->fw_version[j++] = ' ';
6615 for (i = 0; i < 3; i++) {
6616 reg = REG_RD_IND(bp, addr + i * 4);
6618 memcpy(&bp->fw_version[j], &reg, 4);
6623 reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_MAC_UPPER);
6624 bp->mac_addr[0] = (u8) (reg >> 8);
6625 bp->mac_addr[1] = (u8) reg;
6627 reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_MAC_LOWER);
6628 bp->mac_addr[2] = (u8) (reg >> 24);
6629 bp->mac_addr[3] = (u8) (reg >> 16);
6630 bp->mac_addr[4] = (u8) (reg >> 8);
6631 bp->mac_addr[5] = (u8) reg;
6633 bp->tx_ring_size = MAX_TX_DESC_CNT;
6634 bnx2_set_rx_ring_size(bp, 255);
6638 bp->rx_offset = sizeof(struct l2_fhdr) + 2;
6640 bp->tx_quick_cons_trip_int = 20;
6641 bp->tx_quick_cons_trip = 20;
6642 bp->tx_ticks_int = 80;
6645 bp->rx_quick_cons_trip_int = 6;
6646 bp->rx_quick_cons_trip = 6;
6647 bp->rx_ticks_int = 18;
6650 bp->stats_ticks = USEC_PER_SEC & BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
6652 bp->timer_interval = HZ;
6653 bp->current_interval = HZ;
6657 /* Disable WOL support if we are running on a SERDES chip. */
6658 if (CHIP_NUM(bp) == CHIP_NUM_5709)
6659 bnx2_get_5709_media(bp);
6660 else if (CHIP_BOND_ID(bp) & CHIP_BOND_ID_SERDES_BIT)
6661 bp->phy_flags |= PHY_SERDES_FLAG;
6663 bp->phy_port = PORT_TP;
6664 if (bp->phy_flags & PHY_SERDES_FLAG) {
6665 bp->phy_port = PORT_FIBRE;
6666 reg = REG_RD_IND(bp, bp->shmem_base +
6667 BNX2_SHARED_HW_CFG_CONFIG);
6668 if (!(reg & BNX2_SHARED_HW_CFG_GIG_LINK_ON_VAUX)) {
6669 bp->flags |= NO_WOL_FLAG;
6672 if (CHIP_NUM(bp) != CHIP_NUM_5706) {
6674 if (reg & BNX2_SHARED_HW_CFG_PHY_2_5G)
6675 bp->phy_flags |= PHY_2_5G_CAPABLE_FLAG;
6677 bnx2_init_remote_phy(bp);
6679 } else if (CHIP_NUM(bp) == CHIP_NUM_5706 ||
6680 CHIP_NUM(bp) == CHIP_NUM_5708)
6681 bp->phy_flags |= PHY_CRC_FIX_FLAG;
6682 else if (CHIP_NUM(bp) == CHIP_NUM_5709 &&
6683 (CHIP_REV(bp) == CHIP_REV_Ax ||
6684 CHIP_REV(bp) == CHIP_REV_Bx))
6685 bp->phy_flags |= PHY_DIS_EARLY_DAC_FLAG;
6687 if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
6688 (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
6689 (CHIP_ID(bp) == CHIP_ID_5708_B1)) {
6690 bp->flags |= NO_WOL_FLAG;
6694 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
6695 bp->tx_quick_cons_trip_int =
6696 bp->tx_quick_cons_trip;
6697 bp->tx_ticks_int = bp->tx_ticks;
6698 bp->rx_quick_cons_trip_int =
6699 bp->rx_quick_cons_trip;
6700 bp->rx_ticks_int = bp->rx_ticks;
6701 bp->comp_prod_trip_int = bp->comp_prod_trip;
6702 bp->com_ticks_int = bp->com_ticks;
6703 bp->cmd_ticks_int = bp->cmd_ticks;
6706 /* Disable MSI on 5706 if AMD 8132 bridge is found.
6708 * MSI is defined to be 32-bit write. The 5706 does 64-bit MSI writes
6709 * with byte enables disabled on the unused 32-bit word. This is legal
6710 * but causes problems on the AMD 8132 which will eventually stop
6711 * responding after a while.
6713 * AMD believes this incompatibility is unique to the 5706, and
6714 * prefers to locally disable MSI rather than globally disabling it.
6716 if (CHIP_NUM(bp) == CHIP_NUM_5706 && disable_msi == 0) {
6717 struct pci_dev *amd_8132 = NULL;
6719 while ((amd_8132 = pci_get_device(PCI_VENDOR_ID_AMD,
6720 PCI_DEVICE_ID_AMD_8132_BRIDGE,
6723 if (amd_8132->revision >= 0x10 &&
6724 amd_8132->revision <= 0x13) {
6726 pci_dev_put(amd_8132);
6732 bnx2_set_default_link(bp);
6733 bp->req_flow_ctrl = FLOW_CTRL_RX | FLOW_CTRL_TX;
6735 init_timer(&bp->timer);
6736 bp->timer.expires = RUN_AT(bp->timer_interval);
6737 bp->timer.data = (unsigned long) bp;
6738 bp->timer.function = bnx2_timer;
6744 iounmap(bp->regview);
6749 pci_release_regions(pdev);
6752 pci_disable_device(pdev);
6753 pci_set_drvdata(pdev, NULL);
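/* Format a human-readable bus description ("PCI Express" or
 * "PCI[-X] 32/64-bit <n>MHz") into the caller-supplied buffer.
 */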
6759 static char * __devinit
6760 bnx2_bus_string(struct bnx2 *bp, char *str)
6764 if (bp->flags & PCIE_FLAG) {
6765 s += sprintf(s, "PCI Express");
6767 s += sprintf(s, "PCI");
6768 if (bp->flags & PCIX_FLAG)
6769 s += sprintf(s, "-X");
6770 if (bp->flags & PCI_32BIT_FLAG)
6771 s += sprintf(s, " 32-bit");
6773 s += sprintf(s, " 64-bit");
6774 s += sprintf(s, " %dMHz", bp->bus_speed_mhz);
6779 static int __devinit
6780 bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
6782 static int version_printed = 0;
6783 struct net_device *dev = NULL;
6787 DECLARE_MAC_BUF(mac);
6789 if (version_printed++ == 0)
6790 printk(KERN_INFO "%s", version);
6792 /* dev zeroed in init_etherdev */
6793 dev = alloc_etherdev(sizeof(*bp));
6798 rc = bnx2_init_board(pdev, dev);
6804 dev->open = bnx2_open;
6805 dev->hard_start_xmit = bnx2_start_xmit;
6806 dev->stop = bnx2_close;
6807 dev->get_stats = bnx2_get_stats;
6808 dev->set_multicast_list = bnx2_set_rx_mode;
6809 dev->do_ioctl = bnx2_ioctl;
6810 dev->set_mac_address = bnx2_change_mac_addr;
6811 dev->change_mtu = bnx2_change_mtu;
6812 dev->tx_timeout = bnx2_tx_timeout;
6813 dev->watchdog_timeo = TX_TIMEOUT;
6815 dev->vlan_rx_register = bnx2_vlan_rx_register;
6817 dev->ethtool_ops = &bnx2_ethtool_ops;
6819 bp = netdev_priv(dev);
6820 netif_napi_add(dev, &bp->napi, bnx2_poll, 64);
6822 #if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
6823 dev->poll_controller = poll_bnx2;
6826 pci_set_drvdata(pdev, dev);
6828 memcpy(dev->dev_addr, bp->mac_addr, 6);
6829 memcpy(dev->perm_addr, bp->mac_addr, 6);
6830 bp->name = board_info[ent->driver_data].name;
6832 dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
6833 if (CHIP_NUM(bp) == CHIP_NUM_5709)
6834 dev->features |= NETIF_F_IPV6_CSUM;
6837 dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
6839 dev->features |= NETIF_F_TSO | NETIF_F_TSO_ECN;
6840 if (CHIP_NUM(bp) == CHIP_NUM_5709)
6841 dev->features |= NETIF_F_TSO6;
6843 if ((rc = register_netdev(dev))) {
6844 dev_err(&pdev->dev, "Cannot register net device\n");
6846 iounmap(bp->regview);
6847 pci_release_regions(pdev);
6848 pci_disable_device(pdev);
6849 pci_set_drvdata(pdev, NULL);
6854 printk(KERN_INFO "%s: %s (%c%d) %s found at mem %lx, "
6855 "IRQ %d, node addr %s\n",
6858 ((CHIP_ID(bp) & 0xf000) >> 12) + 'A',
6859 ((CHIP_ID(bp) & 0x0ff0) >> 4),
6860 bnx2_bus_string(bp, str),
6862 bp->pdev->irq, print_mac(mac, dev->dev_addr));
6867 static void __devexit
6868 bnx2_remove_one(struct pci_dev *pdev)
6870 struct net_device *dev = pci_get_drvdata(pdev);
6871 struct bnx2 *bp = netdev_priv(dev);
6873 flush_scheduled_work();
6875 unregister_netdev(dev);
6878 iounmap(bp->regview);
6881 pci_release_regions(pdev);
6882 pci_disable_device(pdev);
6883 pci_set_drvdata(pdev, NULL);
6887 bnx2_suspend(struct pci_dev *pdev, pm_message_t state)
6889 struct net_device *dev = pci_get_drvdata(pdev);
6890 struct bnx2 *bp = netdev_priv(dev);
6893 /* PCI register 4 needs to be saved whether netif_running() or not.
6894 * MSI address and data need to be saved if using MSI and netif_running(). */
6897 pci_save_state(pdev);
6898 if (!netif_running(dev))
6901 flush_scheduled_work();
6902 bnx2_netif_stop(bp);
6903 netif_device_detach(dev);
6904 del_timer_sync(&bp->timer);
6905 if (bp->flags & NO_WOL_FLAG)
6906 reset_code = BNX2_DRV_MSG_CODE_UNLOAD_LNK_DN;
6908 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
6910 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
6911 bnx2_reset_chip(bp, reset_code);
6913 bnx2_set_power_state(bp, pci_choose_state(pdev, state));
6918 bnx2_resume(struct pci_dev *pdev)
6920 struct net_device *dev = pci_get_drvdata(pdev);
6921 struct bnx2 *bp = netdev_priv(dev);
6923 pci_restore_state(pdev);
6924 if (!netif_running(dev))
6927 bnx2_set_power_state(bp, PCI_D0);
6928 netif_device_attach(dev);
6930 bnx2_netif_start(bp);
6934 static struct pci_driver bnx2_pci_driver = {
6935 .name = DRV_MODULE_NAME,
6936 .id_table = bnx2_pci_tbl,
6937 .probe = bnx2_init_one,
6938 .remove = __devexit_p(bnx2_remove_one),
6939 .suspend = bnx2_suspend,
6940 .resume = bnx2_resume,
6943 static int __init bnx2_init(void)
6945 return pci_register_driver(&bnx2_pci_driver);
6948 static void __exit bnx2_cleanup(void)
6950 pci_unregister_driver(&bnx2_pci_driver);
6953 module_init(bnx2_init);
6954 module_exit(bnx2_cleanup);