/*
 * QLogic QLA3xxx NIC HBA Driver
 * Copyright (c)  2003-2006 QLogic Corporation
 * See LICENSE.qla3xxx for copyright and licensing details.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/dmapool.h>
#include <linux/mempool.h>
#include <linux/spinlock.h>
#include <linux/kthread.h>
#include <linux/interrupt.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/if_arp.h>
#include <linux/if_ether.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <linux/if_vlan.h>
#include <linux/delay.h>
#define DRV_NAME	"qla3xxx"
#define DRV_STRING	"QLogic ISP3XXX Network Driver"
#define DRV_VERSION	"v2.03.00-k5"

static const char ql3xxx_driver_name[] = DRV_NAME;
static const char ql3xxx_driver_version[] = DRV_VERSION;

#define TIMED_OUT_MSG							\
"Timed out waiting for management port to get free before issuing command\n"

MODULE_AUTHOR("QLogic Corporation");
MODULE_DESCRIPTION("QLogic ISP3XXX Network Driver " DRV_VERSION " ");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
static const u32 default_msg
    = NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK
    | NETIF_MSG_IFUP | NETIF_MSG_IFDOWN;

static int debug = -1;		/* defaults above */
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");

static int msi;
module_param(msi, int, 0);
MODULE_PARM_DESC(msi, "Turn on Message Signaled Interrupts.");
static DEFINE_PCI_DEVICE_TABLE(ql3xxx_pci_tbl) = {
	{PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QL3022_DEVICE_ID)},
	{PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QL3032_DEVICE_ID)},
	/* required last entry */
	{0,},
};

MODULE_DEVICE_TABLE(pci, ql3xxx_pci_tbl);
/*
 * These are the known PHYs which are used
 */
typedef enum {
	PHY_TYPE_UNKNOWN = 0,
	PHY_VITESSE_VSC8211,
	PHY_AGERE_ET1011C,
	MAX_PHY_DEV_TYPES
} PHY_DEVICE_et;

typedef struct {
	PHY_DEVICE_et	phyDevice;
	u32		phyIdOUI;
	u16		phyIdModel;
	char		*name;
} PHY_DEVICE_INFO_t;

static const PHY_DEVICE_INFO_t PHY_DEVICES[] = {
	{PHY_TYPE_UNKNOWN,    0x000000, 0x0, "PHY_TYPE_UNKNOWN"},
	{PHY_VITESSE_VSC8211, 0x0003f1, 0xb, "PHY_VITESSE_VSC8211"},
	{PHY_AGERE_ET1011C,   0x00a0bc, 0x1, "PHY_AGERE_ET1011C"},
};
/*
 * Caller must take hw_lock.
 */
static int ql_sem_spinlock(struct ql3_adapter *qdev,
			   u32 sem_mask, u32 sem_bits)
{
	struct ql3xxx_port_registers __iomem *port_regs =
			qdev->mem_map_registers;
	u32 value;
	unsigned int seconds = 3;
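
	/*
	 * The top half of the semaphore register selects which bits the
	 * write below may touch, so writing (sem_mask | sem_bits) updates
	 * only this resource's field.  The read-back check confirms that
	 * this function, and not the other port, now owns the semaphore.
	 */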
	do {
		writel((sem_mask | sem_bits),
		       &port_regs->CommonRegs.semaphoreReg);
		value = readl(&port_regs->CommonRegs.semaphoreReg);
		if ((value & (sem_mask >> 16)) == sem_bits)
			return 0;
		ssleep(1);
	} while (--seconds);

	return -1;
}

static void ql_sem_unlock(struct ql3_adapter *qdev, u32 sem_mask)
{
	struct ql3xxx_port_registers __iomem *port_regs =
			qdev->mem_map_registers;

	writel(sem_mask, &port_regs->CommonRegs.semaphoreReg);
	readl(&port_regs->CommonRegs.semaphoreReg);
}

static int ql_sem_lock(struct ql3_adapter *qdev, u32 sem_mask, u32 sem_bits)
{
	struct ql3xxx_port_registers __iomem *port_regs =
			qdev->mem_map_registers;
	u32 value;

	writel((sem_mask | sem_bits), &port_regs->CommonRegs.semaphoreReg);
	value = readl(&port_regs->CommonRegs.semaphoreReg);
	return ((value & (sem_mask >> 16)) == sem_bits);
}
/*
 * Caller holds hw_lock.
 */
static int ql_wait_for_drvr_lock(struct ql3_adapter *qdev)
{
	int i = 0;

	do {
		if (ql_sem_lock(qdev, QL_DRVR_SEM_MASK,
				(QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index)
				 * 2) << 1)) {
			netdev_printk(KERN_DEBUG, qdev->ndev,
				      "driver lock acquired\n");
			return 1;
		}
		ssleep(1);
	} while (++i < 10);

	netdev_err(qdev->ndev, "Timed out waiting for driver lock...\n");
	return 0;
}
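
/*
 * The ISP register file is paged: ispControlStatus selects the page that
 * the page0/page1/page2 accessors below operate on, and the most recently
 * selected page is cached in qdev->current_page so redundant page flips
 * can be skipped.
 */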
static void ql_set_register_page(struct ql3_adapter *qdev, u32 page)
{
	struct ql3xxx_port_registers __iomem *port_regs =
			qdev->mem_map_registers;

	writel(((ISP_CONTROL_NP_MASK << 16) | page),
	       &port_regs->CommonRegs.ispControlStatus);
	readl(&port_regs->CommonRegs.ispControlStatus);
	qdev->current_page = page;
}

static u32 ql_read_common_reg_l(struct ql3_adapter *qdev, u32 __iomem *reg)
{
	u32 value;
	unsigned long hw_flags;

	spin_lock_irqsave(&qdev->hw_lock, hw_flags);
	value = readl(reg);
	spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);

	return value;
}

static u32 ql_read_common_reg(struct ql3_adapter *qdev, u32 __iomem *reg)
{
	return readl(reg);
}

static u32 ql_read_page0_reg_l(struct ql3_adapter *qdev, u32 __iomem *reg)
{
	u32 value;
	unsigned long hw_flags;

	spin_lock_irqsave(&qdev->hw_lock, hw_flags);

	if (qdev->current_page != 0)
		ql_set_register_page(qdev, 0);
	value = readl(reg);

	spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
	return value;
}

static u32 ql_read_page0_reg(struct ql3_adapter *qdev, u32 __iomem *reg)
{
	if (qdev->current_page != 0)
		ql_set_register_page(qdev, 0);
	return readl(reg);
}

static void ql_write_common_reg_l(struct ql3_adapter *qdev,
				  u32 __iomem *reg, u32 value)
{
	unsigned long hw_flags;

	spin_lock_irqsave(&qdev->hw_lock, hw_flags);
	writel(value, reg);
	readl(reg);
	spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
}

static void ql_write_common_reg(struct ql3_adapter *qdev,
				u32 __iomem *reg, u32 value)
{
	writel(value, reg);
	readl(reg);
}

static void ql_write_nvram_reg(struct ql3_adapter *qdev,
			       u32 __iomem *reg, u32 value)
{
	writel(value, reg);
	readl(reg);
	udelay(1);
}

static void ql_write_page0_reg(struct ql3_adapter *qdev,
			       u32 __iomem *reg, u32 value)
{
	if (qdev->current_page != 0)
		ql_set_register_page(qdev, 0);
	writel(value, reg);
	readl(reg);
}

/*
 * Caller holds hw_lock. Only called during init.
 */
static void ql_write_page1_reg(struct ql3_adapter *qdev,
			       u32 __iomem *reg, u32 value)
{
	if (qdev->current_page != 1)
		ql_set_register_page(qdev, 1);
	writel(value, reg);
	readl(reg);
}

/*
 * Caller holds hw_lock. Only called during init.
 */
static void ql_write_page2_reg(struct ql3_adapter *qdev,
			       u32 __iomem *reg, u32 value)
{
	if (qdev->current_page != 2)
		ql_set_register_page(qdev, 2);
	writel(value, reg);
	readl(reg);
}

static void ql_disable_interrupts(struct ql3_adapter *qdev)
{
	struct ql3xxx_port_registers __iomem *port_regs =
			qdev->mem_map_registers;

	ql_write_common_reg_l(qdev, &port_regs->CommonRegs.ispInterruptMaskReg,
			      (ISP_IMR_ENABLE_INT << 16));
}

static void ql_enable_interrupts(struct ql3_adapter *qdev)
{
	struct ql3xxx_port_registers __iomem *port_regs =
			qdev->mem_map_registers;

	ql_write_common_reg_l(qdev, &port_regs->CommonRegs.ispInterruptMaskReg,
			      ((0xff << 16) | ISP_IMR_ENABLE_INT));
}
static void ql_release_to_lrg_buf_free_list(struct ql3_adapter *qdev,
					    struct ql_rcv_buf_cb *lrg_buf_cb)
{
	dma_addr_t map;
	int err;

	lrg_buf_cb->next = NULL;

	if (qdev->lrg_buf_free_tail == NULL) {	/* The list is empty */
		qdev->lrg_buf_free_head = qdev->lrg_buf_free_tail = lrg_buf_cb;
	} else {
		qdev->lrg_buf_free_tail->next = lrg_buf_cb;
		qdev->lrg_buf_free_tail = lrg_buf_cb;
	}

	if (!lrg_buf_cb->skb) {
		lrg_buf_cb->skb = netdev_alloc_skb(qdev->ndev,
						   qdev->lrg_buffer_len);
		if (unlikely(!lrg_buf_cb->skb)) {
			netdev_err(qdev->ndev, "failed netdev_alloc_skb()\n");
			qdev->lrg_buf_skb_check++;
		} else {
			/*
			 * We save some space to copy the ethhdr from the
			 * first buffer
			 */
			skb_reserve(lrg_buf_cb->skb, QL_HEADER_SPACE);
			map = pci_map_single(qdev->pdev,
					     lrg_buf_cb->skb->data,
					     qdev->lrg_buffer_len -
					     QL_HEADER_SPACE,
					     PCI_DMA_FROMDEVICE);
			err = pci_dma_mapping_error(qdev->pdev, map);
			if (err) {
				netdev_err(qdev->ndev,
					   "PCI mapping failed with error: %d\n",
					   err);
				dev_kfree_skb(lrg_buf_cb->skb);
				lrg_buf_cb->skb = NULL;

				qdev->lrg_buf_skb_check++;
			} else {
				lrg_buf_cb->buf_phy_addr_low =
					cpu_to_le32(LS_64BITS(map));
				lrg_buf_cb->buf_phy_addr_high =
					cpu_to_le32(MS_64BITS(map));
				dma_unmap_addr_set(lrg_buf_cb, mapaddr, map);
				dma_unmap_len_set(lrg_buf_cb, maplen,
						  qdev->lrg_buffer_len -
						  QL_HEADER_SPACE);
			}
		}
	}

	qdev->lrg_buf_free_count++;
}
static struct ql_rcv_buf_cb *ql_get_from_lrg_buf_free_list(struct ql3_adapter
							   *qdev)
{
	struct ql_rcv_buf_cb *lrg_buf_cb;

	if ((lrg_buf_cb = qdev->lrg_buf_free_head) != NULL) {
		if ((qdev->lrg_buf_free_head = lrg_buf_cb->next) == NULL)
			qdev->lrg_buf_free_tail = NULL;
		qdev->lrg_buf_free_count--;
	}

	return lrg_buf_cb;
}

static u32 addrBits = EEPROM_NO_ADDR_BITS;
static u32 dataBits = EEPROM_NO_DATA_BITS;

static void fm93c56a_deselect(struct ql3_adapter *qdev);
static void eeprom_readword(struct ql3_adapter *qdev, u32 eepromAddr,
			    unsigned short *value);
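
/*
 * The fm93c56a_* helpers below bit-bang the EEPROM's serial protocol
 * through serialPortInterfaceReg: chip select is asserted, each command
 * and address bit is presented on the DO line MSB-first and latched with
 * an explicit clock rise/fall pair, and data is sampled back on DI.
 */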
/*
 * Caller holds hw_lock.
 */
static void fm93c56a_select(struct ql3_adapter *qdev)
{
	struct ql3xxx_port_registers __iomem *port_regs =
			qdev->mem_map_registers;

	qdev->eeprom_cmd_data = AUBURN_EEPROM_CS_1;
	ql_write_nvram_reg(qdev, &port_regs->CommonRegs.serialPortInterfaceReg,
			   ISP_NVRAM_MASK | qdev->eeprom_cmd_data);
	ql_write_nvram_reg(qdev, &port_regs->CommonRegs.serialPortInterfaceReg,
			   ((ISP_NVRAM_MASK << 16) | qdev->eeprom_cmd_data));
}

/*
 * Caller holds hw_lock.
 */
static void fm93c56a_cmd(struct ql3_adapter *qdev, u32 cmd, u32 eepromAddr)
{
	int i;
	u32 mask;
	u32 dataBit;
	u32 previousBit;
	struct ql3xxx_port_registers __iomem *port_regs =
			qdev->mem_map_registers;

	/* Clock in a zero, then do the start bit */
	ql_write_nvram_reg(qdev, &port_regs->CommonRegs.serialPortInterfaceReg,
			   ISP_NVRAM_MASK | qdev->eeprom_cmd_data |
			   AUBURN_EEPROM_DO_1);
	ql_write_nvram_reg(qdev, &port_regs->CommonRegs.serialPortInterfaceReg,
			   ISP_NVRAM_MASK | qdev->
			   eeprom_cmd_data | AUBURN_EEPROM_DO_1 |
			   AUBURN_EEPROM_CLK_RISE);
	ql_write_nvram_reg(qdev, &port_regs->CommonRegs.serialPortInterfaceReg,
			   ISP_NVRAM_MASK | qdev->
			   eeprom_cmd_data | AUBURN_EEPROM_DO_1 |
			   AUBURN_EEPROM_CLK_FALL);

	mask = 1 << (FM93C56A_CMD_BITS - 1);
	/* Force the previous data bit to be different */
	previousBit = 0xffff;
	for (i = 0; i < FM93C56A_CMD_BITS; i++) {
		dataBit = (cmd & mask) ? AUBURN_EEPROM_DO_1
				       : AUBURN_EEPROM_DO_0;
		if (previousBit != dataBit) {
			/*
			 * If the bit changed, then change the DO state to
			 * match
			 */
			ql_write_nvram_reg(qdev,
					   &port_regs->CommonRegs.
					   serialPortInterfaceReg,
					   ISP_NVRAM_MASK | qdev->
					   eeprom_cmd_data | dataBit);
			previousBit = dataBit;
		}
		ql_write_nvram_reg(qdev,
				   &port_regs->CommonRegs.
				   serialPortInterfaceReg,
				   ISP_NVRAM_MASK | qdev->
				   eeprom_cmd_data | dataBit |
				   AUBURN_EEPROM_CLK_RISE);
		ql_write_nvram_reg(qdev,
				   &port_regs->CommonRegs.
				   serialPortInterfaceReg,
				   ISP_NVRAM_MASK | qdev->
				   eeprom_cmd_data | dataBit |
				   AUBURN_EEPROM_CLK_FALL);
		cmd = cmd << 1;
	}

	mask = 1 << (addrBits - 1);
	/* Force the previous data bit to be different */
	previousBit = 0xffff;
	for (i = 0; i < addrBits; i++) {
		dataBit = (eepromAddr & mask) ? AUBURN_EEPROM_DO_1
					      : AUBURN_EEPROM_DO_0;
		if (previousBit != dataBit) {
			/*
			 * If the bit changed, then change the DO state to
			 * match
			 */
			ql_write_nvram_reg(qdev,
					   &port_regs->CommonRegs.
					   serialPortInterfaceReg,
					   ISP_NVRAM_MASK | qdev->
					   eeprom_cmd_data | dataBit);
			previousBit = dataBit;
		}
		ql_write_nvram_reg(qdev,
				   &port_regs->CommonRegs.
				   serialPortInterfaceReg,
				   ISP_NVRAM_MASK | qdev->
				   eeprom_cmd_data | dataBit |
				   AUBURN_EEPROM_CLK_RISE);
		ql_write_nvram_reg(qdev,
				   &port_regs->CommonRegs.
				   serialPortInterfaceReg,
				   ISP_NVRAM_MASK | qdev->
				   eeprom_cmd_data | dataBit |
				   AUBURN_EEPROM_CLK_FALL);
		eepromAddr = eepromAddr << 1;
	}
}
/*
 * Caller holds hw_lock.
 */
static void fm93c56a_deselect(struct ql3_adapter *qdev)
{
	struct ql3xxx_port_registers __iomem *port_regs =
			qdev->mem_map_registers;

	qdev->eeprom_cmd_data = AUBURN_EEPROM_CS_0;
	ql_write_nvram_reg(qdev, &port_regs->CommonRegs.serialPortInterfaceReg,
			   ISP_NVRAM_MASK | qdev->eeprom_cmd_data);
}

/*
 * Caller holds hw_lock.
 */
static void fm93c56a_datain(struct ql3_adapter *qdev, unsigned short *value)
{
	int i;
	u32 data = 0;
	u32 dataBit;
	struct ql3xxx_port_registers __iomem *port_regs =
			qdev->mem_map_registers;

	/* Read the data bits */
	/* The first bit is a dummy.  Clock right over it. */
	for (i = 0; i < dataBits; i++) {
		ql_write_nvram_reg(qdev,
				   &port_regs->CommonRegs.
				   serialPortInterfaceReg,
				   ISP_NVRAM_MASK | qdev->eeprom_cmd_data |
				   AUBURN_EEPROM_CLK_RISE);
		ql_write_nvram_reg(qdev,
				   &port_regs->CommonRegs.
				   serialPortInterfaceReg,
				   ISP_NVRAM_MASK | qdev->eeprom_cmd_data |
				   AUBURN_EEPROM_CLK_FALL);
		dataBit = (ql_read_common_reg(qdev,
					      &port_regs->CommonRegs.
					      serialPortInterfaceReg) &
			   AUBURN_EEPROM_DI_1) ? 1 : 0;
		data = (data << 1) | dataBit;
	}
	*value = (u16)data;
}

/*
 * Caller holds hw_lock.
 */
static void eeprom_readword(struct ql3_adapter *qdev,
			    u32 eepromAddr, unsigned short *value)
{
	fm93c56a_select(qdev);
	fm93c56a_cmd(qdev, (int)FM93C56A_READ, eepromAddr);
	fm93c56a_datain(qdev, value);
	fm93c56a_deselect(qdev);
}
static void ql_set_mac_addr(struct net_device *ndev, u16 *addr)
{
	__le16 *p = (__le16 *)ndev->dev_addr;
	p[0] = cpu_to_le16(addr[0]);
	p[1] = cpu_to_le16(addr[1]);
	p[2] = cpu_to_le16(addr[2]);
}

static int ql_get_nvram_params(struct ql3_adapter *qdev)
{
	u16 *pEEPROMData;
	u16 checksum = 0;
	u32 index;
	unsigned long hw_flags;

	spin_lock_irqsave(&qdev->hw_lock, hw_flags);

	pEEPROMData = (u16 *)&qdev->nvram_data;
	qdev->eeprom_cmd_data = 0;
	if (ql_sem_spinlock(qdev, QL_NVRAM_SEM_MASK,
			    (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) *
			     2) << 10)) {
		pr_err("%s: Failed ql_sem_spinlock()\n", __func__);
		spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
		return -1;
	}

	for (index = 0; index < EEPROM_SIZE; index++) {
		eeprom_readword(qdev, index, pEEPROMData);
		checksum += *pEEPROMData;
		pEEPROMData++;
	}
	ql_sem_unlock(qdev, QL_NVRAM_SEM_MASK);

	if (checksum != 0) {
		netdev_err(qdev->ndev, "checksum should be zero, is %x!!\n",
			   checksum);
		spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
		return -1;
	}

	spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
	return 0;
}

static const u32 PHYAddr[2] = {
	PORT0_PHY_ADDRESS, PORT1_PHY_ADDRESS
};
static int ql_wait_for_mii_ready(struct ql3_adapter *qdev)
{
	struct ql3xxx_port_registers __iomem *port_regs =
			qdev->mem_map_registers;
	u32 temp;
	int count = 1000;

	while (count) {
		temp = ql_read_page0_reg(qdev, &port_regs->macMIIStatusReg);
		if (!(temp & MAC_MII_STATUS_BSY))
			return 0;
		udelay(10);
		count--;
	}
	return -1;
}

static void ql_mii_enable_scan_mode(struct ql3_adapter *qdev)
{
	struct ql3xxx_port_registers __iomem *port_regs =
			qdev->mem_map_registers;
	u32 scanControl;

	if (qdev->numPorts > 1) {
		/* Auto scan will cycle through multiple ports */
		scanControl = MAC_MII_CONTROL_AS | MAC_MII_CONTROL_SC;
	} else {
		scanControl = MAC_MII_CONTROL_SC;
	}

	/*
	 * Scan register 1 of PHY/PETBI,
	 * Set up to scan both devices
	 * The autoscan starts from the first register, completes
	 * the last one before rolling over to the first
	 */
	ql_write_page0_reg(qdev, &port_regs->macMIIMgmtAddrReg,
			   PHYAddr[0] | MII_SCAN_REGISTER);

	ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg,
			   (scanControl) |
			   ((MAC_MII_CONTROL_SC | MAC_MII_CONTROL_AS) << 16));
}

static u8 ql_mii_disable_scan_mode(struct ql3_adapter *qdev)
{
	u8 scanWasEnabled;
	struct ql3xxx_port_registers __iomem *port_regs =
			qdev->mem_map_registers;

	/* See if scan mode is enabled before we turn it off */
	if (ql_read_page0_reg(qdev, &port_regs->macMIIMgmtControlReg) &
	    (MAC_MII_CONTROL_AS | MAC_MII_CONTROL_SC)) {
		/* Scan is enabled */
		scanWasEnabled = 1;
	} else {
		/* Scan is disabled */
		scanWasEnabled = 0;
	}

	/*
	 * When disabling scan mode you must first change the MII register
	 * address
	 */
	ql_write_page0_reg(qdev, &port_regs->macMIIMgmtAddrReg,
			   PHYAddr[0] | MII_SCAN_REGISTER);

	ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg,
			   ((MAC_MII_CONTROL_SC | MAC_MII_CONTROL_AS |
			     MAC_MII_CONTROL_RC) << 16));

	return scanWasEnabled;
}
static int ql_mii_write_reg_ex(struct ql3_adapter *qdev,
			       u16 regAddr, u16 value, u32 phyAddr)
{
	struct ql3xxx_port_registers __iomem *port_regs =
			qdev->mem_map_registers;
	u8 scanWasEnabled;

	scanWasEnabled = ql_mii_disable_scan_mode(qdev);

	if (ql_wait_for_mii_ready(qdev)) {
		netif_warn(qdev, link, qdev->ndev, TIMED_OUT_MSG);
		return -1;
	}

	ql_write_page0_reg(qdev, &port_regs->macMIIMgmtAddrReg,
			   phyAddr | regAddr);

	ql_write_page0_reg(qdev, &port_regs->macMIIMgmtDataReg, value);

	/* Wait for write to complete 9/10/04 SJP */
	if (ql_wait_for_mii_ready(qdev)) {
		netif_warn(qdev, link, qdev->ndev, TIMED_OUT_MSG);
		return -1;
	}

	if (scanWasEnabled)
		ql_mii_enable_scan_mode(qdev);

	return 0;
}

static int ql_mii_read_reg_ex(struct ql3_adapter *qdev, u16 regAddr,
			      u16 *value, u32 phyAddr)
{
	struct ql3xxx_port_registers __iomem *port_regs =
			qdev->mem_map_registers;
	u8 scanWasEnabled;
	u32 temp;

	scanWasEnabled = ql_mii_disable_scan_mode(qdev);

	if (ql_wait_for_mii_ready(qdev)) {
		netif_warn(qdev, link, qdev->ndev, TIMED_OUT_MSG);
		return -1;
	}

	ql_write_page0_reg(qdev, &port_regs->macMIIMgmtAddrReg,
			   phyAddr | regAddr);

	ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg,
			   (MAC_MII_CONTROL_RC << 16));

	ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg,
			   (MAC_MII_CONTROL_RC << 16) | MAC_MII_CONTROL_RC);

	/* Wait for the read to complete */
	if (ql_wait_for_mii_ready(qdev)) {
		netif_warn(qdev, link, qdev->ndev, TIMED_OUT_MSG);
		return -1;
	}

	temp = ql_read_page0_reg(qdev, &port_regs->macMIIMgmtDataReg);
	*value = (u16)temp;

	if (scanWasEnabled)
		ql_mii_enable_scan_mode(qdev);

	return 0;
}

static int ql_mii_write_reg(struct ql3_adapter *qdev, u16 regAddr, u16 value)
{
	struct ql3xxx_port_registers __iomem *port_regs =
			qdev->mem_map_registers;

	ql_mii_disable_scan_mode(qdev);

	if (ql_wait_for_mii_ready(qdev)) {
		netif_warn(qdev, link, qdev->ndev, TIMED_OUT_MSG);
		return -1;
	}

	ql_write_page0_reg(qdev, &port_regs->macMIIMgmtAddrReg,
			   qdev->PHYAddr | regAddr);

	ql_write_page0_reg(qdev, &port_regs->macMIIMgmtDataReg, value);

	/* Wait for write to complete. */
	if (ql_wait_for_mii_ready(qdev)) {
		netif_warn(qdev, link, qdev->ndev, TIMED_OUT_MSG);
		return -1;
	}

	ql_mii_enable_scan_mode(qdev);

	return 0;
}

static int ql_mii_read_reg(struct ql3_adapter *qdev, u16 regAddr, u16 *value)
{
	u32 temp;
	struct ql3xxx_port_registers __iomem *port_regs =
			qdev->mem_map_registers;

	ql_mii_disable_scan_mode(qdev);

	if (ql_wait_for_mii_ready(qdev)) {
		netif_warn(qdev, link, qdev->ndev, TIMED_OUT_MSG);
		return -1;
	}

	ql_write_page0_reg(qdev, &port_regs->macMIIMgmtAddrReg,
			   qdev->PHYAddr | regAddr);

	ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg,
			   (MAC_MII_CONTROL_RC << 16));

	ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg,
			   (MAC_MII_CONTROL_RC << 16) | MAC_MII_CONTROL_RC);

	/* Wait for the read to complete */
	if (ql_wait_for_mii_ready(qdev)) {
		netif_warn(qdev, link, qdev->ndev, TIMED_OUT_MSG);
		return -1;
	}

	temp = ql_read_page0_reg(qdev, &port_regs->macMIIMgmtDataReg);
	*value = (u16)temp;

	ql_mii_enable_scan_mode(qdev);

	return 0;
}
static void ql_petbi_reset(struct ql3_adapter *qdev)
{
	ql_mii_write_reg(qdev, PETBI_CONTROL_REG, PETBI_CTRL_SOFT_RESET);
}

static void ql_petbi_start_neg(struct ql3_adapter *qdev)
{
	u16 reg;

	/* Enable Auto-negotiation sense */
	ql_mii_read_reg(qdev, PETBI_TBI_CTRL, &reg);
	reg |= PETBI_TBI_AUTO_SENSE;
	ql_mii_write_reg(qdev, PETBI_TBI_CTRL, reg);

	ql_mii_write_reg(qdev, PETBI_NEG_ADVER,
			 PETBI_NEG_PAUSE | PETBI_NEG_DUPLEX);

	ql_mii_write_reg(qdev, PETBI_CONTROL_REG,
			 PETBI_CTRL_AUTO_NEG | PETBI_CTRL_RESTART_NEG |
			 PETBI_CTRL_FULL_DUPLEX | PETBI_CTRL_SPEED_1000);
}

static void ql_petbi_reset_ex(struct ql3_adapter *qdev)
{
	ql_mii_write_reg_ex(qdev, PETBI_CONTROL_REG, PETBI_CTRL_SOFT_RESET,
			    PHYAddr[qdev->mac_index]);
}

static void ql_petbi_start_neg_ex(struct ql3_adapter *qdev)
{
	u16 reg;

	/* Enable Auto-negotiation sense */
	ql_mii_read_reg_ex(qdev, PETBI_TBI_CTRL, &reg,
			   PHYAddr[qdev->mac_index]);
	reg |= PETBI_TBI_AUTO_SENSE;
	ql_mii_write_reg_ex(qdev, PETBI_TBI_CTRL, reg,
			    PHYAddr[qdev->mac_index]);

	ql_mii_write_reg_ex(qdev, PETBI_NEG_ADVER,
			    PETBI_NEG_PAUSE | PETBI_NEG_DUPLEX,
			    PHYAddr[qdev->mac_index]);

	ql_mii_write_reg_ex(qdev, PETBI_CONTROL_REG,
			    PETBI_CTRL_AUTO_NEG | PETBI_CTRL_RESTART_NEG |
			    PETBI_CTRL_FULL_DUPLEX | PETBI_CTRL_SPEED_1000,
			    PHYAddr[qdev->mac_index]);
}

static void ql_petbi_init(struct ql3_adapter *qdev)
{
	ql_petbi_reset(qdev);
	ql_petbi_start_neg(qdev);
}

static void ql_petbi_init_ex(struct ql3_adapter *qdev)
{
	ql_petbi_reset_ex(qdev);
	ql_petbi_start_neg_ex(qdev);
}

static int ql_is_petbi_neg_pause(struct ql3_adapter *qdev)
{
	u16 reg;

	if (ql_mii_read_reg(qdev, PETBI_NEG_PARTNER, &reg) < 0)
		return 0;

	return (reg & PETBI_NEG_PAUSE_MASK) == PETBI_NEG_PAUSE;
}
static void phyAgereSpecificInit(struct ql3_adapter *qdev, u32 miiAddr)
{
	netdev_info(qdev->ndev, "enabling Agere specific PHY\n");
	/* power down device bit 11 = 1 */
	ql_mii_write_reg_ex(qdev, 0x00, 0x1940, miiAddr);
	/* enable diagnostic mode bit 2 = 1 */
	ql_mii_write_reg_ex(qdev, 0x12, 0x840e, miiAddr);
	/* 1000MB amplitude adjust (see Agere errata) */
	ql_mii_write_reg_ex(qdev, 0x10, 0x8805, miiAddr);
	/* 1000MB amplitude adjust (see Agere errata) */
	ql_mii_write_reg_ex(qdev, 0x11, 0xf03e, miiAddr);
	/* 100MB amplitude adjust (see Agere errata) */
	ql_mii_write_reg_ex(qdev, 0x10, 0x8806, miiAddr);
	/* 100MB amplitude adjust (see Agere errata) */
	ql_mii_write_reg_ex(qdev, 0x11, 0x003e, miiAddr);
	/* 10MB amplitude adjust (see Agere errata) */
	ql_mii_write_reg_ex(qdev, 0x10, 0x8807, miiAddr);
	/* 10MB amplitude adjust (see Agere errata) */
	ql_mii_write_reg_ex(qdev, 0x11, 0x1f00, miiAddr);
	/* point to hidden reg 0x2806 */
	ql_mii_write_reg_ex(qdev, 0x10, 0x2806, miiAddr);
	/* Write new PHYAD w/bit 5 set */
	ql_mii_write_reg_ex(qdev, 0x11,
			    0x0020 | (PHYAddr[qdev->mac_index] >> 8), miiAddr);
	/*
	 * Disable diagnostic mode bit 2 = 0
	 * Power up device bit 11 = 0
	 * Link up (on) and activity (blink)
	 */
	ql_mii_write_reg(qdev, 0x12, 0x840a);
	ql_mii_write_reg(qdev, 0x00, 0x1140);
	ql_mii_write_reg(qdev, 0x1c, 0xfaf0);
}
static PHY_DEVICE_et getPhyType(struct ql3_adapter *qdev,
				u16 phyIdReg0, u16 phyIdReg1)
{
	PHY_DEVICE_et result = PHY_TYPE_UNKNOWN;
	u32 oui;
	u16 model;
	int i;

	if (phyIdReg0 == 0xffff)
		return result;

	if (phyIdReg1 == 0xffff)
		return result;

	/* oui is split between two registers */
	oui = (phyIdReg0 << 6) | ((phyIdReg1 & PHY_OUI_1_MASK) >> 10);

	model = (phyIdReg1 & PHY_MODEL_MASK) >> 4;
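
	/*
	 * Worked example with hypothetical ID register values (and
	 * assuming PHY_OUI_1_MASK covers the top six bits, 0xfc00, and
	 * PHY_MODEL_MASK is 0x03f0): phyIdReg0 = 0x1234 and phyIdReg1 =
	 * 0x5678 give oui = (0x1234 << 6) | (0x5400 >> 10) = 0x48d15 and
	 * model = 0x0270 >> 4 = 0x27.
	 */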
	/* Scan table for this PHY */
	for (i = 0; i < MAX_PHY_DEV_TYPES; i++) {
		if ((oui == PHY_DEVICES[i].phyIdOUI) &&
		    (model == PHY_DEVICES[i].phyIdModel)) {
			result = PHY_DEVICES[i].phyDevice;

			netdev_info(qdev->ndev, "Phy: %s\n",
				    PHY_DEVICES[i].name);
			break;
		}
	}

	return result;
}
static int ql_phy_get_speed(struct ql3_adapter *qdev)
{
	u16 reg;

	switch (qdev->phyType) {
	case PHY_AGERE_ET1011C:
		if (ql_mii_read_reg(qdev, 0x1A, &reg) < 0)
			return 0;

		reg = (reg >> 8) & 3;
		break;
	default:
		if (ql_mii_read_reg(qdev, AUX_CONTROL_STATUS, &reg) < 0)
			return 0;

		reg = (((reg & 0x18) >> 3) & 3);
	}

	switch (reg) {
	case 2:
		return SPEED_1000;
	case 1:
		return SPEED_100;
	case 0:
		return SPEED_10;
	default:
		return -1;
	}
}

static int ql_is_full_dup(struct ql3_adapter *qdev)
{
	u16 reg;

	switch (qdev->phyType) {
	case PHY_AGERE_ET1011C:
		if (ql_mii_read_reg(qdev, 0x1A, &reg))
			return 0;

		return ((reg & 0x0080) && (reg & 0x1000)) != 0;

	case PHY_VITESSE_VSC8211:
	default:
		if (ql_mii_read_reg(qdev, AUX_CONTROL_STATUS, &reg) < 0)
			return 0;
		return (reg & PHY_AUX_DUPLEX_STAT) != 0;
	}
}

static int ql_is_phy_neg_pause(struct ql3_adapter *qdev)
{
	u16 reg;

	if (ql_mii_read_reg(qdev, PHY_NEG_PARTNER, &reg) < 0)
		return 0;

	return (reg & PHY_NEG_PAUSE) != 0;
}
static int PHY_Setup(struct ql3_adapter *qdev)
{
	u16 reg1;
	u16 reg2;
	bool agereAddrChangeNeeded = false;
	u32 miiAddr = 0;
	int err;

	/* Determine the PHY we are using by reading the ID's */
	err = ql_mii_read_reg(qdev, PHY_ID_0_REG, &reg1);
	if (err != 0) {
		netdev_err(qdev->ndev, "Could not read from reg PHY_ID_0_REG\n");
		return err;
	}

	err = ql_mii_read_reg(qdev, PHY_ID_1_REG, &reg2);
	if (err != 0) {
		netdev_err(qdev->ndev, "Could not read from reg PHY_ID_1_REG\n");
		return err;
	}

	/* Check if we have an Agere PHY */
	if ((reg1 == 0xffff) || (reg2 == 0xffff)) {

		/* Determine which MII address we should be using,
		   determined by the index of the card */
		if (qdev->mac_index == 0)
			miiAddr = MII_AGERE_ADDR_1;
		else
			miiAddr = MII_AGERE_ADDR_2;

		err = ql_mii_read_reg_ex(qdev, PHY_ID_0_REG, &reg1, miiAddr);
		if (err != 0) {
			netdev_err(qdev->ndev,
				   "Could not read from reg PHY_ID_0_REG after Agere detected\n");
			return err;
		}

		err = ql_mii_read_reg_ex(qdev, PHY_ID_1_REG, &reg2, miiAddr);
		if (err != 0) {
			netdev_err(qdev->ndev,
				   "Could not read from reg PHY_ID_1_REG after Agere detected\n");
			return err;
		}

		/* We need to remember to initialize the Agere PHY */
		agereAddrChangeNeeded = true;
	}

	/* Determine the particular PHY we have on board to apply
	   PHY specific initializations */
	qdev->phyType = getPhyType(qdev, reg1, reg2);

	if ((qdev->phyType == PHY_AGERE_ET1011C) && agereAddrChangeNeeded) {
		/* need this here so address gets changed */
		phyAgereSpecificInit(qdev, miiAddr);
	} else if (qdev->phyType == PHY_TYPE_UNKNOWN) {
		netdev_err(qdev->ndev, "PHY is unknown\n");
		return -EIO;
	}

	return 0;
}
/*
 * Caller holds hw_lock.
 */
static void ql_mac_enable(struct ql3_adapter *qdev, u32 enable)
{
	struct ql3xxx_port_registers __iomem *port_regs =
			qdev->mem_map_registers;
	u32 value;

	if (enable)
		value = (MAC_CONFIG_REG_PE | (MAC_CONFIG_REG_PE << 16));
	else
		value = (MAC_CONFIG_REG_PE << 16);

	if (qdev->mac_index)
		ql_write_page0_reg(qdev, &port_regs->mac1ConfigReg, value);
	else
		ql_write_page0_reg(qdev, &port_regs->mac0ConfigReg, value);
}

/*
 * Caller holds hw_lock.
 */
static void ql_mac_cfg_soft_reset(struct ql3_adapter *qdev, u32 enable)
{
	struct ql3xxx_port_registers __iomem *port_regs =
			qdev->mem_map_registers;
	u32 value;

	if (enable)
		value = (MAC_CONFIG_REG_SR | (MAC_CONFIG_REG_SR << 16));
	else
		value = (MAC_CONFIG_REG_SR << 16);

	if (qdev->mac_index)
		ql_write_page0_reg(qdev, &port_regs->mac1ConfigReg, value);
	else
		ql_write_page0_reg(qdev, &port_regs->mac0ConfigReg, value);
}

/*
 * Caller holds hw_lock.
 */
static void ql_mac_cfg_gig(struct ql3_adapter *qdev, u32 enable)
{
	struct ql3xxx_port_registers __iomem *port_regs =
			qdev->mem_map_registers;
	u32 value;

	if (enable)
		value = (MAC_CONFIG_REG_GM | (MAC_CONFIG_REG_GM << 16));
	else
		value = (MAC_CONFIG_REG_GM << 16);

	if (qdev->mac_index)
		ql_write_page0_reg(qdev, &port_regs->mac1ConfigReg, value);
	else
		ql_write_page0_reg(qdev, &port_regs->mac0ConfigReg, value);
}

/*
 * Caller holds hw_lock.
 */
static void ql_mac_cfg_full_dup(struct ql3_adapter *qdev, u32 enable)
{
	struct ql3xxx_port_registers __iomem *port_regs =
			qdev->mem_map_registers;
	u32 value;

	if (enable)
		value = (MAC_CONFIG_REG_FD | (MAC_CONFIG_REG_FD << 16));
	else
		value = (MAC_CONFIG_REG_FD << 16);

	if (qdev->mac_index)
		ql_write_page0_reg(qdev, &port_regs->mac1ConfigReg, value);
	else
		ql_write_page0_reg(qdev, &port_regs->mac0ConfigReg, value);
}

/*
 * Caller holds hw_lock.
 */
static void ql_mac_cfg_pause(struct ql3_adapter *qdev, u32 enable)
{
	struct ql3xxx_port_registers __iomem *port_regs =
			qdev->mem_map_registers;
	u32 value;

	if (enable)
		value =
		    ((MAC_CONFIG_REG_TF | MAC_CONFIG_REG_RF) |
		     ((MAC_CONFIG_REG_TF | MAC_CONFIG_REG_RF) << 16));
	else
		value = ((MAC_CONFIG_REG_TF | MAC_CONFIG_REG_RF) << 16);

	if (qdev->mac_index)
		ql_write_page0_reg(qdev, &port_regs->mac1ConfigReg, value);
	else
		ql_write_page0_reg(qdev, &port_regs->mac0ConfigReg, value);
}
/*
 * Caller holds hw_lock.
 */
static int ql_is_fiber(struct ql3_adapter *qdev)
{
	struct ql3xxx_port_registers __iomem *port_regs =
			qdev->mem_map_registers;
	u32 bitToCheck = 0;
	u32 temp;

	switch (qdev->mac_index) {
	case 0:
		bitToCheck = PORT_STATUS_SM0;
		break;
	case 1:
		bitToCheck = PORT_STATUS_SM1;
		break;
	}

	temp = ql_read_page0_reg(qdev, &port_regs->portStatus);
	return (temp & bitToCheck) != 0;
}

static int ql_is_auto_cfg(struct ql3_adapter *qdev)
{
	u16 reg;
	ql_mii_read_reg(qdev, 0x00, &reg);
	return (reg & 0x1000) != 0;
}

/*
 * Caller holds hw_lock.
 */
static int ql_is_auto_neg_complete(struct ql3_adapter *qdev)
{
	struct ql3xxx_port_registers __iomem *port_regs =
			qdev->mem_map_registers;
	u32 bitToCheck = 0;
	u32 temp;

	switch (qdev->mac_index) {
	case 0:
		bitToCheck = PORT_STATUS_AC0;
		break;
	case 1:
		bitToCheck = PORT_STATUS_AC1;
		break;
	}

	temp = ql_read_page0_reg(qdev, &port_regs->portStatus);
	if (temp & bitToCheck) {
		netif_info(qdev, link, qdev->ndev, "Auto-Negotiate complete\n");
		return 1;
	}
	netif_info(qdev, link, qdev->ndev, "Auto-Negotiate incomplete\n");
	return 0;
}

/*
 * ql_is_neg_pause() returns 1 if pause was negotiated to be on
 */
static int ql_is_neg_pause(struct ql3_adapter *qdev)
{
	if (ql_is_fiber(qdev))
		return ql_is_petbi_neg_pause(qdev);
	else
		return ql_is_phy_neg_pause(qdev);
}

static int ql_auto_neg_error(struct ql3_adapter *qdev)
{
	struct ql3xxx_port_registers __iomem *port_regs =
			qdev->mem_map_registers;
	u32 bitToCheck = 0;
	u32 temp;

	switch (qdev->mac_index) {
	case 0:
		bitToCheck = PORT_STATUS_AE0;
		break;
	case 1:
		bitToCheck = PORT_STATUS_AE1;
		break;
	}
	temp = ql_read_page0_reg(qdev, &port_regs->portStatus);
	return (temp & bitToCheck) != 0;
}

static u32 ql_get_link_speed(struct ql3_adapter *qdev)
{
	if (ql_is_fiber(qdev))
		return SPEED_1000;
	else
		return ql_phy_get_speed(qdev);
}

static int ql_is_link_full_dup(struct ql3_adapter *qdev)
{
	if (ql_is_fiber(qdev))
		return 1;
	else
		return ql_is_full_dup(qdev);
}
/*
 * Caller holds hw_lock.
 */
static int ql_link_down_detect(struct ql3_adapter *qdev)
{
	struct ql3xxx_port_registers __iomem *port_regs =
			qdev->mem_map_registers;
	u32 bitToCheck = 0;
	u32 temp;

	switch (qdev->mac_index) {
	case 0:
		bitToCheck = ISP_CONTROL_LINK_DN_0;
		break;
	case 1:
		bitToCheck = ISP_CONTROL_LINK_DN_1;
		break;
	}

	temp =
	    ql_read_common_reg(qdev, &port_regs->CommonRegs.ispControlStatus);
	return (temp & bitToCheck) != 0;
}

/*
 * Caller holds hw_lock.
 */
static int ql_link_down_detect_clear(struct ql3_adapter *qdev)
{
	struct ql3xxx_port_registers __iomem *port_regs =
			qdev->mem_map_registers;

	switch (qdev->mac_index) {
	case 0:
		ql_write_common_reg(qdev,
				    &port_regs->CommonRegs.ispControlStatus,
				    (ISP_CONTROL_LINK_DN_0) |
				    (ISP_CONTROL_LINK_DN_0 << 16));
		break;
	case 1:
		ql_write_common_reg(qdev,
				    &port_regs->CommonRegs.ispControlStatus,
				    (ISP_CONTROL_LINK_DN_1) |
				    (ISP_CONTROL_LINK_DN_1 << 16));
		break;
	default:
		return -1;
	}

	return 0;
}

/*
 * Caller holds hw_lock.
 */
static int ql_this_adapter_controls_port(struct ql3_adapter *qdev)
{
	struct ql3xxx_port_registers __iomem *port_regs =
			qdev->mem_map_registers;
	u32 bitToCheck = 0;
	u32 temp;

	switch (qdev->mac_index) {
	case 0:
		bitToCheck = PORT_STATUS_F1_ENABLED;
		break;
	case 1:
		bitToCheck = PORT_STATUS_F3_ENABLED;
		break;
	default:
		break;
	}

	temp = ql_read_page0_reg(qdev, &port_regs->portStatus);
	if (temp & bitToCheck) {
		netif_printk(qdev, link, KERN_DEBUG, qdev->ndev,
			     "not link master\n");
		return 0;
	}

	netif_printk(qdev, link, KERN_DEBUG, qdev->ndev, "link master\n");
	return 1;
}

static void ql_phy_reset_ex(struct ql3_adapter *qdev)
{
	ql_mii_write_reg_ex(qdev, CONTROL_REG, PHY_CTRL_SOFT_RESET,
			    PHYAddr[qdev->mac_index]);
}
static void ql_phy_start_neg_ex(struct ql3_adapter *qdev)
{
	u16 reg;
	u16 portConfiguration;

	if (qdev->phyType == PHY_AGERE_ET1011C) {
		/* turn off external loopback */
		ql_mii_write_reg(qdev, 0x13, 0x0000);
	}

	if (qdev->mac_index == 0)
		portConfiguration =
			qdev->nvram_data.macCfg_port0.portConfiguration;
	else
		portConfiguration =
			qdev->nvram_data.macCfg_port1.portConfiguration;

	/* Some HBA's in the field are set to 0 and they need to
	   be reinterpreted with a default value */
	if (portConfiguration == 0)
		portConfiguration = PORT_CONFIG_DEFAULT;

	/* Set the 1000 advertisements */
	ql_mii_read_reg_ex(qdev, PHY_GIG_CONTROL, &reg,
			   PHYAddr[qdev->mac_index]);
	reg &= ~PHY_GIG_ALL_PARAMS;

	if (portConfiguration & PORT_CONFIG_1000MB_SPEED) {
		if (portConfiguration & PORT_CONFIG_FULL_DUPLEX_ENABLED)
			reg |= PHY_GIG_ADV_1000F;
		else
			reg |= PHY_GIG_ADV_1000H;
	}

	ql_mii_write_reg_ex(qdev, PHY_GIG_CONTROL, reg,
			    PHYAddr[qdev->mac_index]);

	/* Set the 10/100 & pause negotiation advertisements */
	ql_mii_read_reg_ex(qdev, PHY_NEG_ADVER, &reg,
			   PHYAddr[qdev->mac_index]);
	reg &= ~PHY_NEG_ALL_PARAMS;

	if (portConfiguration & PORT_CONFIG_SYM_PAUSE_ENABLED)
		reg |= PHY_NEG_ASY_PAUSE | PHY_NEG_SYM_PAUSE;

	if (portConfiguration & PORT_CONFIG_FULL_DUPLEX_ENABLED) {
		if (portConfiguration & PORT_CONFIG_100MB_SPEED)
			reg |= PHY_NEG_ADV_100F;

		if (portConfiguration & PORT_CONFIG_10MB_SPEED)
			reg |= PHY_NEG_ADV_10F;
	}

	if (portConfiguration & PORT_CONFIG_HALF_DUPLEX_ENABLED) {
		if (portConfiguration & PORT_CONFIG_100MB_SPEED)
			reg |= PHY_NEG_ADV_100H;

		if (portConfiguration & PORT_CONFIG_10MB_SPEED)
			reg |= PHY_NEG_ADV_10H;
	}

	if (portConfiguration & PORT_CONFIG_1000MB_SPEED)
		reg |= 1;

	ql_mii_write_reg_ex(qdev, PHY_NEG_ADVER, reg,
			    PHYAddr[qdev->mac_index]);

	ql_mii_read_reg_ex(qdev, CONTROL_REG, &reg, PHYAddr[qdev->mac_index]);

	ql_mii_write_reg_ex(qdev, CONTROL_REG,
			    reg | PHY_CTRL_RESTART_NEG | PHY_CTRL_AUTO_NEG,
			    PHYAddr[qdev->mac_index]);
}

static void ql_phy_init_ex(struct ql3_adapter *qdev)
{
	ql_phy_reset_ex(qdev);
	PHY_Setup(qdev);
	ql_phy_start_neg_ex(qdev);
}
/*
 * Caller holds hw_lock.
 */
static u32 ql_get_link_state(struct ql3_adapter *qdev)
{
	struct ql3xxx_port_registers __iomem *port_regs =
			qdev->mem_map_registers;
	u32 bitToCheck = 0;
	u32 temp, linkState;

	switch (qdev->mac_index) {
	case 0:
		bitToCheck = PORT_STATUS_UP0;
		break;
	case 1:
		bitToCheck = PORT_STATUS_UP1;
		break;
	}

	temp = ql_read_page0_reg(qdev, &port_regs->portStatus);
	if (temp & bitToCheck)
		linkState = LS_UP;
	else
		linkState = LS_DOWN;

	return linkState;
}

static int ql_port_start(struct ql3_adapter *qdev)
{
	if (ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK,
			    (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) *
			     2) << 7)) {
		netdev_err(qdev->ndev, "Could not get hw lock for GIO\n");
		return -1;
	}

	if (ql_is_fiber(qdev)) {
		ql_petbi_init(qdev);
	} else {
		/* Copper port */
		ql_phy_init_ex(qdev);
	}

	ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);
	return 0;
}
static int ql_finish_auto_neg(struct ql3_adapter *qdev)
{
	if (ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK,
			    (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) *
			     2) << 7))
		return -1;

	if (!ql_auto_neg_error(qdev)) {
		if (test_bit(QL_LINK_MASTER, &qdev->flags)) {
			/* configure the MAC */
			netif_printk(qdev, link, KERN_DEBUG, qdev->ndev,
				     "Configuring link\n");
			ql_mac_cfg_soft_reset(qdev, 1);
			ql_mac_cfg_gig(qdev,
				       (ql_get_link_speed(qdev) ==
					SPEED_1000));
			ql_mac_cfg_full_dup(qdev,
					    ql_is_link_full_dup(qdev));
			ql_mac_cfg_pause(qdev,
					 ql_is_neg_pause(qdev));
			ql_mac_cfg_soft_reset(qdev, 0);

			/* enable the MAC */
			netif_printk(qdev, link, KERN_DEBUG, qdev->ndev,
				     "Enabling mac\n");
			ql_mac_enable(qdev, 1);
		}

		qdev->port_link_state = LS_UP;
		netif_start_queue(qdev->ndev);
		netif_carrier_on(qdev->ndev);
		netif_info(qdev, link, qdev->ndev,
			   "Link is up at %d Mbps, %s duplex\n",
			   ql_get_link_speed(qdev),
			   ql_is_link_full_dup(qdev) ? "full" : "half");

	} else {	/* Remote error detected */

		if (test_bit(QL_LINK_MASTER, &qdev->flags)) {
			netif_printk(qdev, link, KERN_DEBUG, qdev->ndev,
				     "Remote error detected. Calling ql_port_start()\n");
			/*
			 * ql_port_start() is shared code and needs
			 * to lock the PHY on its own.
			 */
			ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);
			if (ql_port_start(qdev))	/* Restart port */
				return -1;
			return 0;
		}
	}
	ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);
	return 0;
}
static void ql_link_state_machine_work(struct work_struct *work)
{
	struct ql3_adapter *qdev =
		container_of(work, struct ql3_adapter, link_state_work.work);
	u32 curr_link_state;
	unsigned long hw_flags;

	spin_lock_irqsave(&qdev->hw_lock, hw_flags);

	curr_link_state = ql_get_link_state(qdev);

	if (test_bit(QL_RESET_ACTIVE, &qdev->flags)) {
		netif_info(qdev, link, qdev->ndev,
			   "Reset in progress, skip processing link state\n");

		spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);

		/* Restart timer on 2 second interval. */
		mod_timer(&qdev->adapter_timer, jiffies + HZ * 1);

		return;
	}

	switch (qdev->port_link_state) {
	default:
		if (test_bit(QL_LINK_MASTER, &qdev->flags))
			ql_port_start(qdev);
		qdev->port_link_state = LS_DOWN;
		/* Fall Through */

	case LS_DOWN:
		if (curr_link_state == LS_UP) {
			netif_info(qdev, link, qdev->ndev, "Link is up\n");
			if (ql_is_auto_neg_complete(qdev))
				ql_finish_auto_neg(qdev);

			if (qdev->port_link_state == LS_UP)
				ql_link_down_detect_clear(qdev);

			qdev->port_link_state = LS_UP;
		}
		break;

	case LS_UP:
		/*
		 * See if the link is currently down or went down and came
		 * back up
		 */
		if (curr_link_state == LS_DOWN) {
			netif_info(qdev, link, qdev->ndev, "Link is down\n");
			qdev->port_link_state = LS_DOWN;
		}
		if (ql_link_down_detect(qdev))
			qdev->port_link_state = LS_DOWN;
		break;
	}
	spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);

	/* Restart timer on 2 second interval. */
	mod_timer(&qdev->adapter_timer, jiffies + HZ * 1);
}
/*
 * Caller must take hw_lock and QL_PHY_GIO_SEM.
 */
static void ql_get_phy_owner(struct ql3_adapter *qdev)
{
	if (ql_this_adapter_controls_port(qdev))
		set_bit(QL_LINK_MASTER, &qdev->flags);
	else
		clear_bit(QL_LINK_MASTER, &qdev->flags);
}

/*
 * Caller must take hw_lock and QL_PHY_GIO_SEM.
 */
static void ql_init_scan_mode(struct ql3_adapter *qdev)
{
	ql_mii_enable_scan_mode(qdev);

	if (test_bit(QL_LINK_OPTICAL, &qdev->flags)) {
		if (ql_this_adapter_controls_port(qdev))
			ql_petbi_init_ex(qdev);
	} else {
		if (ql_this_adapter_controls_port(qdev))
			ql_phy_init_ex(qdev);
	}
}

/*
 * MII_Setup needs to be called before taking the PHY out of reset
 * so that the management interface clock speed can be set properly.
 * It would be better if we had a way to disable MDC until after the
 * PHY is out of reset, but we don't have that capability.
 */
static int ql_mii_setup(struct ql3_adapter *qdev)
{
	u32 reg;
	struct ql3xxx_port_registers __iomem *port_regs =
			qdev->mem_map_registers;

	if (ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK,
			    (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) *
			     2) << 7))
		return -1;

	if (qdev->device_id == QL3032_DEVICE_ID)
		ql_write_page0_reg(qdev,
				   &port_regs->macMIIMgmtControlReg, 0x0f00000);

	/* Divide 125MHz clock by 28 to meet PHY timing requirements */
	reg = MAC_MII_CONTROL_CLK_SEL_DIV28;

	ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg,
			   reg | ((MAC_MII_CONTROL_CLK_SEL_MASK) << 16));

	ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);
	return 0;
}
static u32 ql_supported_modes(struct ql3_adapter *qdev)
{
	u32 supported;

	if (test_bit(QL_LINK_OPTICAL, &qdev->flags)) {
		supported = SUPPORTED_1000baseT_Full | SUPPORTED_FIBRE
		    | SUPPORTED_Autoneg;
	} else {
		supported = SUPPORTED_10baseT_Half
		    | SUPPORTED_10baseT_Full
		    | SUPPORTED_100baseT_Half
		    | SUPPORTED_100baseT_Full
		    | SUPPORTED_1000baseT_Half
		    | SUPPORTED_1000baseT_Full
		    | SUPPORTED_Autoneg | SUPPORTED_TP;
	}

	return supported;
}

static int ql_get_auto_cfg_status(struct ql3_adapter *qdev)
{
	int status;
	unsigned long hw_flags;

	spin_lock_irqsave(&qdev->hw_lock, hw_flags);
	if (ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK,
			    (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) *
			     2) << 7)) {
		spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
		return 0;
	}
	status = ql_is_auto_cfg(qdev);
	ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);
	spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
	return status;
}

static u32 ql_get_speed(struct ql3_adapter *qdev)
{
	u32 status;
	unsigned long hw_flags;

	spin_lock_irqsave(&qdev->hw_lock, hw_flags);
	if (ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK,
			    (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) *
			     2) << 7)) {
		spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
		return 0;
	}
	status = ql_get_link_speed(qdev);
	ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);
	spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
	return status;
}

static int ql_get_full_dup(struct ql3_adapter *qdev)
{
	int status;
	unsigned long hw_flags;

	spin_lock_irqsave(&qdev->hw_lock, hw_flags);
	if (ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK,
			    (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) *
			     2) << 7)) {
		spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
		return 0;
	}
	status = ql_is_link_full_dup(qdev);
	ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);
	spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
	return status;
}
static int ql_get_settings(struct net_device *ndev, struct ethtool_cmd *ecmd)
{
	struct ql3_adapter *qdev = netdev_priv(ndev);

	ecmd->transceiver = XCVR_INTERNAL;
	ecmd->supported = ql_supported_modes(qdev);

	if (test_bit(QL_LINK_OPTICAL, &qdev->flags)) {
		ecmd->port = PORT_FIBRE;
	} else {
		ecmd->port = PORT_TP;
		ecmd->phy_address = qdev->PHYAddr;
	}
	ecmd->advertising = ql_supported_modes(qdev);
	ecmd->autoneg = ql_get_auto_cfg_status(qdev);
	ecmd->speed = ql_get_speed(qdev);
	ecmd->duplex = ql_get_full_dup(qdev);
	return 0;
}

static void ql_get_drvinfo(struct net_device *ndev,
			   struct ethtool_drvinfo *drvinfo)
{
	struct ql3_adapter *qdev = netdev_priv(ndev);
	strncpy(drvinfo->driver, ql3xxx_driver_name, 32);
	strncpy(drvinfo->version, ql3xxx_driver_version, 32);
	strncpy(drvinfo->fw_version, "N/A", 32);
	strncpy(drvinfo->bus_info, pci_name(qdev->pdev), 32);
	drvinfo->regdump_len = 0;
	drvinfo->eedump_len = 0;
}

static u32 ql_get_msglevel(struct net_device *ndev)
{
	struct ql3_adapter *qdev = netdev_priv(ndev);
	return qdev->msg_enable;
}

static void ql_set_msglevel(struct net_device *ndev, u32 value)
{
	struct ql3_adapter *qdev = netdev_priv(ndev);
	qdev->msg_enable = value;
}

static void ql_get_pauseparam(struct net_device *ndev,
			      struct ethtool_pauseparam *pause)
{
	struct ql3_adapter *qdev = netdev_priv(ndev);
	struct ql3xxx_port_registers __iomem *port_regs =
			qdev->mem_map_registers;
	u32 reg;

	if (qdev->mac_index == 0)
		reg = ql_read_page0_reg(qdev, &port_regs->mac0ConfigReg);
	else
		reg = ql_read_page0_reg(qdev, &port_regs->mac1ConfigReg);

	pause->autoneg = ql_get_auto_cfg_status(qdev);
	pause->rx_pause = (reg & MAC_CONFIG_REG_RF) >> 2;
	pause->tx_pause = (reg & MAC_CONFIG_REG_TF) >> 1;
}

static const struct ethtool_ops ql3xxx_ethtool_ops = {
	.get_settings = ql_get_settings,
	.get_drvinfo = ql_get_drvinfo,
	.get_link = ethtool_op_get_link,
	.get_msglevel = ql_get_msglevel,
	.set_msglevel = ql_set_msglevel,
	.get_pauseparam = ql_get_pauseparam,
};
static int ql_populate_free_queue(struct ql3_adapter *qdev)
{
	struct ql_rcv_buf_cb *lrg_buf_cb = qdev->lrg_buf_free_head;
	dma_addr_t map;
	int err;

	while (lrg_buf_cb) {
		if (!lrg_buf_cb->skb) {
			lrg_buf_cb->skb = netdev_alloc_skb(qdev->ndev,
							   qdev->lrg_buffer_len);
			if (unlikely(!lrg_buf_cb->skb)) {
				netdev_printk(KERN_DEBUG, qdev->ndev,
					      "Failed netdev_alloc_skb()\n");
				break;
			}

			/*
			 * We save some space to copy the ethhdr from
			 * the first buffer
			 */
			skb_reserve(lrg_buf_cb->skb, QL_HEADER_SPACE);
			map = pci_map_single(qdev->pdev,
					     lrg_buf_cb->skb->data,
					     qdev->lrg_buffer_len -
					     QL_HEADER_SPACE,
					     PCI_DMA_FROMDEVICE);

			err = pci_dma_mapping_error(qdev->pdev, map);
			if (err) {
				netdev_err(qdev->ndev,
					   "PCI mapping failed with error: %d\n",
					   err);
				dev_kfree_skb(lrg_buf_cb->skb);
				lrg_buf_cb->skb = NULL;
				break;
			}

			lrg_buf_cb->buf_phy_addr_low =
				cpu_to_le32(LS_64BITS(map));
			lrg_buf_cb->buf_phy_addr_high =
				cpu_to_le32(MS_64BITS(map));
			dma_unmap_addr_set(lrg_buf_cb, mapaddr, map);
			dma_unmap_len_set(lrg_buf_cb, maplen,
					  qdev->lrg_buffer_len -
					  QL_HEADER_SPACE);
			--qdev->lrg_buf_skb_check;
			if (!qdev->lrg_buf_skb_check)
				return 1;
		}
		lrg_buf_cb = lrg_buf_cb->next;
	}
	return 0;
}
/*
 * Caller holds hw_lock.
 */
static void ql_update_small_bufq_prod_index(struct ql3_adapter *qdev)
{
	struct ql3xxx_port_registers __iomem *port_regs =
			qdev->mem_map_registers;

	if (qdev->small_buf_release_cnt >= 16) {
		while (qdev->small_buf_release_cnt >= 16) {
			qdev->small_buf_q_producer_index++;

			if (qdev->small_buf_q_producer_index ==
			    NUM_SBUFQ_ENTRIES)
				qdev->small_buf_q_producer_index = 0;
			qdev->small_buf_release_cnt -= 8;
		}
		wmb();
		writel(qdev->small_buf_q_producer_index,
		       &port_regs->CommonRegs.rxSmallQProducerIndex);
	}
}
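
/*
 * Large buffers are posted to the chip eight at a time: each pass of the
 * inner loop below pulls eight buffers off the driver's free list, writes
 * their addresses into the large buffer queue, and only then advances the
 * producer index that the hardware sees.
 */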
/*
 * Caller holds hw_lock.
 */
static void ql_update_lrg_bufq_prod_index(struct ql3_adapter *qdev)
{
	struct bufq_addr_element *lrg_buf_q_ele;
	int i;
	struct ql_rcv_buf_cb *lrg_buf_cb;
	struct ql3xxx_port_registers __iomem *port_regs =
			qdev->mem_map_registers;

	if ((qdev->lrg_buf_free_count >= 8) &&
	    (qdev->lrg_buf_release_cnt >= 16)) {

		if (qdev->lrg_buf_skb_check)
			if (!ql_populate_free_queue(qdev))
				return;

		lrg_buf_q_ele = qdev->lrg_buf_next_free;

		while ((qdev->lrg_buf_release_cnt >= 16) &&
		       (qdev->lrg_buf_free_count >= 8)) {

			for (i = 0; i < 8; i++) {
				lrg_buf_cb =
					ql_get_from_lrg_buf_free_list(qdev);
				lrg_buf_q_ele->addr_high =
					lrg_buf_cb->buf_phy_addr_high;
				lrg_buf_q_ele->addr_low =
					lrg_buf_cb->buf_phy_addr_low;
				lrg_buf_q_ele++;

				qdev->lrg_buf_release_cnt--;
			}

			qdev->lrg_buf_q_producer_index++;

			if (qdev->lrg_buf_q_producer_index ==
			    qdev->num_lbufq_entries)
				qdev->lrg_buf_q_producer_index = 0;

			if (qdev->lrg_buf_q_producer_index ==
			    (qdev->num_lbufq_entries - 1)) {
				lrg_buf_q_ele = qdev->lrg_buf_q_virt_addr;
			}
		}
		wmb();
		qdev->lrg_buf_next_free = lrg_buf_q_ele;
		writel(qdev->lrg_buf_q_producer_index,
		       &port_regs->CommonRegs.rxLargeQProducerIndex);
	}
}
static void ql_process_mac_tx_intr(struct ql3_adapter *qdev,
				   struct ob_mac_iocb_rsp *mac_rsp)
{
	struct ql_tx_buf_cb *tx_cb;
	int i;

	if (mac_rsp->flags & OB_MAC_IOCB_RSP_S) {
		netdev_warn(qdev->ndev,
			    "Frame too short but it was padded and sent\n");
	}

	tx_cb = &qdev->tx_buf[mac_rsp->transaction_id];

	/* Check the transmit response flags for any errors */
	if (mac_rsp->flags & OB_MAC_IOCB_RSP_S) {
		netdev_err(qdev->ndev,
			   "Frame too short to be legal, frame not sent\n");

		qdev->ndev->stats.tx_errors++;
		goto frame_not_sent;
	}

	if (tx_cb->seg_count == 0) {
		netdev_err(qdev->ndev, "tx_cb->seg_count == 0: %d\n",
			   mac_rsp->transaction_id);

		qdev->ndev->stats.tx_errors++;
		goto invalid_seg_count;
	}

	pci_unmap_single(qdev->pdev,
			 dma_unmap_addr(&tx_cb->map[0], mapaddr),
			 dma_unmap_len(&tx_cb->map[0], maplen),
			 PCI_DMA_TODEVICE);
	tx_cb->seg_count--;
	if (tx_cb->seg_count) {
		for (i = 1; i < tx_cb->seg_count; i++) {
			pci_unmap_page(qdev->pdev,
				       dma_unmap_addr(&tx_cb->map[i],
						      mapaddr),
				       dma_unmap_len(&tx_cb->map[i], maplen),
				       PCI_DMA_TODEVICE);
		}
	}
	qdev->ndev->stats.tx_packets++;
	qdev->ndev->stats.tx_bytes += tx_cb->skb->len;

frame_not_sent:
	dev_kfree_skb_irq(tx_cb->skb);
	tx_cb->skb = NULL;

invalid_seg_count:
	atomic_inc(&qdev->tx_count);
}

static void ql_get_sbuf(struct ql3_adapter *qdev)
{
	if (++qdev->small_buf_index == NUM_SMALL_BUFFERS)
		qdev->small_buf_index = 0;
	qdev->small_buf_release_cnt++;
}

static struct ql_rcv_buf_cb *ql_get_lbuf(struct ql3_adapter *qdev)
{
	struct ql_rcv_buf_cb *lrg_buf_cb = NULL;
	lrg_buf_cb = &qdev->lrg_buf[qdev->lrg_buf_index];
	qdev->lrg_buf_release_cnt++;
	if (++qdev->lrg_buf_index == qdev->num_large_buffers)
		qdev->lrg_buf_index = 0;
	return lrg_buf_cb;
}
/*
 * The difference between 3022 and 3032 for inbound completions:
 * 3022 uses two buffers per completion.  The first buffer contains
 * (some) header info, the second the remainder of the headers plus
 * the data.  For this chip we reserve some space at the top of the
 * receive buffer so that the header info in buffer one can be
 * prepended to buffer two.  Buffer two is then sent up while
 * buffer one is returned to the hardware to be reused.
 * 3032 receives all of its data and headers in one buffer for a
 * simpler process.  3032 also supports checksum verification as
 * can be seen in ql_process_macip_rx_intr().
 */
static void ql_process_mac_rx_intr(struct ql3_adapter *qdev,
				   struct ib_mac_iocb_rsp *ib_mac_rsp_ptr)
{
	struct ql_rcv_buf_cb *lrg_buf_cb1 = NULL;
	struct ql_rcv_buf_cb *lrg_buf_cb2 = NULL;
	struct sk_buff *skb;
	u16 length = le16_to_cpu(ib_mac_rsp_ptr->length);

	/*
	 * Get the inbound address list (small buffer).
	 */
	ql_get_sbuf(qdev);

	if (qdev->device_id == QL3022_DEVICE_ID)
		lrg_buf_cb1 = ql_get_lbuf(qdev);

	/* start of second buffer */
	lrg_buf_cb2 = ql_get_lbuf(qdev);
	skb = lrg_buf_cb2->skb;

	qdev->ndev->stats.rx_packets++;
	qdev->ndev->stats.rx_bytes += length;

	skb_put(skb, length);
	pci_unmap_single(qdev->pdev,
			 dma_unmap_addr(lrg_buf_cb2, mapaddr),
			 dma_unmap_len(lrg_buf_cb2, maplen),
			 PCI_DMA_FROMDEVICE);
	prefetch(skb->data);
	skb->ip_summed = CHECKSUM_NONE;
	skb->protocol = eth_type_trans(skb, qdev->ndev);

	netif_receive_skb(skb);
	lrg_buf_cb2->skb = NULL;

	if (qdev->device_id == QL3022_DEVICE_ID)
		ql_release_to_lrg_buf_free_list(qdev, lrg_buf_cb1);
	ql_release_to_lrg_buf_free_list(qdev, lrg_buf_cb2);
}
static void ql_process_macip_rx_intr(struct ql3_adapter *qdev,
				     struct ib_ip_iocb_rsp *ib_ip_rsp_ptr)
{
	struct ql_rcv_buf_cb *lrg_buf_cb1 = NULL;
	struct ql_rcv_buf_cb *lrg_buf_cb2 = NULL;
	struct sk_buff *skb1 = NULL, *skb2;
	struct net_device *ndev = qdev->ndev;
	u16 length = le16_to_cpu(ib_ip_rsp_ptr->length);
	u16 size = 0;

	/*
	 * Get the inbound address list (small buffer).
	 */
	ql_get_sbuf(qdev);

	if (qdev->device_id == QL3022_DEVICE_ID) {
		/* start of first buffer on 3022 */
		lrg_buf_cb1 = ql_get_lbuf(qdev);
		skb1 = lrg_buf_cb1->skb;
		size = ETH_HLEN;
		if (*((u16 *) skb1->data) != 0xFFFF)
			size += VLAN_ETH_HLEN - ETH_HLEN;
	}

	/* start of second buffer */
	lrg_buf_cb2 = ql_get_lbuf(qdev);
	skb2 = lrg_buf_cb2->skb;

	skb_put(skb2, length);	/* Just the second buffer length here. */
	pci_unmap_single(qdev->pdev,
			 dma_unmap_addr(lrg_buf_cb2, mapaddr),
			 dma_unmap_len(lrg_buf_cb2, maplen),
			 PCI_DMA_FROMDEVICE);
	prefetch(skb2->data);

	skb2->ip_summed = CHECKSUM_NONE;
	if (qdev->device_id == QL3022_DEVICE_ID) {
		/*
		 * Copy the ethhdr from first buffer to second.  This
		 * is necessary for 3022 IP completions.
		 */
		skb_copy_from_linear_data_offset(skb1, VLAN_ID_LEN,
						 skb_push(skb2, size), size);
	} else {
		u16 checksum = le16_to_cpu(ib_ip_rsp_ptr->checksum);
		if (checksum &
		    (IB_IP_IOCB_RSP_3032_ICE |
		     IB_IP_IOCB_RSP_3032_CE)) {
			netdev_err(ndev,
				   "%s: Bad checksum for this %s packet, checksum = %x\n",
				   __func__,
				   ((checksum & IB_IP_IOCB_RSP_3032_TCP) ?
				    "TCP" : "UDP"), checksum);
		} else if ((checksum & IB_IP_IOCB_RSP_3032_TCP) ||
			   (checksum & IB_IP_IOCB_RSP_3032_UDP &&
			    !(checksum & IB_IP_IOCB_RSP_3032_NUC))) {
			skb2->ip_summed = CHECKSUM_UNNECESSARY;
		}
	}
	skb2->protocol = eth_type_trans(skb2, qdev->ndev);

	netif_receive_skb(skb2);
	ndev->stats.rx_packets++;
	ndev->stats.rx_bytes += length;
	lrg_buf_cb2->skb = NULL;

	if (qdev->device_id == QL3022_DEVICE_ID)
		ql_release_to_lrg_buf_free_list(qdev, lrg_buf_cb1);
	ql_release_to_lrg_buf_free_list(qdev, lrg_buf_cb2);
}
static int ql_tx_rx_clean(struct ql3_adapter *qdev,
			  int *tx_cleaned, int *rx_cleaned, int work_to_do)
{
	struct net_rsp_iocb *net_rsp;
	struct net_device *ndev = qdev->ndev;
	int work_done = 0;

	/* While there are entries in the completion queue. */
	while ((le32_to_cpu(*(qdev->prsp_producer_index)) !=
		qdev->rsp_consumer_index) && (work_done < work_to_do)) {

		net_rsp = qdev->rsp_current;
		/*
		 * Fix 3032 chip's undocumented "feature" where bit-8 is set
		 * if the inbound completion is for a VLAN.
		 */
		if (qdev->device_id == QL3032_DEVICE_ID)
			net_rsp->opcode &= 0x7f;
		switch (net_rsp->opcode) {

		case OPCODE_OB_MAC_IOCB_FN0:
		case OPCODE_OB_MAC_IOCB_FN2:
			ql_process_mac_tx_intr(qdev, (struct ob_mac_iocb_rsp *)
					       net_rsp);
			(*tx_cleaned)++;
			break;

		case OPCODE_IB_MAC_IOCB:
		case OPCODE_IB_3032_MAC_IOCB:
			ql_process_mac_rx_intr(qdev, (struct ib_mac_iocb_rsp *)
					       net_rsp);
			(*rx_cleaned)++;
			break;

		case OPCODE_IB_IP_IOCB:
		case OPCODE_IB_3032_IP_IOCB:
			ql_process_macip_rx_intr(qdev, (struct ib_ip_iocb_rsp *)
						 net_rsp);
			(*rx_cleaned)++;
			break;
		default: {
			u32 *tmp = (u32 *)net_rsp;
			netdev_err(ndev,
				   "Hit default case, not handled!\n"
				   "	dropping the packet, opcode = %x\n"
				   "0x%08lx 0x%08lx 0x%08lx 0x%08lx\n",
				   net_rsp->opcode,
				   (unsigned long int)tmp[0],
				   (unsigned long int)tmp[1],
				   (unsigned long int)tmp[2],
				   (unsigned long int)tmp[3]);
		}
		}

		qdev->rsp_consumer_index++;

		if (qdev->rsp_consumer_index == NUM_RSP_Q_ENTRIES) {
			qdev->rsp_consumer_index = 0;
			qdev->rsp_current = qdev->rsp_q_virt_addr;
		} else {
			qdev->rsp_current++;
		}

		work_done = *tx_cleaned + *rx_cleaned;
	}

	return work_done;
}
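
/*
 * NAPI poll: drain up to 'budget' completions and, only when the full
 * budget was not consumed, hand the buffer queue producer indices back
 * to the hardware, complete NAPI and re-enable interrupts.
 */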
static int ql_poll(struct napi_struct *napi, int budget)
{
	struct ql3_adapter *qdev = container_of(napi, struct ql3_adapter, napi);
	int rx_cleaned = 0, tx_cleaned = 0;
	unsigned long hw_flags;
	struct ql3xxx_port_registers __iomem *port_regs =
			qdev->mem_map_registers;

	ql_tx_rx_clean(qdev, &tx_cleaned, &rx_cleaned, budget);

	if (tx_cleaned + rx_cleaned != budget) {
		spin_lock_irqsave(&qdev->hw_lock, hw_flags);
		__napi_complete(napi);
		ql_update_small_bufq_prod_index(qdev);
		ql_update_lrg_bufq_prod_index(qdev);
		writel(qdev->rsp_consumer_index,
		       &port_regs->CommonRegs.rspQConsumerIndex);
		spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);

		ql_enable_interrupts(qdev);
	}
	return tx_cleaned + rx_cleaned;
}
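
/*
 * Interrupt handler: fatal-error and soft-reset events quiesce the
 * interface and are deferred to the reset worker; ordinary completion
 * interrupts are masked and handed off to the NAPI poll loop above.
 */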
static irqreturn_t ql3xxx_isr(int irq, void *dev_id)
{
	struct net_device *ndev = dev_id;
	struct ql3_adapter *qdev = netdev_priv(ndev);
	struct ql3xxx_port_registers __iomem *port_regs =
			qdev->mem_map_registers;
	u32 value;
	int handled = 1;
	u32 var;

	value = ql_read_common_reg_l(qdev,
				     &port_regs->CommonRegs.ispControlStatus);

	if (value & (ISP_CONTROL_FE | ISP_CONTROL_RI)) {
		spin_lock(&qdev->adapter_lock);
		netif_stop_queue(qdev->ndev);
		netif_carrier_off(qdev->ndev);
		ql_disable_interrupts(qdev);
		qdev->port_link_state = LS_DOWN;
		set_bit(QL_RESET_ACTIVE, &qdev->flags);

		if (value & ISP_CONTROL_FE) {
			/*
			 * Chip Fatal Error.
			 */
			var = ql_read_page0_reg_l(qdev,
						  &port_regs->PortFatalErrStatus);
			netdev_warn(ndev,
				    "Resetting chip. PortFatalErrStatus register = 0x%x\n",
				    var);
			set_bit(QL_RESET_START, &qdev->flags);
		} else {
			/*
			 * Soft Reset Requested.
			 */
			set_bit(QL_RESET_PER_SCSI, &qdev->flags);
			netdev_err(ndev,
				   "Another function issued a reset to the chip. ISR value = %x\n",
				   value);
		}
		queue_delayed_work(qdev->workqueue, &qdev->reset_work, 0);
		spin_unlock(&qdev->adapter_lock);
	} else if (value & ISP_IMR_DISABLE_CMPL_INT) {
		ql_disable_interrupts(qdev);
		if (likely(napi_schedule_prep(&qdev->napi)))
			__napi_schedule(&qdev->napi);
	} else
		return IRQ_NONE;

	return IRQ_RETVAL(handled);
}
/*
 * Get the total number of segments needed for the given number of
 * fragments.  This is necessary because outbound address lists (OAL)
 * will be used when more than two frags are given.  Each address list
 * has 5 addr/len pairs.  The 5th pair in each OAL is used to point to
 * the next OAL if more frags are coming.
 * That is why the frags:segment count ratio is not linear.
 */
static int ql_get_seg_count(struct ql3_adapter *qdev, unsigned short frags)
{
	if (qdev->device_id == QL3022_DEVICE_ID)
		return 1;

	switch (frags) {
	case 0:	return 1;	/* just the skb->data seg */
	case 1:	return 2;	/* skb->data + 1 frag */
	case 2:	return 3;	/* skb->data + 2 frags */
	case 3:	return 5;	/* skb->data + 1 frag + 1 OAL containing 2 frags */
	case 4:	return 6;
	case 5:	return 7;
	case 6:	return 8;
	case 7:	return 10;
	case 8:	return 11;
	case 9:	return 12;
	case 10: return 13;
	case 11: return 15;
	case 12: return 16;
	case 13: return 17;
	case 14: return 18;
	case 15: return 20;
	case 16: return 21;
	case 17: return 22;
	case 18: return 23;
	}
	return -1;
}
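
/*
 * Worked example for the table above: an skb with 3 fragments occupies
 * 4 data buffers (skb->data + 3 frags).  Once a continuation entry is
 * needed, only two of the IOCB's three ALPs can carry data, so the
 * count becomes 2 data ALPs + 1 continuation ALP + 2 OAL entries = 5.
 */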
static void ql_hw_csum_setup(const struct sk_buff *skb,
			     struct ob_mac_iocb_req *mac_iocb_ptr)
{
	const struct iphdr *ip = ip_hdr(skb);

	mac_iocb_ptr->ip_hdr_off = skb_network_offset(skb);
	mac_iocb_ptr->ip_hdr_len = ip->ihl;

	if (ip->protocol == IPPROTO_TCP) {
		mac_iocb_ptr->flags1 |= OB_3032MAC_IOCB_REQ_TC |
			OB_3032MAC_IOCB_REQ_IC;
	} else {
		mac_iocb_ptr->flags1 |= OB_3032MAC_IOCB_REQ_UC |
			OB_3032MAC_IOCB_REQ_IC;
	}
}
/*
 * Map the buffers for this transmit.
 * This will return NETDEV_TX_BUSY or NETDEV_TX_OK based on success.
 */
static int ql_send_map(struct ql3_adapter *qdev,
		       struct ob_mac_iocb_req *mac_iocb_ptr,
		       struct ql_tx_buf_cb *tx_cb,
		       struct sk_buff *skb)
{
	struct oal *oal;
	struct oal_entry *oal_entry;
	int len = skb_headlen(skb);
	dma_addr_t map;
	int err;
	int completed_segs, i;
	int seg_cnt, seg = 0;
	int frag_cnt = (int)skb_shinfo(skb)->nr_frags;

	seg_cnt = tx_cb->seg_count;
	/*
	 * Map the skb buffer first.
	 */
	map = pci_map_single(qdev->pdev, skb->data, len, PCI_DMA_TODEVICE);

	err = pci_dma_mapping_error(qdev->pdev, map);
	if (err) {
		netdev_err(qdev->ndev, "PCI mapping failed with error: %d\n",
			   err);
		return NETDEV_TX_BUSY;
	}

	oal_entry = (struct oal_entry *)&mac_iocb_ptr->buf_addr0_low;
	oal_entry->dma_lo = cpu_to_le32(LS_64BITS(map));
	oal_entry->dma_hi = cpu_to_le32(MS_64BITS(map));
	oal_entry->len = cpu_to_le32(len);
	dma_unmap_addr_set(&tx_cb->map[seg], mapaddr, map);
	dma_unmap_len_set(&tx_cb->map[seg], maplen, len);
	seg++;

	if (seg_cnt == 1) {
		/* Terminate the last segment. */
		oal_entry->len |= cpu_to_le32(OAL_LAST_ENTRY);
		return NETDEV_TX_OK;
	}

	oal = tx_cb->oal;
	for (completed_segs = 0; completed_segs < frag_cnt;
	     completed_segs++, seg++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[completed_segs];
		oal_entry++;
		if ((seg == 2 && seg_cnt > 3) ||	/* Check for continuation */
		    (seg == 7 && seg_cnt > 8) ||	/* requirements. It's strange */
		    (seg == 12 && seg_cnt > 13) ||	/* but necessary. */
		    (seg == 17 && seg_cnt > 18)) {
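			/*
			 * seg 2 is the IOCB's third ALP and segs 7/12/17
			 * are the fifth ALP of each OAL; when more data
			 * still follows, that slot must hold a pointer to
			 * the next OAL instead of carrying data itself.
			 */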
2399 /* Continuation entry points to outbound address list. */
2400 map = pci_map_single(qdev->pdev, oal,
2404 err = pci_dma_mapping_error(qdev->pdev, map);
2407 netdev_err(qdev->ndev,
2408 "PCI mapping outbound address list with error: %d\n",
2413 oal_entry->dma_lo = cpu_to_le32(LS_64BITS(map));
2414 oal_entry->dma_hi = cpu_to_le32(MS_64BITS(map));
2416 cpu_to_le32(sizeof(struct oal) |
2418 dma_unmap_addr_set(&tx_cb->map[seg], mapaddr,
2420 dma_unmap_len_set(&tx_cb->map[seg], maplen,
2421 sizeof(struct oal));
2422 oal_entry = (struct oal_entry *)oal;
2428 pci_map_page(qdev->pdev, frag->page,
2429 frag->page_offset, frag->size,
2432 err = pci_dma_mapping_error(qdev->pdev, map);
2434 netdev_err(qdev->ndev,
2435 "PCI mapping frags failed with error: %d\n",
2440 oal_entry->dma_lo = cpu_to_le32(LS_64BITS(map));
2441 oal_entry->dma_hi = cpu_to_le32(MS_64BITS(map));
2442 oal_entry->len = cpu_to_le32(frag->size);
2443 dma_unmap_addr_set(&tx_cb->map[seg], mapaddr, map);
2444 dma_unmap_len_set(&tx_cb->map[seg], maplen,
2447 /* Terminate the last segment. */
2448 oal_entry->len |= cpu_to_le32(OAL_LAST_ENTRY);
2451 return NETDEV_TX_OK;
2454 /* A PCI mapping failed and now we will need to back out
2455 * We need to traverse through the oal's and associated pages which
2456 * have been mapped and now we must unmap them to clean up properly
2460 oal_entry = (struct oal_entry *)&mac_iocb_ptr->buf_addr0_low;
2462 for (i=0; i<completed_segs; i++,seg++) {
2465 if((seg == 2 && seg_cnt > 3) || /* Check for continuation */
2466 (seg == 7 && seg_cnt > 8) || /* requirements. It's strange */
2467 (seg == 12 && seg_cnt > 13) || /* but necessary. */
2468 (seg == 17 && seg_cnt > 18)) {
2469 pci_unmap_single(qdev->pdev,
2470 dma_unmap_addr(&tx_cb->map[seg], mapaddr),
2471 dma_unmap_len(&tx_cb->map[seg], maplen),
2477 pci_unmap_page(qdev->pdev,
2478 dma_unmap_addr(&tx_cb->map[seg], mapaddr),
2479 dma_unmap_len(&tx_cb->map[seg], maplen),
2483 pci_unmap_single(qdev->pdev,
2484 dma_unmap_addr(&tx_cb->map[0], mapaddr),
2485 dma_unmap_addr(&tx_cb->map[0], maplen),
2488 return NETDEV_TX_BUSY;
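/*
 * The magic seg values (2, 7, 12, 17) above mark the last usable ALP of
 * the IOCB (entries 0-1 hold data, entry 2 chains) and of each OAL
 * (entries 0-3 hold data, entry 4 chains).  A continuation entry is only
 * consumed when more segments actually follow, hence the paired seg_cnt
 * checks; the backout loop must walk the same positions to unmap the
 * OAL mappings it created.
 */
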
/*
 * The difference between 3022 and 3032 sends:
 * 3022 only supports a simple single segment transmission.
 * 3032 supports checksumming and scatter/gather lists (fragments).
 * The 3032 supports sglists by using the 3 addr/len pairs (ALP)
 * in the IOCB plus a chain of outbound address lists (OAL) that
 * each contain 5 ALPs.  The last ALP of the IOCB (3rd) or OAL (5th)
 * will be used to point to an OAL when more ALP entries are required.
 * The IOCB is always the top of the chain followed by one or more
 * OALs (when necessary).
 */
static netdev_tx_t ql3xxx_send(struct sk_buff *skb,
			       struct net_device *ndev)
{
	struct ql3_adapter *qdev = netdev_priv(ndev);
	struct ql3xxx_port_registers __iomem *port_regs =
		qdev->mem_map_registers;
	struct ql_tx_buf_cb *tx_cb;
	u32 tot_len = skb->len;
	struct ob_mac_iocb_req *mac_iocb_ptr;

	if (unlikely(atomic_read(&qdev->tx_count) < 2))
		return NETDEV_TX_BUSY;

	tx_cb = &qdev->tx_buf[qdev->req_producer_index];
	tx_cb->seg_count = ql_get_seg_count(qdev,
					    skb_shinfo(skb)->nr_frags);
	if (tx_cb->seg_count == -1) {
		netdev_err(ndev, "%s: invalid segment count!\n", __func__);
		return NETDEV_TX_OK;
	}

	mac_iocb_ptr = tx_cb->queue_entry;
	memset((void *)mac_iocb_ptr, 0, sizeof(struct ob_mac_iocb_req));
	mac_iocb_ptr->opcode = qdev->mac_ob_opcode;
	mac_iocb_ptr->flags = OB_MAC_IOCB_REQ_X;
	mac_iocb_ptr->flags |= qdev->mb_bit_mask;
	mac_iocb_ptr->transaction_id = qdev->req_producer_index;
	mac_iocb_ptr->data_len = cpu_to_le16((u16) tot_len);
	tx_cb->skb = skb;
	if (qdev->device_id == QL3032_DEVICE_ID &&
	    skb->ip_summed == CHECKSUM_PARTIAL)
		ql_hw_csum_setup(skb, mac_iocb_ptr);

	if (ql_send_map(qdev, mac_iocb_ptr, tx_cb, skb) != NETDEV_TX_OK) {
		netdev_err(ndev, "%s: Could not map the segments!\n", __func__);
		return NETDEV_TX_BUSY;
	}

	wmb();
	qdev->req_producer_index++;
	if (qdev->req_producer_index == NUM_REQ_Q_ENTRIES)
		qdev->req_producer_index = 0;
	wmb();
	ql_write_common_reg_l(qdev,
			      &port_regs->CommonRegs.reqQProducerIndex,
			      qdev->req_producer_index);

	netif_printk(qdev, tx_queued, KERN_DEBUG, ndev,
		     "tx queued, slot %d, len %d\n",
		     qdev->req_producer_index, skb->len);

	atomic_dec(&qdev->tx_count);
	return NETDEV_TX_OK;
}

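/*
 * The tx_count guard above keeps at least one request-queue slot free,
 * presumably so a full ring can never make the producer index catch up
 * with the consumer index.  The ring wraps at NUM_REQ_Q_ENTRIES and the
 * chip only learns of new work from the reqQProducerIndex doorbell
 * write; the wmb() calls order the IOCB contents before that write.
 */
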
static int ql_alloc_net_req_rsp_queues(struct ql3_adapter *qdev)
{
	qdev->req_q_size =
		(u32) (NUM_REQ_Q_ENTRIES * sizeof(struct ob_mac_iocb_req));

	qdev->req_q_virt_addr =
		pci_alloc_consistent(qdev->pdev,
				     (size_t) qdev->req_q_size,
				     &qdev->req_q_phy_addr);

	if ((qdev->req_q_virt_addr == NULL) ||
	    LS_64BITS(qdev->req_q_phy_addr) & (qdev->req_q_size - 1)) {
		netdev_err(qdev->ndev, "reqQ failed\n");
		return -ENOMEM;
	}

	qdev->rsp_q_size = NUM_RSP_Q_ENTRIES * sizeof(struct net_rsp_iocb);

	qdev->rsp_q_virt_addr =
		pci_alloc_consistent(qdev->pdev,
				     (size_t) qdev->rsp_q_size,
				     &qdev->rsp_q_phy_addr);

	if ((qdev->rsp_q_virt_addr == NULL) ||
	    LS_64BITS(qdev->rsp_q_phy_addr) & (qdev->rsp_q_size - 1)) {
		netdev_err(qdev->ndev, "rspQ allocation failed\n");
		pci_free_consistent(qdev->pdev, (size_t) qdev->req_q_size,
				    qdev->req_q_virt_addr,
				    qdev->req_q_phy_addr);
		return -ENOMEM;
	}

	set_bit(QL_ALLOC_REQ_RSP_Q_DONE, &qdev->flags);
	return 0;
}

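/*
 * The LS_64BITS(addr) & (size - 1) tests above reject a queue whose base
 * is not aligned to its own size.  Assuming both sizes are powers of two
 * (entry counts and entry sizes here are), the mask check is a cheap
 * natural-alignment test on the low 32 bits of the DMA address.
 */
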
static void ql_free_net_req_rsp_queues(struct ql3_adapter *qdev)
{
	if (!test_bit(QL_ALLOC_REQ_RSP_Q_DONE, &qdev->flags)) {
		netdev_info(qdev->ndev, "Already done\n");
		return;
	}

	pci_free_consistent(qdev->pdev,
			    qdev->req_q_size,
			    qdev->req_q_virt_addr, qdev->req_q_phy_addr);

	qdev->req_q_virt_addr = NULL;

	pci_free_consistent(qdev->pdev,
			    qdev->rsp_q_size,
			    qdev->rsp_q_virt_addr, qdev->rsp_q_phy_addr);

	qdev->rsp_q_virt_addr = NULL;

	clear_bit(QL_ALLOC_REQ_RSP_Q_DONE, &qdev->flags);
}

static int ql_alloc_buffer_queues(struct ql3_adapter *qdev)
{
	/* Create Large Buffer Queue */
	qdev->lrg_buf_q_size =
		qdev->num_lbufq_entries * sizeof(struct lrg_buf_q_entry);
	if (qdev->lrg_buf_q_size < PAGE_SIZE)
		qdev->lrg_buf_q_alloc_size = PAGE_SIZE;
	else
		qdev->lrg_buf_q_alloc_size = qdev->lrg_buf_q_size * 2;

	qdev->lrg_buf = kmalloc(qdev->num_large_buffers *
				sizeof(struct ql_rcv_buf_cb), GFP_KERNEL);
	if (qdev->lrg_buf == NULL) {
		netdev_err(qdev->ndev, "qdev->lrg_buf alloc failed\n");
		return -ENOMEM;
	}

	qdev->lrg_buf_q_alloc_virt_addr =
		pci_alloc_consistent(qdev->pdev,
				     qdev->lrg_buf_q_alloc_size,
				     &qdev->lrg_buf_q_alloc_phy_addr);

	if (qdev->lrg_buf_q_alloc_virt_addr == NULL) {
		netdev_err(qdev->ndev, "lBufQ failed\n");
		kfree(qdev->lrg_buf);
		return -ENOMEM;
	}
	qdev->lrg_buf_q_virt_addr = qdev->lrg_buf_q_alloc_virt_addr;
	qdev->lrg_buf_q_phy_addr = qdev->lrg_buf_q_alloc_phy_addr;

	/* Create Small Buffer Queue */
	qdev->small_buf_q_size =
		NUM_SBUFQ_ENTRIES * sizeof(struct lrg_buf_q_entry);
	if (qdev->small_buf_q_size < PAGE_SIZE)
		qdev->small_buf_q_alloc_size = PAGE_SIZE;
	else
		qdev->small_buf_q_alloc_size = qdev->small_buf_q_size * 2;

	qdev->small_buf_q_alloc_virt_addr =
		pci_alloc_consistent(qdev->pdev,
				     qdev->small_buf_q_alloc_size,
				     &qdev->small_buf_q_alloc_phy_addr);

	if (qdev->small_buf_q_alloc_virt_addr == NULL) {
		netdev_err(qdev->ndev, "Small Buffer Queue allocation failed\n");
		pci_free_consistent(qdev->pdev, qdev->lrg_buf_q_alloc_size,
				    qdev->lrg_buf_q_alloc_virt_addr,
				    qdev->lrg_buf_q_alloc_phy_addr);
		kfree(qdev->lrg_buf);
		return -ENOMEM;
	}

	qdev->small_buf_q_virt_addr = qdev->small_buf_q_alloc_virt_addr;
	qdev->small_buf_q_phy_addr = qdev->small_buf_q_alloc_phy_addr;
	set_bit(QL_ALLOC_BUFQS_DONE, &qdev->flags);
	return 0;
}

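/*
 * Sizing note for the above: each bufq entry carries
 * QL_ADDR_ELE_PER_BUFQ_ENTRY buffer addresses, so num_large_buffers is
 * num_lbufq_entries * QL_ADDR_ELE_PER_BUFQ_ENTRY (set in
 * ql_alloc_mem_resources).  The doubled allocation size for queues
 * larger than a page appears to leave slack for alignment, although the
 * returned virt/phy addresses are used unadjusted here.
 */
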
static void ql_free_buffer_queues(struct ql3_adapter *qdev)
{
	if (!test_bit(QL_ALLOC_BUFQS_DONE, &qdev->flags)) {
		netdev_info(qdev->ndev, "Already done\n");
		return;
	}
	kfree(qdev->lrg_buf);
	pci_free_consistent(qdev->pdev,
			    qdev->lrg_buf_q_alloc_size,
			    qdev->lrg_buf_q_alloc_virt_addr,
			    qdev->lrg_buf_q_alloc_phy_addr);

	qdev->lrg_buf_q_virt_addr = NULL;

	pci_free_consistent(qdev->pdev,
			    qdev->small_buf_q_alloc_size,
			    qdev->small_buf_q_alloc_virt_addr,
			    qdev->small_buf_q_alloc_phy_addr);

	qdev->small_buf_q_virt_addr = NULL;

	clear_bit(QL_ALLOC_BUFQS_DONE, &qdev->flags);
}

static int ql_alloc_small_buffers(struct ql3_adapter *qdev)
{
	int i;
	struct bufq_addr_element *small_buf_q_entry;

	/* Currently we allocate one chunk of memory and carve all the
	 * small buffers out of it. */
	qdev->small_buf_total_size =
		(QL_ADDR_ELE_PER_BUFQ_ENTRY * NUM_SBUFQ_ENTRIES *
		 QL_SMALL_BUFFER_SIZE);

	qdev->small_buf_virt_addr =
		pci_alloc_consistent(qdev->pdev,
				     qdev->small_buf_total_size,
				     &qdev->small_buf_phy_addr);

	if (qdev->small_buf_virt_addr == NULL) {
		netdev_err(qdev->ndev, "Failed to get small buffer memory\n");
		return -ENOMEM;
	}

	qdev->small_buf_phy_addr_low = LS_64BITS(qdev->small_buf_phy_addr);
	qdev->small_buf_phy_addr_high = MS_64BITS(qdev->small_buf_phy_addr);

	small_buf_q_entry = qdev->small_buf_q_virt_addr;

	/* Initialize the small buffer queue. */
	for (i = 0; i < (QL_ADDR_ELE_PER_BUFQ_ENTRY * NUM_SBUFQ_ENTRIES); i++) {
		small_buf_q_entry->addr_high =
			cpu_to_le32(qdev->small_buf_phy_addr_high);
		small_buf_q_entry->addr_low =
			cpu_to_le32(qdev->small_buf_phy_addr_low +
				    (i * QL_SMALL_BUFFER_SIZE));
		small_buf_q_entry++;
	}
	qdev->small_buf_index = 0;
	set_bit(QL_ALLOC_SMALL_BUF_DONE, &qdev->flags);
	return 0;
}

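/*
 * Layout produced above: one coherent block holding
 * NUM_SBUFQ_ENTRIES * QL_ADDR_ELE_PER_BUFQ_ENTRY small buffers of
 * QL_SMALL_BUFFER_SIZE bytes each, with the bufq address elements
 * filled at a fixed stride -- element i points at byte offset
 * i * QL_SMALL_BUFFER_SIZE of the block.
 */
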
static void ql_free_small_buffers(struct ql3_adapter *qdev)
{
	if (!test_bit(QL_ALLOC_SMALL_BUF_DONE, &qdev->flags)) {
		netdev_info(qdev->ndev, "Already done\n");
		return;
	}
	if (qdev->small_buf_virt_addr != NULL) {
		pci_free_consistent(qdev->pdev,
				    qdev->small_buf_total_size,
				    qdev->small_buf_virt_addr,
				    qdev->small_buf_phy_addr);

		qdev->small_buf_virt_addr = NULL;
	}
}

static void ql_free_large_buffers(struct ql3_adapter *qdev)
{
	int i;
	struct ql_rcv_buf_cb *lrg_buf_cb;

	for (i = 0; i < qdev->num_large_buffers; i++) {
		lrg_buf_cb = &qdev->lrg_buf[i];
		if (lrg_buf_cb->skb) {
			dev_kfree_skb(lrg_buf_cb->skb);
			pci_unmap_single(qdev->pdev,
					 dma_unmap_addr(lrg_buf_cb, mapaddr),
					 dma_unmap_len(lrg_buf_cb, maplen),
					 PCI_DMA_FROMDEVICE);
			memset(lrg_buf_cb, 0, sizeof(struct ql_rcv_buf_cb));
		} else {
			break;
		}
	}
}

static void ql_init_large_buffers(struct ql3_adapter *qdev)
{
	int i;
	struct ql_rcv_buf_cb *lrg_buf_cb;
	struct bufq_addr_element *buf_addr_ele = qdev->lrg_buf_q_virt_addr;

	for (i = 0; i < qdev->num_large_buffers; i++) {
		lrg_buf_cb = &qdev->lrg_buf[i];
		buf_addr_ele->addr_high = lrg_buf_cb->buf_phy_addr_high;
		buf_addr_ele->addr_low = lrg_buf_cb->buf_phy_addr_low;
		buf_addr_ele++;
	}
	qdev->lrg_buf_index = 0;
	qdev->lrg_buf_skb_check = 0;
}

static int ql_alloc_large_buffers(struct ql3_adapter *qdev)
{
	int i;
	struct ql_rcv_buf_cb *lrg_buf_cb;
	struct sk_buff *skb;
	dma_addr_t map;
	int err;

	for (i = 0; i < qdev->num_large_buffers; i++) {
		skb = netdev_alloc_skb(qdev->ndev,
				       qdev->lrg_buffer_len);
		if (unlikely(!skb)) {
			/* Better luck next round */
			netdev_err(qdev->ndev,
				   "large buff alloc failed for %d bytes at index %d\n",
				   qdev->lrg_buffer_len * 2, i);
			ql_free_large_buffers(qdev);
			return -ENOMEM;
		}

		lrg_buf_cb = &qdev->lrg_buf[i];
		memset(lrg_buf_cb, 0, sizeof(struct ql_rcv_buf_cb));
		lrg_buf_cb->index = i;
		lrg_buf_cb->skb = skb;
		/*
		 * We save some space to copy the ethhdr from the first
		 * buffer.
		 */
		skb_reserve(skb, QL_HEADER_SPACE);
		map = pci_map_single(qdev->pdev,
				     skb->data,
				     qdev->lrg_buffer_len - QL_HEADER_SPACE,
				     PCI_DMA_FROMDEVICE);

		err = pci_dma_mapping_error(qdev->pdev, map);
		if (err) {
			netdev_err(qdev->ndev,
				   "PCI mapping failed with error: %d\n",
				   err);
			dev_kfree_skb_any(skb);
			ql_free_large_buffers(qdev);
			return -ENOMEM;
		}

		dma_unmap_addr_set(lrg_buf_cb, mapaddr, map);
		dma_unmap_len_set(lrg_buf_cb, maplen,
				  qdev->lrg_buffer_len - QL_HEADER_SPACE);
		lrg_buf_cb->buf_phy_addr_low = cpu_to_le32(LS_64BITS(map));
		lrg_buf_cb->buf_phy_addr_high = cpu_to_le32(MS_64BITS(map));
	}
	return 0;
}

static void ql_free_send_free_list(struct ql3_adapter *qdev)
{
	struct ql_tx_buf_cb *tx_cb;
	int i;

	tx_cb = &qdev->tx_buf[0];
	for (i = 0; i < NUM_REQ_Q_ENTRIES; i++) {
		kfree(tx_cb->oal);
		tx_cb->oal = NULL;
		tx_cb++;
	}
}

static int ql_create_send_free_list(struct ql3_adapter *qdev)
{
	struct ql_tx_buf_cb *tx_cb;
	int i;
	struct ob_mac_iocb_req *req_q_curr = qdev->req_q_virt_addr;

	/* Create free list of transmit buffers */
	for (i = 0; i < NUM_REQ_Q_ENTRIES; i++) {
		tx_cb = &qdev->tx_buf[i];
		tx_cb->skb = NULL;
		tx_cb->queue_entry = req_q_curr;
		req_q_curr++;
		tx_cb->oal = kmalloc(512, GFP_KERNEL);
		if (tx_cb->oal == NULL)
			return -ENOMEM;
	}
	return 0;
}

static int ql_alloc_mem_resources(struct ql3_adapter *qdev)
{
	if (qdev->ndev->mtu == NORMAL_MTU_SIZE) {
		qdev->num_lbufq_entries = NUM_LBUFQ_ENTRIES;
		qdev->lrg_buffer_len = NORMAL_MTU_SIZE;
	} else if (qdev->ndev->mtu == JUMBO_MTU_SIZE) {
		/*
		 * Bigger buffers, so fewer of them.
		 */
		qdev->num_lbufq_entries = JUMBO_NUM_LBUFQ_ENTRIES;
		qdev->lrg_buffer_len = JUMBO_MTU_SIZE;
	} else {
		netdev_err(qdev->ndev,
			   "Invalid mtu size: %d.  Only %d and %d are accepted.\n",
			   qdev->ndev->mtu, NORMAL_MTU_SIZE, JUMBO_MTU_SIZE);
		return -EINVAL;
	}
	qdev->num_large_buffers =
		qdev->num_lbufq_entries * QL_ADDR_ELE_PER_BUFQ_ENTRY;
	qdev->lrg_buffer_len += VLAN_ETH_HLEN + VLAN_ID_LEN + QL_HEADER_SPACE;
	qdev->max_frame_size =
		(qdev->lrg_buffer_len - QL_HEADER_SPACE) + ETHERNET_CRC_SIZE;

	/*
	 * First allocate a page of shared memory and use it for shadow
	 * locations of the Network Request Queue Consumer Address Register
	 * and the Network Completion Queue Producer Index Register.
	 */
	qdev->shadow_reg_virt_addr =
		pci_alloc_consistent(qdev->pdev,
				     PAGE_SIZE, &qdev->shadow_reg_phy_addr);

	if (qdev->shadow_reg_virt_addr != NULL) {
		qdev->preq_consumer_index = (u16 *) qdev->shadow_reg_virt_addr;
		qdev->req_consumer_index_phy_addr_high =
			MS_64BITS(qdev->shadow_reg_phy_addr);
		qdev->req_consumer_index_phy_addr_low =
			LS_64BITS(qdev->shadow_reg_phy_addr);

		qdev->prsp_producer_index =
			(__le32 *) (((u8 *) qdev->preq_consumer_index) + 8);
		qdev->rsp_producer_index_phy_addr_high =
			qdev->req_consumer_index_phy_addr_high;
		qdev->rsp_producer_index_phy_addr_low =
			qdev->req_consumer_index_phy_addr_low + 8;
	} else {
		netdev_err(qdev->ndev, "shadowReg Alloc failed\n");
		return -ENOMEM;
	}

	if (ql_alloc_net_req_rsp_queues(qdev) != 0) {
		netdev_err(qdev->ndev, "ql_alloc_net_req_rsp_queues failed\n");
		goto err_req_rsp;
	}

	if (ql_alloc_buffer_queues(qdev) != 0) {
		netdev_err(qdev->ndev, "ql_alloc_buffer_queues failed\n");
		goto err_buffer_queues;
	}

	if (ql_alloc_small_buffers(qdev) != 0) {
		netdev_err(qdev->ndev, "ql_alloc_small_buffers failed\n");
		goto err_small_buffers;
	}

	if (ql_alloc_large_buffers(qdev) != 0) {
		netdev_err(qdev->ndev, "ql_alloc_large_buffers failed\n");
		goto err_small_buffers;
	}

	/* Initialize the large buffer queue. */
	ql_init_large_buffers(qdev);
	if (ql_create_send_free_list(qdev))
		goto err_free_list;

	qdev->rsp_current = qdev->rsp_q_virt_addr;

	return 0;

err_free_list:
	ql_free_send_free_list(qdev);
	ql_free_large_buffers(qdev);
err_small_buffers:
	ql_free_small_buffers(qdev);
	ql_free_buffer_queues(qdev);
err_buffer_queues:
	ql_free_net_req_rsp_queues(qdev);
err_req_rsp:
	pci_free_consistent(qdev->pdev,
			    PAGE_SIZE,
			    qdev->shadow_reg_virt_addr,
			    qdev->shadow_reg_phy_addr);

	return -ENOMEM;
}

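/*
 * Shadow page layout set up above: the request-queue consumer index
 * lives at offset 0 of the shared page and the response-queue producer
 * index at offset 8.  Per the comment in the code, the chip posts both
 * values into this page by DMA, so the driver reads host memory instead
 * of polling chip registers.
 */
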
static void ql_free_mem_resources(struct ql3_adapter *qdev)
{
	ql_free_send_free_list(qdev);
	ql_free_large_buffers(qdev);
	ql_free_small_buffers(qdev);
	ql_free_buffer_queues(qdev);
	ql_free_net_req_rsp_queues(qdev);
	if (qdev->shadow_reg_virt_addr != NULL) {
		pci_free_consistent(qdev->pdev,
				    PAGE_SIZE,
				    qdev->shadow_reg_virt_addr,
				    qdev->shadow_reg_phy_addr);
		qdev->shadow_reg_virt_addr = NULL;
	}
}

static int ql_init_misc_registers(struct ql3_adapter *qdev)
{
	struct ql3xxx_local_ram_registers __iomem *local_ram =
		(void __iomem *)qdev->mem_map_registers;

	if (ql_sem_spinlock(qdev, QL_DDR_RAM_SEM_MASK,
			    (QL_RESOURCE_BITS_BASE_CODE |
			     (qdev->mac_index) * 2) << 4))
		return -1;

	ql_write_page2_reg(qdev,
			   &local_ram->bufletSize, qdev->nvram_data.bufletSize);

	ql_write_page2_reg(qdev,
			   &local_ram->maxBufletCount,
			   qdev->nvram_data.bufletCount);

	ql_write_page2_reg(qdev,
			   &local_ram->freeBufletThresholdLow,
			   (qdev->nvram_data.tcpWindowThreshold25 << 16) |
			   (qdev->nvram_data.tcpWindowThreshold0));

	ql_write_page2_reg(qdev,
			   &local_ram->freeBufletThresholdHigh,
			   qdev->nvram_data.tcpWindowThreshold50);

	ql_write_page2_reg(qdev,
			   &local_ram->ipHashTableBase,
			   (qdev->nvram_data.ipHashTableBaseHi << 16) |
			   qdev->nvram_data.ipHashTableBaseLo);
	ql_write_page2_reg(qdev,
			   &local_ram->ipHashTableCount,
			   qdev->nvram_data.ipHashTableSize);
	ql_write_page2_reg(qdev,
			   &local_ram->tcpHashTableBase,
			   (qdev->nvram_data.tcpHashTableBaseHi << 16) |
			   qdev->nvram_data.tcpHashTableBaseLo);
	ql_write_page2_reg(qdev,
			   &local_ram->tcpHashTableCount,
			   qdev->nvram_data.tcpHashTableSize);
	ql_write_page2_reg(qdev,
			   &local_ram->ncbBase,
			   (qdev->nvram_data.ncbTableBaseHi << 16) |
			   qdev->nvram_data.ncbTableBaseLo);
	ql_write_page2_reg(qdev,
			   &local_ram->maxNcbCount,
			   qdev->nvram_data.ncbTableSize);
	ql_write_page2_reg(qdev,
			   &local_ram->drbBase,
			   (qdev->nvram_data.drbTableBaseHi << 16) |
			   qdev->nvram_data.drbTableBaseLo);
	ql_write_page2_reg(qdev,
			   &local_ram->maxDrbCount,
			   qdev->nvram_data.drbTableSize);
	ql_sem_unlock(qdev, QL_DDR_RAM_SEM_MASK);
	return 0;
}

static int ql_adapter_initialize(struct ql3_adapter *qdev)
{
	u32 value;
	struct ql3xxx_port_registers __iomem *port_regs =
		qdev->mem_map_registers;
	struct ql3xxx_host_memory_registers __iomem *hmem_regs =
		(void __iomem *)port_regs;
	u32 delay = 10;
	int status = 0;
	unsigned long hw_flags = 0;

	if (ql_mii_setup(qdev))
		return -1;

	/* Bring the PHY out of reset */
	ql_write_common_reg(qdev, &port_regs->CommonRegs.serialPortInterfaceReg,
			    (ISP_SERIAL_PORT_IF_WE |
			     (ISP_SERIAL_PORT_IF_WE << 16)));
	/* Give the PHY time to come out of reset. */
	mdelay(100);
	qdev->port_link_state = LS_DOWN;
	netif_carrier_off(qdev->ndev);

	/* V2 chip fix for ARS-39168. */
	ql_write_common_reg(qdev, &port_regs->CommonRegs.serialPortInterfaceReg,
			    (ISP_SERIAL_PORT_IF_SDE |
			     (ISP_SERIAL_PORT_IF_SDE << 16)));

	/* Request Queue Registers */
	*((u32 *)(qdev->preq_consumer_index)) = 0;
	atomic_set(&qdev->tx_count, NUM_REQ_Q_ENTRIES);
	qdev->req_producer_index = 0;

	ql_write_page1_reg(qdev,
			   &hmem_regs->reqConsumerIndexAddrHigh,
			   qdev->req_consumer_index_phy_addr_high);
	ql_write_page1_reg(qdev,
			   &hmem_regs->reqConsumerIndexAddrLow,
			   qdev->req_consumer_index_phy_addr_low);

	ql_write_page1_reg(qdev,
			   &hmem_regs->reqBaseAddrHigh,
			   MS_64BITS(qdev->req_q_phy_addr));
	ql_write_page1_reg(qdev,
			   &hmem_regs->reqBaseAddrLow,
			   LS_64BITS(qdev->req_q_phy_addr));
	ql_write_page1_reg(qdev, &hmem_regs->reqLength, NUM_REQ_Q_ENTRIES);

	/* Response Queue Registers */
	*((__le16 *) (qdev->prsp_producer_index)) = 0;
	qdev->rsp_consumer_index = 0;
	qdev->rsp_current = qdev->rsp_q_virt_addr;

	ql_write_page1_reg(qdev,
			   &hmem_regs->rspProducerIndexAddrHigh,
			   qdev->rsp_producer_index_phy_addr_high);
	ql_write_page1_reg(qdev,
			   &hmem_regs->rspProducerIndexAddrLow,
			   qdev->rsp_producer_index_phy_addr_low);

	ql_write_page1_reg(qdev,
			   &hmem_regs->rspBaseAddrHigh,
			   MS_64BITS(qdev->rsp_q_phy_addr));
	ql_write_page1_reg(qdev,
			   &hmem_regs->rspBaseAddrLow,
			   LS_64BITS(qdev->rsp_q_phy_addr));
	ql_write_page1_reg(qdev, &hmem_regs->rspLength, NUM_RSP_Q_ENTRIES);

	/* Large Buffer Queue */
	ql_write_page1_reg(qdev,
			   &hmem_regs->rxLargeQBaseAddrHigh,
			   MS_64BITS(qdev->lrg_buf_q_phy_addr));
	ql_write_page1_reg(qdev,
			   &hmem_regs->rxLargeQBaseAddrLow,
			   LS_64BITS(qdev->lrg_buf_q_phy_addr));
	ql_write_page1_reg(qdev, &hmem_regs->rxLargeQLength,
			   qdev->num_lbufq_entries);
	ql_write_page1_reg(qdev,
			   &hmem_regs->rxLargeBufferLength,
			   qdev->lrg_buffer_len);

	/* Small Buffer Queue */
	ql_write_page1_reg(qdev,
			   &hmem_regs->rxSmallQBaseAddrHigh,
			   MS_64BITS(qdev->small_buf_q_phy_addr));
	ql_write_page1_reg(qdev,
			   &hmem_regs->rxSmallQBaseAddrLow,
			   LS_64BITS(qdev->small_buf_q_phy_addr));
	ql_write_page1_reg(qdev, &hmem_regs->rxSmallQLength, NUM_SBUFQ_ENTRIES);
	ql_write_page1_reg(qdev,
			   &hmem_regs->rxSmallBufferLength,
			   QL_SMALL_BUFFER_SIZE);

	qdev->small_buf_q_producer_index = NUM_SBUFQ_ENTRIES - 1;
	qdev->small_buf_release_cnt = 8;
	qdev->lrg_buf_q_producer_index = qdev->num_lbufq_entries - 1;
	qdev->lrg_buf_release_cnt = 8;
	qdev->lrg_buf_next_free =
		(struct bufq_addr_element *)qdev->lrg_buf_q_virt_addr;
	qdev->small_buf_index = 0;
	qdev->lrg_buf_index = 0;
	qdev->lrg_buf_free_count = 0;
	qdev->lrg_buf_free_head = NULL;
	qdev->lrg_buf_free_tail = NULL;

	ql_write_common_reg(qdev,
			    &port_regs->CommonRegs.rxSmallQProducerIndex,
			    qdev->small_buf_q_producer_index);
	ql_write_common_reg(qdev,
			    &port_regs->CommonRegs.rxLargeQProducerIndex,
			    qdev->lrg_buf_q_producer_index);

	/*
	 * Find out if the chip has already been initialized.  If it has, then
	 * we skip some of the initialization.
	 */
	clear_bit(QL_LINK_MASTER, &qdev->flags);
	value = ql_read_page0_reg(qdev, &port_regs->portStatus);
	if ((value & PORT_STATUS_IC) == 0) {

		/* Chip has not been configured yet, so let it rip. */
		if (ql_init_misc_registers(qdev)) {
			status = -1;
			goto out;
		}

		value = qdev->nvram_data.tcpMaxWindowSize;
		ql_write_page0_reg(qdev, &port_regs->tcpMaxWindow, value);

		value = (0xFFFF << 16) | qdev->nvram_data.extHwConfig;

		if (ql_sem_spinlock(qdev, QL_FLASH_SEM_MASK,
				    (QL_RESOURCE_BITS_BASE_CODE |
				     (qdev->mac_index) * 2) << 13)) {
			status = -1;
			goto out;
		}
		ql_write_page0_reg(qdev, &port_regs->ExternalHWConfig, value);
		ql_write_page0_reg(qdev, &port_regs->InternalChipConfig,
				   (((INTERNAL_CHIP_SD | INTERNAL_CHIP_WE) <<
				     16) | (INTERNAL_CHIP_SD |
					    INTERNAL_CHIP_WE)));
		ql_sem_unlock(qdev, QL_FLASH_SEM_MASK);
	}

	if (qdev->mac_index)
		ql_write_page0_reg(qdev,
				   &port_regs->mac1MaxFrameLengthReg,
				   qdev->max_frame_size);
	else
		ql_write_page0_reg(qdev,
				   &port_regs->mac0MaxFrameLengthReg,
				   qdev->max_frame_size);

	if (ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK,
			    (QL_RESOURCE_BITS_BASE_CODE |
			     (qdev->mac_index) * 2) << 7)) {
		status = -1;
		goto out;
	}

	PHY_Setup(qdev);
	ql_init_scan_mode(qdev);
	ql_get_phy_owner(qdev);

	/* Load the MAC Configuration */

	/* Program lower 32 bits of the MAC address */
	ql_write_page0_reg(qdev, &port_regs->macAddrIndirectPtrReg,
			   (MAC_ADDR_INDIRECT_PTR_REG_RP_MASK << 16));
	ql_write_page0_reg(qdev, &port_regs->macAddrDataReg,
			   ((qdev->ndev->dev_addr[2] << 24)
			    | (qdev->ndev->dev_addr[3] << 16)
			    | (qdev->ndev->dev_addr[4] << 8)
			    | qdev->ndev->dev_addr[5]));

	/* Program top 16 bits of the MAC address */
	ql_write_page0_reg(qdev, &port_regs->macAddrIndirectPtrReg,
			   ((MAC_ADDR_INDIRECT_PTR_REG_RP_MASK << 16) | 1));
	ql_write_page0_reg(qdev, &port_regs->macAddrDataReg,
			   ((qdev->ndev->dev_addr[0] << 8)
			    | qdev->ndev->dev_addr[1]));

	/* Enable Primary MAC */
	ql_write_page0_reg(qdev, &port_regs->macAddrIndirectPtrReg,
			   ((MAC_ADDR_INDIRECT_PTR_REG_PE << 16) |
			    MAC_ADDR_INDIRECT_PTR_REG_PE));

	/* Clear Primary and Secondary IP addresses */
	ql_write_page0_reg(qdev, &port_regs->ipAddrIndexReg,
			   ((IP_ADDR_INDEX_REG_MASK << 16) |
			    (qdev->mac_index << 2)));
	ql_write_page0_reg(qdev, &port_regs->ipAddrDataReg, 0);

	ql_write_page0_reg(qdev, &port_regs->ipAddrIndexReg,
			   ((IP_ADDR_INDEX_REG_MASK << 16) |
			    ((qdev->mac_index << 2) + 1)));
	ql_write_page0_reg(qdev, &port_regs->ipAddrDataReg, 0);

	ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);

	/* Indicate Configuration Complete */
	ql_write_page0_reg(qdev,
			   &port_regs->portControl,
			   ((PORT_CONTROL_CC << 16) | PORT_CONTROL_CC));

	do {
		value = ql_read_page0_reg(qdev, &port_regs->portStatus);
		if (value & PORT_STATUS_IC)
			break;
		spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
		msleep(500);
		spin_lock_irqsave(&qdev->hw_lock, hw_flags);
	} while (--delay);

	if (delay == 0) {
		netdev_err(qdev->ndev, "Hw Initialization timeout\n");
		status = -1;
		goto out;
	}

	/* Enable Ethernet Function */
	if (qdev->device_id == QL3032_DEVICE_ID) {
		value = (QL3032_PORT_CONTROL_EF | QL3032_PORT_CONTROL_KIE |
			 QL3032_PORT_CONTROL_EIv6 | QL3032_PORT_CONTROL_EIv4 |
			 QL3032_PORT_CONTROL_ET);
		ql_write_page0_reg(qdev, &port_regs->functionControl,
				   ((value << 16) | value));
	} else {
		value = (PORT_CONTROL_EF | PORT_CONTROL_ET | PORT_CONTROL_EI |
			 PORT_CONTROL_HH);
		ql_write_page0_reg(qdev, &port_regs->portControl,
				   ((value << 16) | value));
	}

out:
	return status;
}

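/*
 * Note on the ((value << 16) | value) writes above: the pattern used
 * throughout this file suggests the upper half-word of these control
 * registers is a write-enable mask, so only bits whose mask half is set
 * are actually modified by the write.
 */
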
/*
 * Caller holds hw_lock.
 */
static int ql_adapter_reset(struct ql3_adapter *qdev)
{
	struct ql3xxx_port_registers __iomem *port_regs =
		qdev->mem_map_registers;
	int status = 0;
	u32 value;
	int max_wait_time;

	set_bit(QL_RESET_ACTIVE, &qdev->flags);
	clear_bit(QL_RESET_DONE, &qdev->flags);

	/*
	 * Issue soft reset to chip.
	 */
	netdev_printk(KERN_DEBUG, qdev->ndev, "Issue soft reset to chip\n");
	ql_write_common_reg(qdev,
			    &port_regs->CommonRegs.ispControlStatus,
			    ((ISP_CONTROL_SR << 16) | ISP_CONTROL_SR));

	/* Wait (up to 5 seconds) for the reset to complete. */
	netdev_printk(KERN_DEBUG, qdev->ndev,
		      "Waiting for reset to complete\n");

	/* Wait until the firmware tells us the Soft Reset is done */
	max_wait_time = 5;
	do {
		value = ql_read_common_reg(qdev,
					   &port_regs->CommonRegs.
					   ispControlStatus);
		if ((value & ISP_CONTROL_SR) == 0)
			break;

		ssleep(1);
	} while (--max_wait_time);

	/*
	 * Also, make sure that the Network Reset Interrupt bit has been
	 * cleared after the soft reset has taken place.
	 */
	value = ql_read_common_reg(qdev,
				   &port_regs->CommonRegs.ispControlStatus);
	if (value & ISP_CONTROL_RI) {
		netdev_printk(KERN_DEBUG, qdev->ndev,
			      "clearing RI after reset\n");
		ql_write_common_reg(qdev,
				    &port_regs->CommonRegs.ispControlStatus,
				    ((ISP_CONTROL_RI << 16) | ISP_CONTROL_RI));
	}

	if (max_wait_time == 0) {
		/* Issue Force Soft Reset */
		ql_write_common_reg(qdev,
				    &port_regs->CommonRegs.ispControlStatus,
				    ((ISP_CONTROL_FSR << 16) |
				     ISP_CONTROL_FSR));
		/*
		 * Wait until the firmware tells us the Force Soft Reset is
		 * done.
		 */
		max_wait_time = 5;
		do {
			value = ql_read_common_reg(qdev,
						   &port_regs->CommonRegs.
						   ispControlStatus);
			if ((value & ISP_CONTROL_FSR) == 0)
				break;
			ssleep(1);
		} while (--max_wait_time);
	}
	if (max_wait_time == 0)
		status = 1;

	clear_bit(QL_RESET_ACTIVE, &qdev->flags);
	set_bit(QL_RESET_DONE, &qdev->flags);
	return status;
}

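/*
 * Reset escalation above: a soft reset (ISP_CONTROL_SR) gets up to five
 * one-second polls to self-clear; only if it never clears is the heavier
 * force soft reset (ISP_CONTROL_FSR) issued and polled the same way, and
 * a nonzero status is returned if that times out as well.
 */
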
static void ql_set_mac_info(struct ql3_adapter *qdev)
{
	struct ql3xxx_port_registers __iomem *port_regs =
		qdev->mem_map_registers;
	u32 value, port_status;
	u8 func_number;

	/* Get the function number */
	value = ql_read_common_reg_l(qdev,
				     &port_regs->CommonRegs.ispControlStatus);
	func_number = (u8) ((value >> 4) & OPCODE_FUNC_ID_MASK);
	port_status = ql_read_page0_reg(qdev, &port_regs->portStatus);
	switch (value & ISP_CONTROL_FN_MASK) {
	case ISP_CONTROL_FN0_NET:
		qdev->mac_index = 0;
		qdev->mac_ob_opcode = OUTBOUND_MAC_IOCB | func_number;
		qdev->mb_bit_mask = FN0_MA_BITS_MASK;
		qdev->PHYAddr = PORT0_PHY_ADDRESS;
		if (port_status & PORT_STATUS_SM0)
			set_bit(QL_LINK_OPTICAL, &qdev->flags);
		else
			clear_bit(QL_LINK_OPTICAL, &qdev->flags);
		break;

	case ISP_CONTROL_FN1_NET:
		qdev->mac_index = 1;
		qdev->mac_ob_opcode = OUTBOUND_MAC_IOCB | func_number;
		qdev->mb_bit_mask = FN1_MA_BITS_MASK;
		qdev->PHYAddr = PORT1_PHY_ADDRESS;
		if (port_status & PORT_STATUS_SM1)
			set_bit(QL_LINK_OPTICAL, &qdev->flags);
		else
			clear_bit(QL_LINK_OPTICAL, &qdev->flags);
		break;

	case ISP_CONTROL_FN0_SCSI:
	case ISP_CONTROL_FN1_SCSI:
	default:
		netdev_printk(KERN_DEBUG, qdev->ndev,
			      "Invalid function number, ispControlStatus = 0x%x\n",
			      value);
		break;
	}
	qdev->numPorts = qdev->nvram_data.version_and_numPorts >> 8;
}

static void ql_display_dev_info(struct net_device *ndev)
{
	struct ql3_adapter *qdev = netdev_priv(ndev);
	struct pci_dev *pdev = qdev->pdev;

	netdev_info(ndev,
		    "%s Adapter %d RevisionID %d found %s on PCI slot %d\n",
		    DRV_NAME, qdev->index, qdev->chip_rev_id,
		    (qdev->device_id == QL3032_DEVICE_ID) ? "QLA3032" : "QLA3022",
		    qdev->pci_slot);
	netdev_info(ndev, "%s Interface\n",
		    test_bit(QL_LINK_OPTICAL, &qdev->flags) ? "OPTICAL" : "COPPER");

	/*
	 * Print PCI bus width/type.
	 */
	netdev_info(ndev, "Bus interface is %s %s\n",
		    ((qdev->pci_width == 64) ? "64-bit" : "32-bit"),
		    ((qdev->pci_x) ? "PCI-X" : "PCI"));

	netdev_info(ndev, "mem IO base address adjusted = 0x%p\n",
		    qdev->mem_map_registers);
	netdev_info(ndev, "Interrupt number = %d\n", pdev->irq);

	netif_info(qdev, probe, ndev, "MAC address %pM\n", ndev->dev_addr);
}

static int ql_adapter_down(struct ql3_adapter *qdev, int do_reset)
{
	struct net_device *ndev = qdev->ndev;
	int retval = 0;

	netif_stop_queue(ndev);
	netif_carrier_off(ndev);

	clear_bit(QL_ADAPTER_UP, &qdev->flags);
	clear_bit(QL_LINK_MASTER, &qdev->flags);

	ql_disable_interrupts(qdev);

	free_irq(qdev->pdev->irq, ndev);

	if (qdev->msi && test_bit(QL_MSI_ENABLED, &qdev->flags)) {
		netdev_info(qdev->ndev, "calling pci_disable_msi()\n");
		clear_bit(QL_MSI_ENABLED, &qdev->flags);
		pci_disable_msi(qdev->pdev);
	}

	del_timer_sync(&qdev->adapter_timer);

	napi_disable(&qdev->napi);

	if (do_reset) {
		int soft_reset;
		unsigned long hw_flags;

		spin_lock_irqsave(&qdev->hw_lock, hw_flags);
		if (ql_wait_for_drvr_lock(qdev)) {
			soft_reset = ql_adapter_reset(qdev);
			if (soft_reset) {
				netdev_err(ndev,
					   "ql_adapter_reset(%d) FAILED!\n",
					   qdev->index);
			}
			netdev_err(ndev,
				   "Releasing driver lock via chip reset\n");
		} else {
			netdev_err(ndev,
				   "Could not acquire driver lock to do reset!\n");
			retval = -1;
		}
		spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
	}
	ql_free_mem_resources(qdev);
	return retval;
}

static int ql_adapter_up(struct ql3_adapter *qdev)
{
	struct net_device *ndev = qdev->ndev;
	int err;
	unsigned long irq_flags = IRQF_SAMPLE_RANDOM | IRQF_SHARED;
	unsigned long hw_flags;

	if (ql_alloc_mem_resources(qdev)) {
		netdev_err(ndev, "Unable to allocate buffers\n");
		return -ENOMEM;
	}

	if (qdev->msi) {
		if (pci_enable_msi(qdev->pdev)) {
			netdev_err(ndev,
				   "User requested MSI, but MSI failed to initialize.  Continuing without MSI.\n");
			qdev->msi = 0;
		} else {
			netdev_info(ndev, "MSI Enabled...\n");
			set_bit(QL_MSI_ENABLED, &qdev->flags);
			irq_flags &= ~IRQF_SHARED;
		}
	}

	err = request_irq(qdev->pdev->irq, ql3xxx_isr,
			  irq_flags, ndev->name, ndev);
	if (err) {
		netdev_err(ndev,
			   "Failed to reserve interrupt %d - already in use\n",
			   qdev->pdev->irq);
		goto err_irq;
	}

	spin_lock_irqsave(&qdev->hw_lock, hw_flags);

	if (ql_wait_for_drvr_lock(qdev)) {
		err = ql_adapter_initialize(qdev);
		if (err) {
			netdev_err(ndev, "Unable to initialize adapter\n");
			goto err_init;
		}
		netdev_err(ndev, "Releasing driver lock\n");
		ql_sem_unlock(qdev, QL_DRVR_SEM_MASK);
	} else {
		netdev_err(ndev, "Could not acquire driver lock\n");
		err = -1;
		goto err_lock;
	}

	spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);

	set_bit(QL_ADAPTER_UP, &qdev->flags);

	mod_timer(&qdev->adapter_timer, jiffies + HZ * 1);

	napi_enable(&qdev->napi);
	ql_enable_interrupts(qdev);
	return 0;

err_init:
	ql_sem_unlock(qdev, QL_DRVR_SEM_MASK);
err_lock:
	spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
	free_irq(qdev->pdev->irq, ndev);
err_irq:
	if (qdev->msi && test_bit(QL_MSI_ENABLED, &qdev->flags)) {
		netdev_info(ndev, "calling pci_disable_msi()\n");
		clear_bit(QL_MSI_ENABLED, &qdev->flags);
		pci_disable_msi(qdev->pdev);
	}
	return err;
}

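/*
 * MSI handling above: if pci_enable_msi() fails the driver falls back to
 * legacy INTx and keeps IRQF_SHARED; with MSI the vector is exclusive to
 * this device, so the shared flag is cleared before request_irq().
 */
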
static int ql_cycle_adapter(struct ql3_adapter *qdev, int reset)
{
	if (ql_adapter_down(qdev, reset) || ql_adapter_up(qdev)) {
		netdev_err(qdev->ndev,
			   "Driver up/down cycle failed, closing device\n");
		rtnl_lock();
		dev_close(qdev->ndev);
		rtnl_unlock();
		return -1;
	}
	return 0;
}

static int ql3xxx_close(struct net_device *ndev)
{
	struct ql3_adapter *qdev = netdev_priv(ndev);

	/*
	 * Wait for device to recover from a reset.
	 * (Rarely happens, but possible.)
	 */
	while (!test_bit(QL_ADAPTER_UP, &qdev->flags))
		msleep(50);

	ql_adapter_down(qdev, QL_DO_RESET);
	return 0;
}

static int ql3xxx_open(struct net_device *ndev)
{
	struct ql3_adapter *qdev = netdev_priv(ndev);
	return ql_adapter_up(qdev);
}

static int ql3xxx_set_mac_address(struct net_device *ndev, void *p)
{
	struct ql3_adapter *qdev = netdev_priv(ndev);
	struct ql3xxx_port_registers __iomem *port_regs =
		qdev->mem_map_registers;
	struct sockaddr *addr = p;
	unsigned long hw_flags;

	if (netif_running(ndev))
		return -EBUSY;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	memcpy(ndev->dev_addr, addr->sa_data, ndev->addr_len);

	spin_lock_irqsave(&qdev->hw_lock, hw_flags);
	/* Program lower 32 bits of the MAC address */
	ql_write_page0_reg(qdev, &port_regs->macAddrIndirectPtrReg,
			   (MAC_ADDR_INDIRECT_PTR_REG_RP_MASK << 16));
	ql_write_page0_reg(qdev, &port_regs->macAddrDataReg,
			   ((ndev->dev_addr[2] << 24)
			    | (ndev->dev_addr[3] << 16)
			    | (ndev->dev_addr[4] << 8)
			    | ndev->dev_addr[5]));

	/* Program top 16 bits of the MAC address */
	ql_write_page0_reg(qdev, &port_regs->macAddrIndirectPtrReg,
			   ((MAC_ADDR_INDIRECT_PTR_REG_RP_MASK << 16) | 1));
	ql_write_page0_reg(qdev, &port_regs->macAddrDataReg,
			   ((ndev->dev_addr[0] << 8) | ndev->dev_addr[1]));
	spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);

	return 0;
}

static void ql3xxx_tx_timeout(struct net_device *ndev)
{
	struct ql3_adapter *qdev = netdev_priv(ndev);

	netdev_err(ndev, "Resetting...\n");
	/*
	 * Stop the queues, we've got a problem.
	 */
	netif_stop_queue(ndev);

	/*
	 * Wake up the worker to process this event.
	 */
	queue_delayed_work(qdev->workqueue, &qdev->tx_timeout_work, 0);
}

static void ql_reset_work(struct work_struct *work)
{
	struct ql3_adapter *qdev =
		container_of(work, struct ql3_adapter, reset_work.work);
	struct net_device *ndev = qdev->ndev;
	u32 value;
	struct ql_tx_buf_cb *tx_cb;
	int max_wait_time, i;
	struct ql3xxx_port_registers __iomem *port_regs =
		qdev->mem_map_registers;
	unsigned long hw_flags;

	if (test_bit(QL_RESET_PER_SCSI, &qdev->flags) ||
	    test_bit(QL_RESET_START, &qdev->flags)) {
		clear_bit(QL_LINK_MASTER, &qdev->flags);

		/*
		 * Loop through the active list and return the skb.
		 */
		for (i = 0; i < NUM_REQ_Q_ENTRIES; i++) {
			int j;

			tx_cb = &qdev->tx_buf[i];
			if (tx_cb->skb) {
				netdev_printk(KERN_DEBUG, ndev,
					      "Freeing lost SKB\n");
				pci_unmap_single(qdev->pdev,
						 dma_unmap_addr(&tx_cb->map[0],
								mapaddr),
						 dma_unmap_len(&tx_cb->map[0],
							       maplen),
						 PCI_DMA_TODEVICE);
				for (j = 1; j < tx_cb->seg_count; j++) {
					pci_unmap_page(qdev->pdev,
						       dma_unmap_addr(&tx_cb->map[j],
								      mapaddr),
						       dma_unmap_len(&tx_cb->map[j],
								     maplen),
						       PCI_DMA_TODEVICE);
				}
				dev_kfree_skb(tx_cb->skb);
				tx_cb->skb = NULL;
			}
		}

		netdev_err(ndev, "Clearing NRI after reset\n");
		spin_lock_irqsave(&qdev->hw_lock, hw_flags);
		ql_write_common_reg(qdev,
				    &port_regs->CommonRegs.ispControlStatus,
				    ((ISP_CONTROL_RI << 16) | ISP_CONTROL_RI));
		/*
		 * Wait for the Soft Reset to complete.
		 */
		max_wait_time = 10;
		do {
			value = ql_read_common_reg(qdev,
						   &port_regs->CommonRegs.
						   ispControlStatus);
			if ((value & ISP_CONTROL_SR) == 0) {
				netdev_printk(KERN_DEBUG, ndev,
					      "reset completed\n");
				break;
			}

			if (value & ISP_CONTROL_RI) {
				netdev_printk(KERN_DEBUG, ndev,
					      "clearing NRI after reset\n");
				ql_write_common_reg(qdev,
						    &port_regs->CommonRegs.
						    ispControlStatus,
						    ((ISP_CONTROL_RI << 16) |
						     ISP_CONTROL_RI));
			}

			spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
			ssleep(1);
			spin_lock_irqsave(&qdev->hw_lock, hw_flags);
		} while (--max_wait_time);
		spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);

		if (value & ISP_CONTROL_SR) {
			/*
			 * Set the reset flags and clear the board again.
			 * Nothing else to do...
			 */
			netdev_err(ndev,
				   "Timed out waiting for reset to complete\n");
			netdev_err(ndev, "Do a reset\n");
			clear_bit(QL_RESET_PER_SCSI, &qdev->flags);
			clear_bit(QL_RESET_START, &qdev->flags);
			ql_cycle_adapter(qdev, QL_DO_RESET);
			return;
		}

		clear_bit(QL_RESET_ACTIVE, &qdev->flags);
		clear_bit(QL_RESET_PER_SCSI, &qdev->flags);
		clear_bit(QL_RESET_START, &qdev->flags);
		ql_cycle_adapter(qdev, QL_NO_RESET);
	}
}

static void ql_tx_timeout_work(struct work_struct *work)
{
	struct ql3_adapter *qdev =
		container_of(work, struct ql3_adapter, tx_timeout_work.work);

	ql_cycle_adapter(qdev, QL_DO_RESET);
}

static void ql_get_board_info(struct ql3_adapter *qdev)
{
	struct ql3xxx_port_registers __iomem *port_regs =
		qdev->mem_map_registers;
	u32 value;

	value = ql_read_page0_reg_l(qdev, &port_regs->portStatus);

	qdev->chip_rev_id = ((value & PORT_STATUS_REV_ID_MASK) >> 12);
	if (value & PORT_STATUS_64)
		qdev->pci_width = 64;
	else
		qdev->pci_width = 32;
	if (value & PORT_STATUS_X)
		qdev->pci_x = 1;
	else
		qdev->pci_x = 0;
	qdev->pci_slot = (u8) PCI_SLOT(qdev->pdev->devfn);
}

static void ql3xxx_timer(unsigned long ptr)
{
	struct ql3_adapter *qdev = (struct ql3_adapter *)ptr;
	queue_delayed_work(qdev->workqueue, &qdev->link_state_work, 0);
}

static const struct net_device_ops ql3xxx_netdev_ops = {
	.ndo_open		= ql3xxx_open,
	.ndo_start_xmit		= ql3xxx_send,
	.ndo_stop		= ql3xxx_close,
	.ndo_set_multicast_list	= NULL, /* not allowed on NIC side */
	.ndo_change_mtu		= eth_change_mtu,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= ql3xxx_set_mac_address,
	.ndo_tx_timeout		= ql3xxx_tx_timeout,
};

static int __devinit ql3xxx_probe(struct pci_dev *pdev,
				  const struct pci_device_id *pci_entry)
{
	struct net_device *ndev = NULL;
	struct ql3_adapter *qdev = NULL;
	static int cards_found;
	int uninitialized_var(pci_using_dac), err;

	err = pci_enable_device(pdev);
	if (err) {
		pr_err("%s cannot enable PCI device\n", pci_name(pdev));
		goto err_out;
	}

	err = pci_request_regions(pdev, DRV_NAME);
	if (err) {
		pr_err("%s cannot obtain PCI resources\n", pci_name(pdev));
		goto err_out_disable_pdev;
	}

	pci_set_master(pdev);

	if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
		pci_using_dac = 1;
		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
	} else if (!(err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)))) {
		pci_using_dac = 0;
		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
	}

	if (err) {
		pr_err("%s no usable DMA configuration\n", pci_name(pdev));
		goto err_out_free_regions;
	}

	ndev = alloc_etherdev(sizeof(struct ql3_adapter));
	if (!ndev) {
		pr_err("%s could not alloc etherdev\n", pci_name(pdev));
		err = -ENOMEM;
		goto err_out_free_regions;
	}

	SET_NETDEV_DEV(ndev, &pdev->dev);

	pci_set_drvdata(pdev, ndev);

	qdev = netdev_priv(ndev);
	qdev->index = cards_found;
	qdev->ndev = ndev;
	qdev->pdev = pdev;
	qdev->device_id = pci_entry->device;
	qdev->port_link_state = LS_DOWN;
	if (msi)
		qdev->msi = 1;

	qdev->msg_enable = netif_msg_init(debug, default_msg);

	if (pci_using_dac)
		ndev->features |= NETIF_F_HIGHDMA;
	if (qdev->device_id == QL3032_DEVICE_ID)
		ndev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;

	qdev->mem_map_registers = pci_ioremap_bar(pdev, 1);
	if (!qdev->mem_map_registers) {
		pr_err("%s: cannot map device registers\n", pci_name(pdev));
		err = -EIO;
		goto err_out_free_ndev;
	}

	spin_lock_init(&qdev->adapter_lock);
	spin_lock_init(&qdev->hw_lock);

	/* Set driver entry points */
	ndev->netdev_ops = &ql3xxx_netdev_ops;
	SET_ETHTOOL_OPS(ndev, &ql3xxx_ethtool_ops);
	ndev->watchdog_timeo = 5 * HZ;

	netif_napi_add(ndev, &qdev->napi, ql_poll, 64);

	ndev->irq = pdev->irq;

	/* make sure the EEPROM is good */
	if (ql_get_nvram_params(qdev)) {
		pr_alert("%s: Adapter #%d, Invalid NVRAM parameters\n",
			 __func__, qdev->index);
		err = -EIO;
		goto err_out_iounmap;
	}

	ql_set_mac_info(qdev);

	/* Validate and set parameters */
	if (qdev->mac_index) {
		ndev->mtu = qdev->nvram_data.macCfg_port1.etherMtu_mac;
		ql_set_mac_addr(ndev, qdev->nvram_data.funcCfg_fn2.macAddress);
	} else {
		ndev->mtu = qdev->nvram_data.macCfg_port0.etherMtu_mac;
		ql_set_mac_addr(ndev, qdev->nvram_data.funcCfg_fn0.macAddress);
	}
	memcpy(ndev->perm_addr, ndev->dev_addr, ndev->addr_len);

	ndev->tx_queue_len = NUM_REQ_Q_ENTRIES;

	/* Record PCI bus information. */
	ql_get_board_info(qdev);

	/*
	 * Set the Maximum Memory Read Byte Count value. We do this to handle
	 * jumbo frames.
	 */
	if (qdev->pci_x)
		pci_write_config_word(pdev, (int)0x4e, (u16) 0x0036);

	err = register_netdev(ndev);
	if (err) {
		pr_err("%s: cannot register net device\n", pci_name(pdev));
		goto err_out_iounmap;
	}

	/* we're going to reset, so assume we have no link for now */

	netif_carrier_off(ndev);
	netif_stop_queue(ndev);

	qdev->workqueue = create_singlethread_workqueue(ndev->name);
	INIT_DELAYED_WORK(&qdev->reset_work, ql_reset_work);
	INIT_DELAYED_WORK(&qdev->tx_timeout_work, ql_tx_timeout_work);
	INIT_DELAYED_WORK(&qdev->link_state_work, ql_link_state_machine_work);

	init_timer(&qdev->adapter_timer);
	qdev->adapter_timer.function = ql3xxx_timer;
	qdev->adapter_timer.expires = jiffies + HZ * 2;	/* two second delay */
	qdev->adapter_timer.data = (unsigned long)qdev;

	if (!cards_found) {
		pr_alert("%s\n", DRV_STRING);
		pr_alert("Driver name: %s, Version: %s\n",
			 DRV_NAME, DRV_VERSION);
	}
	ql_display_dev_info(ndev);

	cards_found++;
	return 0;

err_out_iounmap:
	iounmap(qdev->mem_map_registers);
err_out_free_ndev:
	free_netdev(ndev);
err_out_free_regions:
	pci_release_regions(pdev);
err_out_disable_pdev:
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
err_out:
	return err;
}

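/*
 * The error labels above unwind in strict reverse order of acquisition
 * (iounmap -> free_netdev -> release regions -> disable device), so each
 * goto target frees exactly what was set up before the failure point.
 */
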
static void __devexit ql3xxx_remove(struct pci_dev *pdev)
{
	struct net_device *ndev = pci_get_drvdata(pdev);
	struct ql3_adapter *qdev = netdev_priv(ndev);

	unregister_netdev(ndev);

	ql_disable_interrupts(qdev);

	if (qdev->workqueue) {
		cancel_delayed_work(&qdev->reset_work);
		cancel_delayed_work(&qdev->tx_timeout_work);
		destroy_workqueue(qdev->workqueue);
		qdev->workqueue = NULL;
	}

	iounmap(qdev->mem_map_registers);
	pci_release_regions(pdev);
	pci_set_drvdata(pdev, NULL);
	free_netdev(ndev);
}

static struct pci_driver ql3xxx_driver = {
	.name = ql3xxx_driver_name,
	.id_table = ql3xxx_pci_tbl,
	.probe = ql3xxx_probe,
	.remove = __devexit_p(ql3xxx_remove),
};

static int __init ql3xxx_init_module(void)
{
	return pci_register_driver(&ql3xxx_driver);
}

static void __exit ql3xxx_exit(void)
{
	pci_unregister_driver(&ql3xxx_driver);
}

module_init(ql3xxx_init_module);
module_exit(ql3xxx_exit);