 * Copyright(c) 2007 Atheros Corporation. All rights reserved.
 *
 * Derived from Intel e1000 driver
 * Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59
 * Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 */
#define DRV_VERSION "1.0.0.7-NAPI"

char atl1e_driver_name[] = "ATL1E";
char atl1e_driver_version[] = DRV_VERSION;
#define PCI_DEVICE_ID_ATTANSIC_L1E 0x1026
/*
 * atl1e_pci_tbl - PCI Device ID Table
 *
 * Wildcard entries (PCI_ANY_ID) should come last
 * Last entry must be all 0s
 *
 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
 *   Class, Class Mask, private data (not used) }
 */
static const struct pci_device_id atl1e_pci_tbl[] = {
        {PCI_DEVICE(PCI_VENDOR_ID_ATTANSIC, PCI_DEVICE_ID_ATTANSIC_L1E)},
        {PCI_DEVICE(PCI_VENDOR_ID_ATTANSIC, 0x1066)},
        /* required last entry */
        {0}
};
MODULE_DEVICE_TABLE(pci, atl1e_pci_tbl);

MODULE_AUTHOR("Atheros Corporation, <xiong.huang@atheros.com>, Jie Yang <jie.yang@atheros.com>");
MODULE_DESCRIPTION("Atheros 1000M Ethernet Network Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
static void atl1e_setup_mac_ctrl(struct atl1e_adapter *adapter);

static const u16
atl1e_rx_page_vld_regs[AT_MAX_RECEIVE_QUEUE][AT_PAGE_NUM_PER_QUEUE] =
{
        {REG_HOST_RXF0_PAGE0_VLD, REG_HOST_RXF0_PAGE1_VLD},
        {REG_HOST_RXF1_PAGE0_VLD, REG_HOST_RXF1_PAGE1_VLD},
        {REG_HOST_RXF2_PAGE0_VLD, REG_HOST_RXF2_PAGE1_VLD},
        {REG_HOST_RXF3_PAGE0_VLD, REG_HOST_RXF3_PAGE1_VLD}
};

static const u16 atl1e_rx_page_hi_addr_regs[AT_MAX_RECEIVE_QUEUE] =
{
        REG_RXF0_BASE_ADDR_HI,
        REG_RXF1_BASE_ADDR_HI,
        REG_RXF2_BASE_ADDR_HI,
        REG_RXF3_BASE_ADDR_HI
};

static const u16
atl1e_rx_page_lo_addr_regs[AT_MAX_RECEIVE_QUEUE][AT_PAGE_NUM_PER_QUEUE] =
{
        {REG_HOST_RXF0_PAGE0_LO, REG_HOST_RXF0_PAGE1_LO},
        {REG_HOST_RXF1_PAGE0_LO, REG_HOST_RXF1_PAGE1_LO},
        {REG_HOST_RXF2_PAGE0_LO, REG_HOST_RXF2_PAGE1_LO},
        {REG_HOST_RXF3_PAGE0_LO, REG_HOST_RXF3_PAGE1_LO}
};

static const u16
atl1e_rx_page_write_offset_regs[AT_MAX_RECEIVE_QUEUE][AT_PAGE_NUM_PER_QUEUE] =
{
        {REG_HOST_RXF0_MB0_LO, REG_HOST_RXF0_MB1_LO},
        {REG_HOST_RXF1_MB0_LO, REG_HOST_RXF1_MB1_LO},
        {REG_HOST_RXF2_MB0_LO, REG_HOST_RXF2_MB1_LO},
        {REG_HOST_RXF3_MB0_LO, REG_HOST_RXF3_MB1_LO}
};

static const u16 atl1e_pay_load_size[] = {
        128, 256, 512, 1024, 2048, 4096,
};
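/*
 * Editor's note (assumption, not verified against atl1e.h): the index
 * into atl1e_pay_load_size[] appears to mirror the atl1e_dma_req_*
 * enum used for hw->dmar_block/dmaw_block below, i.e. entry n would
 * correspond to a DMA burst of 128 << n bytes.  atl1e_configure_tx()
 * uses atl1e_pay_load_size[hw->dmar_block] when programming TXQ_CTRL.
 */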
/**
 * atl1e_irq_enable - Enable default interrupt generation settings
 * @adapter: board private structure
 */
static inline void atl1e_irq_enable(struct atl1e_adapter *adapter)
{
        if (likely(atomic_dec_and_test(&adapter->irq_sem))) {
                AT_WRITE_REG(&adapter->hw, REG_ISR, 0);
                AT_WRITE_REG(&adapter->hw, REG_IMR, IMR_NORMAL_MASK);
                AT_WRITE_FLUSH(&adapter->hw);
        }
}

/**
 * atl1e_irq_disable - Mask off interrupt generation on the NIC
 * @adapter: board private structure
 */
static inline void atl1e_irq_disable(struct atl1e_adapter *adapter)
{
        atomic_inc(&adapter->irq_sem);
        AT_WRITE_REG(&adapter->hw, REG_IMR, 0);
        AT_WRITE_FLUSH(&adapter->hw);
        synchronize_irq(adapter->pdev->irq);
}

/**
 * atl1e_irq_reset - reset interrupt configuration on the NIC
 * @adapter: board private structure
 */
static inline void atl1e_irq_reset(struct atl1e_adapter *adapter)
{
        atomic_set(&adapter->irq_sem, 0);
        AT_WRITE_REG(&adapter->hw, REG_ISR, 0);
        AT_WRITE_REG(&adapter->hw, REG_IMR, 0);
        AT_WRITE_FLUSH(&adapter->hw);
}
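/*
 * Note on the three helpers above: irq_sem acts as a masking depth
 * counter.  atl1e_irq_disable() increments it and masks all sources in
 * the IMR; atl1e_irq_enable() only unmasks once the counter drops back
 * to zero, so nested disable/enable pairs stay balanced.
 * atl1e_irq_reset() forces the counter to zero after a hardware reset.
 */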
/**
 * atl1e_phy_config - Timer Call-back
 * @t: pointer to the timer embedded in the adapter private structure
 */
static void atl1e_phy_config(struct timer_list *t)
{
        struct atl1e_adapter *adapter = from_timer(adapter, t,
                                                   phy_config_timer);
        struct atl1e_hw *hw = &adapter->hw;
        unsigned long flags;

        spin_lock_irqsave(&adapter->mdio_lock, flags);
        atl1e_restart_autoneg(hw);
        spin_unlock_irqrestore(&adapter->mdio_lock, flags);
}

void atl1e_reinit_locked(struct atl1e_adapter *adapter)
{
        WARN_ON(in_interrupt());
        while (test_and_set_bit(__AT_RESETTING, &adapter->flags))
                msleep(1);
        atl1e_down(adapter);
        atl1e_up(adapter);
        clear_bit(__AT_RESETTING, &adapter->flags);
}

static void atl1e_reset_task(struct work_struct *work)
{
        struct atl1e_adapter *adapter;

        adapter = container_of(work, struct atl1e_adapter, reset_task);
        atl1e_reinit_locked(adapter);
}
static int atl1e_check_link(struct atl1e_adapter *adapter)
{
        struct atl1e_hw *hw = &adapter->hw;
        struct net_device *netdev = adapter->netdev;
        int err = 0;
        u16 speed, duplex, phy_data;

        /* MII_BMSR is latched; it must be read twice to get the current
         * link status */
        atl1e_read_phy_reg(hw, MII_BMSR, &phy_data);
        atl1e_read_phy_reg(hw, MII_BMSR, &phy_data);
        if ((phy_data & BMSR_LSTATUS) == 0) {
                /* link down */
                if (netif_carrier_ok(netdev)) { /* old link state: Up */
                        u32 value;
                        /* disable rx */
                        value = AT_READ_REG(hw, REG_MAC_CTRL);
                        value &= ~MAC_CTRL_RX_EN;
                        AT_WRITE_REG(hw, REG_MAC_CTRL, value);
                        adapter->link_speed = SPEED_0;
                        netif_carrier_off(netdev);
                        netif_stop_queue(netdev);
                }
        } else {
                /* link up */
                err = atl1e_get_speed_and_duplex(hw, &speed, &duplex);
                if (unlikely(err))
                        return err;

                /* link result is our setting */
                if (adapter->link_speed != speed ||
                    adapter->link_duplex != duplex) {
                        adapter->link_speed = speed;
                        adapter->link_duplex = duplex;
                        atl1e_setup_mac_ctrl(adapter);
                        netdev_info(netdev,
                                    "NIC Link is Up <%d Mbps %s Duplex>\n",
                                    adapter->link_speed,
                                    adapter->link_duplex == FULL_DUPLEX ?
                                    "Full" : "Half");
                }

                if (!netif_carrier_ok(netdev)) {
                        /* Link down -> Up */
                        netif_carrier_on(netdev);
                        netif_wake_queue(netdev);
                }
        }
        return 0;
}
/**
 * atl1e_link_chg_task - deal with link change events outside interrupt context
 * @work: work struct embedded in the adapter private structure
 */
static void atl1e_link_chg_task(struct work_struct *work)
{
        struct atl1e_adapter *adapter;
        unsigned long flags;

        adapter = container_of(work, struct atl1e_adapter, link_chg_task);
        spin_lock_irqsave(&adapter->mdio_lock, flags);
        atl1e_check_link(adapter);
        spin_unlock_irqrestore(&adapter->mdio_lock, flags);
}

static void atl1e_link_chg_event(struct atl1e_adapter *adapter)
{
        struct net_device *netdev = adapter->netdev;
        u16 phy_data = 0;
        u16 link_up = 0;

        spin_lock(&adapter->mdio_lock);
        atl1e_read_phy_reg(&adapter->hw, MII_BMSR, &phy_data);
        atl1e_read_phy_reg(&adapter->hw, MII_BMSR, &phy_data);
        spin_unlock(&adapter->mdio_lock);
        link_up = phy_data & BMSR_LSTATUS;
        /* notify upper layer that the link is down ASAP */
        if (!link_up) {
                if (netif_carrier_ok(netdev)) {
                        /* old link state: Up */
                        netdev_info(netdev, "NIC Link is Down\n");
                        adapter->link_speed = SPEED_0;
                        netif_stop_queue(netdev);
                }
        }
        schedule_work(&adapter->link_chg_task);
}
static void atl1e_del_timer(struct atl1e_adapter *adapter)
{
        del_timer_sync(&adapter->phy_config_timer);
}

static void atl1e_cancel_work(struct atl1e_adapter *adapter)
{
        cancel_work_sync(&adapter->reset_task);
        cancel_work_sync(&adapter->link_chg_task);
}

/**
 * atl1e_tx_timeout - Respond to a Tx Hang
 * @netdev: network interface device structure
 */
static void atl1e_tx_timeout(struct net_device *netdev)
{
        struct atl1e_adapter *adapter = netdev_priv(netdev);

        /* Do the reset outside of interrupt context */
        schedule_work(&adapter->reset_task);
}

/**
 * atl1e_set_multi - Multicast and Promiscuous mode set
 * @netdev: network interface device structure
 *
 * The set_multi entry point is called whenever the multicast address
 * list or the network interface flags are updated. This routine is
 * responsible for configuring the hardware for proper multicast,
 * promiscuous mode, and all-multi behavior.
 */
static void atl1e_set_multi(struct net_device *netdev)
{
        struct atl1e_adapter *adapter = netdev_priv(netdev);
        struct atl1e_hw *hw = &adapter->hw;
        struct netdev_hw_addr *ha;
        u32 mac_ctrl_data = 0;
        u32 hash_value;

        /* Check for Promiscuous and All Multicast modes */
        mac_ctrl_data = AT_READ_REG(hw, REG_MAC_CTRL);

        if (netdev->flags & IFF_PROMISC) {
                mac_ctrl_data |= MAC_CTRL_PROMIS_EN;
        } else if (netdev->flags & IFF_ALLMULTI) {
                mac_ctrl_data |= MAC_CTRL_MC_ALL_EN;
                mac_ctrl_data &= ~MAC_CTRL_PROMIS_EN;
        } else {
                mac_ctrl_data &= ~(MAC_CTRL_PROMIS_EN | MAC_CTRL_MC_ALL_EN);
        }

        AT_WRITE_REG(hw, REG_MAC_CTRL, mac_ctrl_data);

        /* clear the old settings from the multicast hash table */
        AT_WRITE_REG(hw, REG_RX_HASH_TABLE, 0);
        AT_WRITE_REG_ARRAY(hw, REG_RX_HASH_TABLE, 1, 0);

        /* compute each multicast address's hash value and set the
         * corresponding bit in the hash table */
        netdev_for_each_mc_addr(ha, netdev) {
                hash_value = atl1e_hash_mc_addr(hw, ha->addr);
                atl1e_hash_set(hw, hash_value);
        }
}
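/*
 * For reference, a rough sketch of what atl1e_hash_set() (implemented
 * in atl1e_hw.c, not in this file) does with the value computed above,
 * assuming the usual Atheros layout of a 64-bit hash filter split
 * across two 32-bit registers -- the exact bit selection lives in
 * atl1e_hw.c:
 *
 *	reg = (hash_value >> 31) & 0x1;		// which 32-bit register
 *	bit = (hash_value >> 26) & 0x1f;	// which bit within it
 *	mta = AT_READ_REG_ARRAY(hw, REG_RX_HASH_TABLE, reg);
 *	AT_WRITE_REG_ARRAY(hw, REG_RX_HASH_TABLE, reg, mta | (1U << bit));
 */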
static void __atl1e_rx_mode(netdev_features_t features, u32 *mac_ctrl_data)
{
        if (features & NETIF_F_RXALL) {
                /* enable RX of ALL frames */
                *mac_ctrl_data |= MAC_CTRL_DBG;
        } else {
                /* disable RX of ALL frames */
                *mac_ctrl_data &= ~MAC_CTRL_DBG;
        }
}

static void atl1e_rx_mode(struct net_device *netdev,
                          netdev_features_t features)
{
        struct atl1e_adapter *adapter = netdev_priv(netdev);
        u32 mac_ctrl_data = 0;

        netdev_dbg(adapter->netdev, "%s\n", __func__);

        atl1e_irq_disable(adapter);
        mac_ctrl_data = AT_READ_REG(&adapter->hw, REG_MAC_CTRL);
        __atl1e_rx_mode(features, &mac_ctrl_data);
        AT_WRITE_REG(&adapter->hw, REG_MAC_CTRL, mac_ctrl_data);
        atl1e_irq_enable(adapter);
}

static void __atl1e_vlan_mode(netdev_features_t features, u32 *mac_ctrl_data)
{
        if (features & NETIF_F_HW_VLAN_CTAG_RX) {
                /* enable VLAN tag insert/strip */
                *mac_ctrl_data |= MAC_CTRL_RMV_VLAN;
        } else {
                /* disable VLAN tag insert/strip */
                *mac_ctrl_data &= ~MAC_CTRL_RMV_VLAN;
        }
}

static void atl1e_vlan_mode(struct net_device *netdev,
                            netdev_features_t features)
{
        struct atl1e_adapter *adapter = netdev_priv(netdev);
        u32 mac_ctrl_data = 0;

        netdev_dbg(adapter->netdev, "%s\n", __func__);

        atl1e_irq_disable(adapter);
        mac_ctrl_data = AT_READ_REG(&adapter->hw, REG_MAC_CTRL);
        __atl1e_vlan_mode(features, &mac_ctrl_data);
        AT_WRITE_REG(&adapter->hw, REG_MAC_CTRL, mac_ctrl_data);
        atl1e_irq_enable(adapter);
}

static void atl1e_restore_vlan(struct atl1e_adapter *adapter)
{
        netdev_dbg(adapter->netdev, "%s\n", __func__);
        atl1e_vlan_mode(adapter->netdev, adapter->netdev->features);
}
/**
 * atl1e_set_mac_addr - Change the Ethernet Address of the NIC
 * @netdev: network interface device structure
 * @p: pointer to an address structure
 *
 * Returns 0 on success, negative on failure
 */
static int atl1e_set_mac_addr(struct net_device *netdev, void *p)
{
        struct atl1e_adapter *adapter = netdev_priv(netdev);
        struct sockaddr *addr = p;

        if (!is_valid_ether_addr(addr->sa_data))
                return -EADDRNOTAVAIL;

        if (netif_running(netdev))
                return -EBUSY;

        memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
        memcpy(adapter->hw.mac_addr, addr->sa_data, netdev->addr_len);

        atl1e_hw_set_mac_addr(&adapter->hw);

        return 0;
}

static netdev_features_t atl1e_fix_features(struct net_device *netdev,
                                            netdev_features_t features)
{
        /*
         * Since there is no support for separate rx/tx vlan accel
         * enable/disable make sure tx flag is always in same state as rx.
         */
        if (features & NETIF_F_HW_VLAN_CTAG_RX)
                features |= NETIF_F_HW_VLAN_CTAG_TX;
        else
                features &= ~NETIF_F_HW_VLAN_CTAG_TX;

        return features;
}

static int atl1e_set_features(struct net_device *netdev,
                              netdev_features_t features)
{
        netdev_features_t changed = netdev->features ^ features;

        if (changed & NETIF_F_HW_VLAN_CTAG_RX)
                atl1e_vlan_mode(netdev, features);

        if (changed & NETIF_F_RXALL)
                atl1e_rx_mode(netdev, features);

        return 0;
}

/**
 * atl1e_change_mtu - Change the Maximum Transfer Unit
 * @netdev: network interface device structure
 * @new_mtu: new value for maximum frame size
 *
 * Returns 0 on success, negative on failure
 */
static int atl1e_change_mtu(struct net_device *netdev, int new_mtu)
{
        struct atl1e_adapter *adapter = netdev_priv(netdev);
        int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;

        /* set MTU */
        if (netif_running(netdev)) {
                while (test_and_set_bit(__AT_RESETTING, &adapter->flags))
                        msleep(1);
                netdev->mtu = new_mtu;
                adapter->hw.max_frame_size = new_mtu;
                adapter->hw.rx_jumbo_th = (max_frame + 7) >> 3;
                atl1e_down(adapter);
                atl1e_up(adapter);
                clear_bit(__AT_RESETTING, &adapter->flags);
        }
        return 0;
}
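/*
 * Unit note: rx_jumbo_th is kept in 8-byte units, hence the
 * "(max_frame + 7) >> 3" rounding above.  Worked example: new_mtu =
 * 1500 gives max_frame = 1500 + 14 (ETH_HLEN) + 4 (ETH_FCS_LEN) +
 * 4 (VLAN_HLEN) = 1522 bytes, so the threshold becomes
 * (1522 + 7) >> 3 = 191.
 */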
/*
 * caller should hold mdio_lock
 */
static int atl1e_mdio_read(struct net_device *netdev, int phy_id, int reg_num)
{
        struct atl1e_adapter *adapter = netdev_priv(netdev);
        u16 result;

        atl1e_read_phy_reg(&adapter->hw, reg_num & MDIO_REG_ADDR_MASK, &result);
        return result;
}

static void atl1e_mdio_write(struct net_device *netdev, int phy_id,
                             int reg_num, int val)
{
        struct atl1e_adapter *adapter = netdev_priv(netdev);

        if (atl1e_write_phy_reg(&adapter->hw,
                                reg_num & MDIO_REG_ADDR_MASK, val))
                netdev_err(netdev, "write phy register failed\n");
}

static int atl1e_mii_ioctl(struct net_device *netdev,
                           struct ifreq *ifr, int cmd)
{
        struct atl1e_adapter *adapter = netdev_priv(netdev);
        struct mii_ioctl_data *data = if_mii(ifr);
        unsigned long flags;
        int retval = 0;

        if (!netif_running(netdev))
                return -EINVAL;

        spin_lock_irqsave(&adapter->mdio_lock, flags);
        switch (cmd) {
        case SIOCGMIIPHY:
                data->phy_id = 0;
                break;

        case SIOCGMIIREG:
                if (atl1e_read_phy_reg(&adapter->hw, data->reg_num & 0x1F,
                                       &data->val_out)) {
                        retval = -EIO;
                        goto out;
                }
                break;

        case SIOCSMIIREG:
                if (data->reg_num & ~(0x1F)) {
                        retval = -EFAULT;
                        goto out;
                }

                netdev_dbg(adapter->netdev, "<atl1e_mii_ioctl> write %x %x\n",
                           data->reg_num, data->val_in);
                if (atl1e_write_phy_reg(&adapter->hw,
                                        data->reg_num, data->val_in)) {
                        retval = -EIO;
                        goto out;
                }
                break;

        default:
                retval = -EOPNOTSUPP;
                break;
        }
out:
        spin_unlock_irqrestore(&adapter->mdio_lock, flags);
        return retval;
}

static int atl1e_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
{
        switch (cmd) {
        case SIOCGMIIPHY:
        case SIOCGMIIREG:
        case SIOCSMIIREG:
                return atl1e_mii_ioctl(netdev, ifr, cmd);
        default:
                return -EOPNOTSUPP;
        }
}

static void atl1e_setup_pcicmd(struct pci_dev *pdev)
{
        u16 cmd;

        pci_read_config_word(pdev, PCI_COMMAND, &cmd);
        cmd &= ~(PCI_COMMAND_INTX_DISABLE | PCI_COMMAND_IO);
        cmd |=  (PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER);
        pci_write_config_word(pdev, PCI_COMMAND, cmd);

        /*
         * Some motherboard BIOS (PXE/EFI) drivers may set PME while
         * handing control over to the OS (Windows/Linux), so clear this
         * bit before the NIC starts normal operation.
         */
        pci_write_config_dword(pdev, REG_PM_CTRLSTAT, 0);
}
/**
 * atl1e_alloc_queues - Allocate memory for all rings
 * @adapter: board private structure to initialize
 */
static int atl1e_alloc_queues(struct atl1e_adapter *adapter)
{
        return 0;
}

/**
 * atl1e_sw_init - Initialize general software structures (struct atl1e_adapter)
 * @adapter: board private structure to initialize
 *
 * atl1e_sw_init initializes the Adapter private data structure.
 * Fields are initialized based on PCI device information and
 * OS network device settings (MTU size).
 */
static int atl1e_sw_init(struct atl1e_adapter *adapter)
{
        struct atl1e_hw *hw = &adapter->hw;
        struct pci_dev *pdev = adapter->pdev;
        u32 phy_status_data = 0;

        adapter->link_speed = SPEED_0;  /* hardware init */
        adapter->link_duplex = FULL_DUPLEX;
        adapter->num_rx_queues = 1;

        /* PCI config space info */
        hw->vendor_id = pdev->vendor;
        hw->device_id = pdev->device;
        hw->subsystem_vendor_id = pdev->subsystem_vendor;
        hw->subsystem_id = pdev->subsystem_device;
        hw->revision_id = pdev->revision;

        pci_read_config_word(pdev, PCI_COMMAND, &hw->pci_cmd_word);

        phy_status_data = AT_READ_REG(hw, REG_PHY_STATUS);
        /* nic type */
        if (hw->revision_id >= 0xF0) {
                hw->nic_type = athr_l2e_revB;
        } else {
                if (phy_status_data & PHY_STATUS_100M)
                        hw->nic_type = athr_l1e;
                else
                        hw->nic_type = athr_l2e_revA;
        }

        phy_status_data = AT_READ_REG(hw, REG_PHY_STATUS);

        if (phy_status_data & PHY_STATUS_EMI_CA)
                hw->emi_ca = true;
        else
                hw->emi_ca = false;

        hw->phy_configured = false;
        hw->preamble_len = 7;
        hw->max_frame_size = adapter->netdev->mtu;
        hw->rx_jumbo_th = (hw->max_frame_size + ETH_HLEN +
                           VLAN_HLEN + ETH_FCS_LEN + 7) >> 3;

        hw->rrs_type = atl1e_rrs_disable;
        hw->indirect_tab = 0;
        hw->base_cpu = 0;

        hw->ict = 50000;                /* 100ms */
        hw->smb_timer = 200000;         /* 200ms */
        hw->tpd_thresh = adapter->tx_ring.count / 2;
        hw->rx_count_down = 4;          /* 2us resolution */
        hw->tx_count_down = hw->imt * 4 / 3;
        hw->dmar_block = atl1e_dma_req_1024;
        hw->dmaw_block = atl1e_dma_req_1024;
        hw->dmar_dly_cnt = 15;
        hw->dmaw_dly_cnt = 4;
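        /*
         * Editor's note on units (inferred from the comments above):
         * the interrupt-related timers tick in 2us units, so
         * ict = 50000 is 100ms and smb_timer = 200000 is 200ms;
         * rx_count_down and tx_count_down use the same 2us resolution.
         * dmar_dly_cnt/dmaw_dly_cnt are raw register field values.
         */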
        if (atl1e_alloc_queues(adapter)) {
                netdev_err(adapter->netdev, "Unable to allocate memory for queues\n");
                return -ENOMEM;
        }

        atomic_set(&adapter->irq_sem, 1);
        spin_lock_init(&adapter->mdio_lock);

        set_bit(__AT_DOWN, &adapter->flags);

        return 0;
}

/**
 * atl1e_clean_tx_ring - Free Tx-skb
 * @adapter: board private structure
 */
static void atl1e_clean_tx_ring(struct atl1e_adapter *adapter)
{
        struct atl1e_tx_ring *tx_ring = &adapter->tx_ring;
        struct atl1e_tx_buffer *tx_buffer = NULL;
        struct pci_dev *pdev = adapter->pdev;
        u16 index, ring_count;

        if (tx_ring->desc == NULL || tx_ring->tx_buffer == NULL)
                return;

        ring_count = tx_ring->count;
        /* first, unmap any DMA mappings */
        for (index = 0; index < ring_count; index++) {
                tx_buffer = &tx_ring->tx_buffer[index];
                if (tx_buffer->dma) {
                        if (tx_buffer->flags & ATL1E_TX_PCIMAP_SINGLE)
                                pci_unmap_single(pdev, tx_buffer->dma,
                                                 tx_buffer->length, PCI_DMA_TODEVICE);
                        else if (tx_buffer->flags & ATL1E_TX_PCIMAP_PAGE)
                                pci_unmap_page(pdev, tx_buffer->dma,
                                               tx_buffer->length, PCI_DMA_TODEVICE);
                        tx_buffer->dma = 0;
                }
        }
        /* second, free any skbs still held */
        for (index = 0; index < ring_count; index++) {
                tx_buffer = &tx_ring->tx_buffer[index];
                if (tx_buffer->skb) {
                        dev_kfree_skb_any(tx_buffer->skb);
                        tx_buffer->skb = NULL;
                }
        }
        /* Zero out Tx-buffers */
        memset(tx_ring->desc, 0, sizeof(struct atl1e_tpd_desc) *
               ring_count);
        memset(tx_ring->tx_buffer, 0, sizeof(struct atl1e_tx_buffer) *
               ring_count);
}
/**
 * atl1e_clean_rx_ring - Free rx-reservation skbs
 * @adapter: board private structure
 */
static void atl1e_clean_rx_ring(struct atl1e_adapter *adapter)
{
        struct atl1e_rx_ring *rx_ring =
                &adapter->rx_ring;
        struct atl1e_rx_page_desc *rx_page_desc = rx_ring->rx_page_desc;
        u16 i, j;

        if (adapter->ring_vir_addr == NULL)
                return;
        /* Zero out the descriptor ring */
        for (i = 0; i < adapter->num_rx_queues; i++) {
                for (j = 0; j < AT_PAGE_NUM_PER_QUEUE; j++) {
                        if (rx_page_desc[i].rx_page[j].addr != NULL) {
                                memset(rx_page_desc[i].rx_page[j].addr, 0,
                                       rx_ring->real_page_size);
                        }
                }
        }
}

static void atl1e_cal_ring_size(struct atl1e_adapter *adapter, u32 *ring_size)
{
        *ring_size = ((u32)(adapter->tx_ring.count *
                            sizeof(struct atl1e_tpd_desc) + 7
                        /* tx ring, qword align */
                      + adapter->rx_ring.real_page_size * AT_PAGE_NUM_PER_QUEUE *
                            adapter->num_rx_queues + 31
                        /* rx ring, 32 bytes align */
                      + (1 + AT_PAGE_NUM_PER_QUEUE * adapter->num_rx_queues) *
                            sizeof(u32) + 3));
                        /* tx, rx cmb, dword align */
}
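/*
 * Layout sketch of the single coherent allocation sized above, in the
 * order it is carved up by atl1e_setup_ring_resources():
 *
 *	[ TPD ring          ]  tx_ring.count * sizeof(struct atl1e_tpd_desc),
 *	                       8-byte (qword) aligned
 *	[ RX pages          ]  real_page_size * AT_PAGE_NUM_PER_QUEUE *
 *	                       num_rx_queues, 32-byte aligned
 *	[ TX CMB            ]  one u32
 *	[ RX write offsets  ]  one u32 per RX page, dword aligned
 *
 * The "+ 7", "+ 31" and "+ 3" terms are the worst-case alignment
 * padding for each region.
 */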
static void atl1e_init_ring_resources(struct atl1e_adapter *adapter)
{
        struct atl1e_rx_ring *rx_ring = NULL;

        rx_ring = &adapter->rx_ring;

        rx_ring->real_page_size = adapter->rx_ring.page_size
                                  + adapter->hw.max_frame_size
                                  + ETH_HLEN + VLAN_HLEN
                                  + ETH_FCS_LEN;
        rx_ring->real_page_size = roundup(rx_ring->real_page_size, 32);
        atl1e_cal_ring_size(adapter, &adapter->ring_size);

        adapter->ring_vir_addr = NULL;
        adapter->rx_ring.desc = NULL;
        rwlock_init(&adapter->tx_ring.tx_lock);
}

/*
 * Read / Write Ptr Initialize:
 */
static void atl1e_init_ring_ptrs(struct atl1e_adapter *adapter)
{
        struct atl1e_tx_ring *tx_ring = NULL;
        struct atl1e_rx_ring *rx_ring = NULL;
        struct atl1e_rx_page_desc *rx_page_desc = NULL;
        u16 i, j;

        tx_ring = &adapter->tx_ring;
        rx_ring = &adapter->rx_ring;
        rx_page_desc = rx_ring->rx_page_desc;

        tx_ring->next_to_use = 0;
        atomic_set(&tx_ring->next_to_clean, 0);

        for (i = 0; i < adapter->num_rx_queues; i++) {
                rx_page_desc[i].rx_using = 0;
                rx_page_desc[i].rx_nxseq = 0;
                for (j = 0; j < AT_PAGE_NUM_PER_QUEUE; j++) {
                        *rx_page_desc[i].rx_page[j].write_offset_addr = 0;
                        rx_page_desc[i].rx_page[j].read_offset = 0;
                }
        }
}
/**
 * atl1e_free_ring_resources - Free Tx / RX descriptor Resources
 * @adapter: board private structure
 *
 * Free all transmit and receive software resources
 */
static void atl1e_free_ring_resources(struct atl1e_adapter *adapter)
{
        struct pci_dev *pdev = adapter->pdev;

        atl1e_clean_tx_ring(adapter);
        atl1e_clean_rx_ring(adapter);

        if (adapter->ring_vir_addr) {
                pci_free_consistent(pdev, adapter->ring_size,
                                    adapter->ring_vir_addr, adapter->ring_dma);
                adapter->ring_vir_addr = NULL;
        }

        if (adapter->tx_ring.tx_buffer) {
                kfree(adapter->tx_ring.tx_buffer);
                adapter->tx_ring.tx_buffer = NULL;
        }
}

/**
 * atl1e_setup_ring_resources - allocate Tx / RX descriptor resources
 * @adapter: board private structure
 *
 * Return 0 on success, negative on failure
 */
static int atl1e_setup_ring_resources(struct atl1e_adapter *adapter)
{
        struct pci_dev *pdev = adapter->pdev;
        struct atl1e_tx_ring *tx_ring;
        struct atl1e_rx_ring *rx_ring;
        struct atl1e_rx_page_desc *rx_page_desc;
        int size, i, j;
        u32 offset = 0;
        int err = 0;

        if (adapter->ring_vir_addr != NULL)
                return 0; /* already allocated */

        tx_ring = &adapter->tx_ring;
        rx_ring = &adapter->rx_ring;

        /* real ring DMA buffer */

        size = adapter->ring_size;
        adapter->ring_vir_addr = pci_zalloc_consistent(pdev, adapter->ring_size,
                                                       &adapter->ring_dma);
        if (adapter->ring_vir_addr == NULL) {
                netdev_err(adapter->netdev,
                           "pci_alloc_consistent failed, size = %d\n", size);
                return -ENOMEM;
        }

        rx_page_desc = rx_ring->rx_page_desc;

        /* Init TPD Ring */
        tx_ring->dma = roundup(adapter->ring_dma, 8);
        offset = tx_ring->dma - adapter->ring_dma;
        tx_ring->desc = adapter->ring_vir_addr + offset;
        size = sizeof(struct atl1e_tx_buffer) * (tx_ring->count);
        tx_ring->tx_buffer = kzalloc(size, GFP_KERNEL);
        if (tx_ring->tx_buffer == NULL) {
                err = -ENOMEM;
                goto failed;
        }

        /* Init RXF-Pages */
        offset += (sizeof(struct atl1e_tpd_desc) * tx_ring->count);
        offset = roundup(offset, 32);

        for (i = 0; i < adapter->num_rx_queues; i++) {
                for (j = 0; j < AT_PAGE_NUM_PER_QUEUE; j++) {
                        rx_page_desc[i].rx_page[j].dma =
                                adapter->ring_dma + offset;
                        rx_page_desc[i].rx_page[j].addr =
                                adapter->ring_vir_addr + offset;
                        offset += rx_ring->real_page_size;
                }
        }

        /* Init CMB dma address */
        tx_ring->cmb_dma = adapter->ring_dma + offset;
        tx_ring->cmb = adapter->ring_vir_addr + offset;
        offset += sizeof(u32);

        for (i = 0; i < adapter->num_rx_queues; i++) {
                for (j = 0; j < AT_PAGE_NUM_PER_QUEUE; j++) {
                        rx_page_desc[i].rx_page[j].write_offset_dma =
                                adapter->ring_dma + offset;
                        rx_page_desc[i].rx_page[j].write_offset_addr =
                                adapter->ring_vir_addr + offset;
                        offset += sizeof(u32);
                }
        }

        if (unlikely(offset > adapter->ring_size)) {
                netdev_err(adapter->netdev, "offset(%d) > ring size(%d) !!\n",
                           offset, adapter->ring_size);
                err = -1;
                goto failed;
        }

        return 0;
failed:
        if (adapter->ring_vir_addr != NULL) {
                pci_free_consistent(pdev, adapter->ring_size,
                                    adapter->ring_vir_addr, adapter->ring_dma);
                adapter->ring_vir_addr = NULL;
        }
        return err;
}
static inline void atl1e_configure_des_ring(struct atl1e_adapter *adapter)
{
        struct atl1e_hw *hw = &adapter->hw;
        struct atl1e_rx_ring *rx_ring = &adapter->rx_ring;
        struct atl1e_tx_ring *tx_ring = &adapter->tx_ring;
        struct atl1e_rx_page_desc *rx_page_desc = NULL;
        int i, j;

        AT_WRITE_REG(hw, REG_DESC_BASE_ADDR_HI,
                     (u32)((adapter->ring_dma & AT_DMA_HI_ADDR_MASK) >> 32));
        AT_WRITE_REG(hw, REG_TPD_BASE_ADDR_LO,
                     (u32)((tx_ring->dma) & AT_DMA_LO_ADDR_MASK));
        AT_WRITE_REG(hw, REG_TPD_RING_SIZE, (u16)(tx_ring->count));
        AT_WRITE_REG(hw, REG_HOST_TX_CMB_LO,
                     (u32)((tx_ring->cmb_dma) & AT_DMA_LO_ADDR_MASK));

        rx_page_desc = rx_ring->rx_page_desc;
        /* RXF Page Physical address / Page Length */
        for (i = 0; i < AT_MAX_RECEIVE_QUEUE; i++) {
                AT_WRITE_REG(hw, atl1e_rx_page_hi_addr_regs[i],
                             (u32)((adapter->ring_dma &
                                    AT_DMA_HI_ADDR_MASK) >> 32));
                for (j = 0; j < AT_PAGE_NUM_PER_QUEUE; j++) {
                        u32 page_phy_addr;
                        u32 offset_phy_addr;

                        page_phy_addr = rx_page_desc[i].rx_page[j].dma;
                        offset_phy_addr =
                                rx_page_desc[i].rx_page[j].write_offset_dma;

                        AT_WRITE_REG(hw, atl1e_rx_page_lo_addr_regs[i][j],
                                     page_phy_addr & AT_DMA_LO_ADDR_MASK);
                        AT_WRITE_REG(hw, atl1e_rx_page_write_offset_regs[i][j],
                                     offset_phy_addr & AT_DMA_LO_ADDR_MASK);
                        AT_WRITE_REGB(hw, atl1e_rx_page_vld_regs[i][j], 1);
                }
        }
        AT_WRITE_REG(hw, REG_HOST_RXFPAGE_SIZE, rx_ring->page_size);
        /* Load all of the base addresses above */
        AT_WRITE_REG(hw, REG_LOAD_PTR, 1);
}
static inline void atl1e_configure_tx(struct atl1e_adapter *adapter)
{
        struct atl1e_hw *hw = &adapter->hw;
        u32 dev_ctrl_data = 0;
        u32 max_pay_load = 0;
        u32 jumbo_thresh = 0;
        u32 extra_size = 0;     /* Jumbo frame threshold in QWORD unit */

        /* configure TXQ param */
        if (hw->nic_type != athr_l2e_revB) {
                extra_size = ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN;
                if (hw->max_frame_size <= 1500) {
                        jumbo_thresh = hw->max_frame_size + extra_size;
                } else if (hw->max_frame_size < 6*1024) {
                        jumbo_thresh =
                                (hw->max_frame_size + extra_size) * 2 / 3;
                } else {
                        jumbo_thresh = (hw->max_frame_size + extra_size) / 2;
                }
                AT_WRITE_REG(hw, REG_TX_EARLY_TH, (jumbo_thresh + 7) >> 3);
        }

        dev_ctrl_data = AT_READ_REG(hw, REG_DEVICE_CTRL);

        max_pay_load = ((dev_ctrl_data >> DEVICE_CTRL_MAX_PAYLOAD_SHIFT)) &
                        DEVICE_CTRL_MAX_PAYLOAD_MASK;

        hw->dmaw_block = min_t(u32, max_pay_load, hw->dmaw_block);

        max_pay_load = ((dev_ctrl_data >> DEVICE_CTRL_MAX_RREQ_SZ_SHIFT)) &
                        DEVICE_CTRL_MAX_RREQ_SZ_MASK;
        hw->dmar_block = min_t(u32, max_pay_load, hw->dmar_block);

        if (hw->nic_type != athr_l2e_revB)
                AT_WRITE_REGW(hw, REG_TXQ_CTRL + 2,
                              atl1e_pay_load_size[hw->dmar_block]);

        AT_WRITE_REGW(hw, REG_TXQ_CTRL,
                      (((u16)hw->tpd_burst & TXQ_CTRL_NUM_TPD_BURST_MASK)
                       << TXQ_CTRL_NUM_TPD_BURST_SHIFT)
                      | TXQ_CTRL_ENH_MODE | TXQ_CTRL_EN);
}
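/*
 * Example of the clamping above (assuming the standard PCIe encoding
 * where a device-control field value of n means 128 << n bytes): if
 * the link allows a 512-byte max payload (n = 2) while hw->dmaw_block
 * was initialized to atl1e_dma_req_1024 (n = 3), min_t() lowers the
 * write burst to 512 bytes, and atl1e_pay_load_size[] translates the
 * read-block index back into a byte count for the TXQ_CTRL write.
 */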
static inline void atl1e_configure_rx(struct atl1e_adapter *adapter)
{
        struct atl1e_hw *hw = &adapter->hw;
        u32 rxf_len  = 0;
        u32 rxf_low  = 0;
        u32 rxf_high = 0;
        u32 rxf_thresh_data = 0;
        u32 rxq_ctrl_data = 0;

        if (hw->nic_type != athr_l2e_revB) {
                AT_WRITE_REGW(hw, REG_RXQ_JMBOSZ_RRDTIM,
                              (u16)((hw->rx_jumbo_th & RXQ_JMBOSZ_TH_MASK) <<
                                    RXQ_JMBOSZ_TH_SHIFT |
                                    (1 & RXQ_JMBO_LKAH_MASK) <<
                                    RXQ_JMBO_LKAH_SHIFT));

                rxf_len  = AT_READ_REG(hw, REG_SRAM_RXF_LEN);
                rxf_high = rxf_len * 4 / 5;
                rxf_low  = rxf_len / 5;
                rxf_thresh_data = ((rxf_high & RXQ_RXF_PAUSE_TH_HI_MASK)
                                   << RXQ_RXF_PAUSE_TH_HI_SHIFT) |
                                  ((rxf_low & RXQ_RXF_PAUSE_TH_LO_MASK)
                                   << RXQ_RXF_PAUSE_TH_LO_SHIFT);

                AT_WRITE_REG(hw, REG_RXQ_RXF_PAUSE_THRESH, rxf_thresh_data);
        }

        /* RRS */
        AT_WRITE_REG(hw, REG_IDT_TABLE, hw->indirect_tab);
        AT_WRITE_REG(hw, REG_BASE_CPU_NUMBER, hw->base_cpu);

        if (hw->rrs_type & atl1e_rrs_ipv4)
                rxq_ctrl_data |= RXQ_CTRL_HASH_TYPE_IPV4;

        if (hw->rrs_type & atl1e_rrs_ipv4_tcp)
                rxq_ctrl_data |= RXQ_CTRL_HASH_TYPE_IPV4_TCP;

        if (hw->rrs_type & atl1e_rrs_ipv6)
                rxq_ctrl_data |= RXQ_CTRL_HASH_TYPE_IPV6;

        if (hw->rrs_type & atl1e_rrs_ipv6_tcp)
                rxq_ctrl_data |= RXQ_CTRL_HASH_TYPE_IPV6_TCP;

        if (hw->rrs_type != atl1e_rrs_disable)
                rxq_ctrl_data |=
                        (RXQ_CTRL_HASH_ENABLE | RXQ_CTRL_RSS_MODE_MQUESINT);

        rxq_ctrl_data |= RXQ_CTRL_IPV6_XSUM_VERIFY_EN | RXQ_CTRL_PBA_ALIGN_32 |
                         RXQ_CTRL_CUT_THRU_EN | RXQ_CTRL_EN;

        AT_WRITE_REG(hw, REG_RXQ_CTRL, rxq_ctrl_data);
}
static inline void atl1e_configure_dma(struct atl1e_adapter *adapter)
{
        struct atl1e_hw *hw = &adapter->hw;
        u32 dma_ctrl_data = 0;

        dma_ctrl_data = DMA_CTRL_RXCMB_EN;
        dma_ctrl_data |= (((u32)hw->dmar_block) & DMA_CTRL_DMAR_BURST_LEN_MASK)
                         << DMA_CTRL_DMAR_BURST_LEN_SHIFT;
        dma_ctrl_data |= (((u32)hw->dmaw_block) & DMA_CTRL_DMAW_BURST_LEN_MASK)
                         << DMA_CTRL_DMAW_BURST_LEN_SHIFT;
        dma_ctrl_data |= DMA_CTRL_DMAR_REQ_PRI | DMA_CTRL_DMAR_OUT_ORDER;
        dma_ctrl_data |= (((u32)hw->dmar_dly_cnt) & DMA_CTRL_DMAR_DLY_CNT_MASK)
                         << DMA_CTRL_DMAR_DLY_CNT_SHIFT;
        dma_ctrl_data |= (((u32)hw->dmaw_dly_cnt) & DMA_CTRL_DMAW_DLY_CNT_MASK)
                         << DMA_CTRL_DMAW_DLY_CNT_SHIFT;

        AT_WRITE_REG(hw, REG_DMA_CTRL, dma_ctrl_data);
}

static void atl1e_setup_mac_ctrl(struct atl1e_adapter *adapter)
{
        u32 value;
        struct atl1e_hw *hw = &adapter->hw;
        struct net_device *netdev = adapter->netdev;

        /* Config MAC CTRL Register */
        value = MAC_CTRL_TX_EN |
                MAC_CTRL_RX_EN;

        if (FULL_DUPLEX == adapter->link_duplex)
                value |= MAC_CTRL_DUPLX;

        value |= ((u32)((SPEED_1000 == adapter->link_speed) ?
                        MAC_CTRL_SPEED_1000 : MAC_CTRL_SPEED_10_100) <<
                  MAC_CTRL_SPEED_SHIFT);
        value |= (MAC_CTRL_TX_FLOW | MAC_CTRL_RX_FLOW);

        value |= (MAC_CTRL_ADD_CRC | MAC_CTRL_PAD);
        value |= (((u32)adapter->hw.preamble_len &
                   MAC_CTRL_PRMLEN_MASK) << MAC_CTRL_PRMLEN_SHIFT);

        __atl1e_vlan_mode(netdev->features, &value);

        value |= MAC_CTRL_BC_EN;
        if (netdev->flags & IFF_PROMISC)
                value |= MAC_CTRL_PROMIS_EN;
        if (netdev->flags & IFF_ALLMULTI)
                value |= MAC_CTRL_MC_ALL_EN;
        if (netdev->features & NETIF_F_RXALL)
                value |= MAC_CTRL_DBG;
        AT_WRITE_REG(hw, REG_MAC_CTRL, value);
}
/**
 * atl1e_configure - Configure Transmit&Receive Unit after Reset
 * @adapter: board private structure
 *
 * Configure the Tx /Rx unit of the MAC after a reset.
 */
static int atl1e_configure(struct atl1e_adapter *adapter)
{
        struct atl1e_hw *hw = &adapter->hw;

        u32 intr_status_data = 0;

        /* clear interrupt status */
        AT_WRITE_REG(hw, REG_ISR, ~0);

        /* 1. set MAC Address */
        atl1e_hw_set_mac_addr(hw);

        /* 2. Init the Multicast HASH table (done by set_multi) */

        /* 3. Clear any WOL status */
        AT_WRITE_REG(hw, REG_WOL_CTRL, 0);

        /* 4. Descriptor ring base address/length/read ptr/write ptr:
         *    the TPD ring, SMB and RXF0 page CMBs all share the same
         *    high 32 bits of the DMA address */
        atl1e_configure_des_ring(adapter);

        /* 5. set Interrupt Moderator Timer */
        AT_WRITE_REGW(hw, REG_IRQ_MODU_TIMER_INIT, hw->imt);
        AT_WRITE_REGW(hw, REG_IRQ_MODU_TIMER2_INIT, hw->imt);
        AT_WRITE_REG(hw, REG_MASTER_CTRL, MASTER_CTRL_LED_MODE |
                     MASTER_CTRL_ITIMER_EN | MASTER_CTRL_ITIMER2_EN);

        /* 6. rx/tx threshold to trigger interrupt */
        AT_WRITE_REGW(hw, REG_TRIG_RRD_THRESH, hw->rrd_thresh);
        AT_WRITE_REGW(hw, REG_TRIG_TPD_THRESH, hw->tpd_thresh);
        AT_WRITE_REGW(hw, REG_TRIG_RXTIMER, hw->rx_count_down);
        AT_WRITE_REGW(hw, REG_TRIG_TXTIMER, hw->tx_count_down);

        /* 7. set Interrupt Clear Timer */
        AT_WRITE_REGW(hw, REG_CMBDISDMA_TIMER, hw->ict);

        /* 8. set MTU */
        AT_WRITE_REG(hw, REG_MTU, hw->max_frame_size + ETH_HLEN +
                     VLAN_HLEN + ETH_FCS_LEN);

        /* 9. config TXQ early tx threshold */
        atl1e_configure_tx(adapter);

        /* 10. config RXQ */
        atl1e_configure_rx(adapter);

        /* 11. config DMA Engine */
        atl1e_configure_dma(adapter);

        /* 12. smb timer to trigger interrupt */
        AT_WRITE_REG(hw, REG_SMB_STAT_TIMER, hw->smb_timer);

        intr_status_data = AT_READ_REG(hw, REG_ISR);
        if (unlikely((intr_status_data & ISR_PHY_LINKDOWN) != 0)) {
                netdev_err(adapter->netdev,
                           "atl1e_configure failed, PCIE phy link down\n");
                return -1;
        }

        AT_WRITE_REG(hw, REG_ISR, 0x7fffffff);
        return 0;
}
/**
 * atl1e_get_stats - Get System Network Statistics
 * @netdev: network interface device structure
 *
 * Returns the address of the device statistics structure.
 * The statistics are actually updated from the timer callback.
 */
static struct net_device_stats *atl1e_get_stats(struct net_device *netdev)
{
        struct atl1e_adapter *adapter = netdev_priv(netdev);
        struct atl1e_hw_stats *hw_stats = &adapter->hw_stats;
        struct net_device_stats *net_stats = &netdev->stats;

        net_stats->rx_bytes   = hw_stats->rx_byte_cnt;
        net_stats->tx_bytes   = hw_stats->tx_byte_cnt;
        net_stats->multicast  = hw_stats->rx_mcast;
        net_stats->collisions = hw_stats->tx_1_col +
                                hw_stats->tx_2_col +
                                hw_stats->tx_late_col +
                                hw_stats->tx_abort_col;

        net_stats->rx_errors = hw_stats->rx_frag +
                               hw_stats->rx_fcs_err +
                               hw_stats->rx_len_err +
                               hw_stats->rx_sz_ov +
                               hw_stats->rx_rrd_ov +
                               hw_stats->rx_align_err +
                               hw_stats->rx_rxf_ov;

        net_stats->rx_fifo_errors   = hw_stats->rx_rxf_ov;
        net_stats->rx_length_errors = hw_stats->rx_len_err;
        net_stats->rx_crc_errors    = hw_stats->rx_fcs_err;
        net_stats->rx_frame_errors  = hw_stats->rx_align_err;
        net_stats->rx_dropped       = hw_stats->rx_rrd_ov;

        net_stats->tx_errors = hw_stats->tx_late_col +
                               hw_stats->tx_abort_col +
                               hw_stats->tx_underrun +
                               hw_stats->tx_trunc;

        net_stats->tx_fifo_errors    = hw_stats->tx_underrun;
        net_stats->tx_aborted_errors = hw_stats->tx_abort_col;
        net_stats->tx_window_errors  = hw_stats->tx_late_col;

        net_stats->rx_packets = hw_stats->rx_ok + net_stats->rx_errors;
        net_stats->tx_packets = hw_stats->tx_ok + net_stats->tx_errors;

        return net_stats;
}
static void atl1e_update_hw_stats(struct atl1e_adapter *adapter)
{
        u16 hw_reg_addr = 0;
        unsigned long *stats_item = NULL;

        /* update rx status */
        hw_reg_addr = REG_MAC_RX_STATUS_BIN;
        stats_item  = &adapter->hw_stats.rx_ok;
        while (hw_reg_addr <= REG_MAC_RX_STATUS_END) {
                *stats_item += AT_READ_REG(&adapter->hw, hw_reg_addr);
                stats_item++;
                hw_reg_addr += 4;
        }
        /* update tx status */
        hw_reg_addr = REG_MAC_TX_STATUS_BIN;
        stats_item  = &adapter->hw_stats.tx_ok;
        while (hw_reg_addr <= REG_MAC_TX_STATUS_END) {
                *stats_item += AT_READ_REG(&adapter->hw, hw_reg_addr);
                stats_item++;
                hw_reg_addr += 4;
        }
}
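/*
 * The loops above rely on the MAC statistics registers being laid out
 * contiguously from REG_MAC_RX_STATUS_BIN through REG_MAC_TX_STATUS_END,
 * with struct atl1e_hw_stats declaring its unsigned long counters in
 * the same order, so one register can be accumulated per struct field
 * as both cursors advance in lock step.
 */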
static inline void atl1e_clear_phy_int(struct atl1e_adapter *adapter)
{
        u16 phy_data;

        spin_lock(&adapter->mdio_lock);
        atl1e_read_phy_reg(&adapter->hw, MII_INT_STATUS, &phy_data);
        spin_unlock(&adapter->mdio_lock);
}

static bool atl1e_clean_tx_irq(struct atl1e_adapter *adapter)
{
        struct atl1e_tx_ring *tx_ring = &adapter->tx_ring;
        struct atl1e_tx_buffer *tx_buffer = NULL;
        u16 hw_next_to_clean = AT_READ_REGW(&adapter->hw, REG_TPD_CONS_IDX);
        u16 next_to_clean = atomic_read(&tx_ring->next_to_clean);

        while (next_to_clean != hw_next_to_clean) {
                tx_buffer = &tx_ring->tx_buffer[next_to_clean];
                if (tx_buffer->dma) {
                        if (tx_buffer->flags & ATL1E_TX_PCIMAP_SINGLE)
                                pci_unmap_single(adapter->pdev, tx_buffer->dma,
                                                 tx_buffer->length, PCI_DMA_TODEVICE);
                        else if (tx_buffer->flags & ATL1E_TX_PCIMAP_PAGE)
                                pci_unmap_page(adapter->pdev, tx_buffer->dma,
                                               tx_buffer->length, PCI_DMA_TODEVICE);
                        tx_buffer->dma = 0;
                }

                if (tx_buffer->skb) {
                        dev_consume_skb_irq(tx_buffer->skb);
                        tx_buffer->skb = NULL;
                }

                if (++next_to_clean == tx_ring->count)
                        next_to_clean = 0;
        }

        atomic_set(&tx_ring->next_to_clean, next_to_clean);

        if (netif_queue_stopped(adapter->netdev) &&
            netif_carrier_ok(adapter->netdev)) {
                netif_wake_queue(adapter->netdev);
        }

        return true;
}
/**
 * atl1e_intr - Interrupt Handler
 * @irq: interrupt number
 * @data: pointer to a network interface device structure
 */
static irqreturn_t atl1e_intr(int irq, void *data)
{
        struct net_device *netdev = data;
        struct atl1e_adapter *adapter = netdev_priv(netdev);
        struct atl1e_hw *hw = &adapter->hw;
        int max_ints = AT_MAX_INT_WORK;
        int handled = IRQ_NONE;
        u32 status;

        do {
                status = AT_READ_REG(hw, REG_ISR);
                if ((status & IMR_NORMAL_MASK) == 0 ||
                    (status & ISR_DIS_INT) != 0) {
                        if (max_ints != AT_MAX_INT_WORK)
                                handled = IRQ_HANDLED;
                        break;
                }
                /* link event */
                if (status & ISR_GPHY)
                        atl1e_clear_phy_int(adapter);
                /* Ack ISR */
                AT_WRITE_REG(hw, REG_ISR, status | ISR_DIS_INT);

                handled = IRQ_HANDLED;
                /* check if PCIE PHY Link down */
                if (status & ISR_PHY_LINKDOWN) {
                        netdev_err(adapter->netdev,
                                   "pcie phy linkdown %x\n", status);
                        if (netif_running(adapter->netdev)) {
                                /* reset MAC */
                                atl1e_irq_reset(adapter);
                                schedule_work(&adapter->reset_task);
                                break;
                        }
                }

                /* check if DMA read/write error */
                if (status & (ISR_DMAR_TO_RST | ISR_DMAW_TO_RST)) {
                        netdev_err(adapter->netdev,
                                   "PCIE DMA RW error (status = 0x%x)\n",
                                   status);
                        atl1e_irq_reset(adapter);
                        schedule_work(&adapter->reset_task);
                        break;
                }

                if (status & ISR_SMB)
                        atl1e_update_hw_stats(adapter);

                /* link event */
                if (status & (ISR_GPHY | ISR_MANUAL)) {
                        netdev->stats.tx_carrier_errors++;
                        atl1e_link_chg_event(adapter);
                        break;
                }

                /* transmit event */
                if (status & ISR_TX_EVENT)
                        atl1e_clean_tx_irq(adapter);

                if (status & ISR_RX_EVENT) {
                        /*
                         * disable rx interrupts, without
                         * the synchronize_irq bit
                         */
                        AT_WRITE_REG(hw, REG_IMR,
                                     IMR_NORMAL_MASK & ~ISR_RX_EVENT);
                        if (likely(napi_schedule_prep(
                                           &adapter->napi)))
                                __napi_schedule(&adapter->napi);
                }
        } while (--max_ints > 0);
        /* re-enable Interrupt */
        AT_WRITE_REG(&adapter->hw, REG_ISR, 0);
        return handled;
}
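/*
 * Interrupt flow recap: the handler acks the ISR with ISR_DIS_INT set,
 * which keeps the line masked while up to AT_MAX_INT_WORK passes drain
 * pending events; the final write of 0 to REG_ISR re-arms it.  RX work
 * is not handled here: ISR_RX_EVENT is masked in the IMR and deferred
 * to NAPI, and atl1e_clean() unmasks it again once the budget is not
 * exhausted.
 */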
static inline void atl1e_rx_checksum(struct atl1e_adapter *adapter,
                                     struct sk_buff *skb, struct atl1e_recv_ret_status *prrs)
{
        u8 *packet = (u8 *)(prrs + 1);
        struct iphdr *iph;
        u16 head_len = ETH_HLEN;
        u16 pkt_flags;
        u16 err_flags;

        skb_checksum_none_assert(skb);
        pkt_flags = prrs->pkt_flag;
        err_flags = prrs->err_flag;
        if (((pkt_flags & RRS_IS_IPV4) || (pkt_flags & RRS_IS_IPV6)) &&
            ((pkt_flags & RRS_IS_TCP) || (pkt_flags & RRS_IS_UDP))) {
                if (pkt_flags & RRS_IS_IPV4) {
                        if (pkt_flags & RRS_IS_802_3)
                                head_len += 8;
                        iph = (struct iphdr *) (packet + head_len);
                        if (iph->frag_off != 0 && !(pkt_flags & RRS_IS_IP_DF))
                                goto hw_xsum;
                }
                if (!(err_flags & (RRS_ERR_IP_CSUM | RRS_ERR_L4_CSUM))) {
                        skb->ip_summed = CHECKSUM_UNNECESSARY;
                        return;
                }
        }

hw_xsum:
        return;
}

static struct atl1e_rx_page *atl1e_get_rx_page(struct atl1e_adapter *adapter,
                                               u8 que)
{
        struct atl1e_rx_page_desc *rx_page_desc =
                (struct atl1e_rx_page_desc *) adapter->rx_ring.rx_page_desc;
        u8 rx_using = rx_page_desc[que].rx_using;

        return &(rx_page_desc[que].rx_page[rx_using]);
}
static void atl1e_clean_rx_irq(struct atl1e_adapter *adapter, u8 que,
                               int *work_done, int work_to_do)
{
        struct net_device *netdev = adapter->netdev;
        struct atl1e_rx_ring *rx_ring = &adapter->rx_ring;
        struct atl1e_rx_page_desc *rx_page_desc =
                (struct atl1e_rx_page_desc *) rx_ring->rx_page_desc;
        struct sk_buff *skb = NULL;
        struct atl1e_rx_page *rx_page = atl1e_get_rx_page(adapter, que);
        u32 packet_size, write_offset;
        struct atl1e_recv_ret_status *prrs;

        write_offset = *(rx_page->write_offset_addr);
        if (likely(rx_page->read_offset < write_offset)) {
                do {
                        if (*work_done >= work_to_do)
                                break;
                        (*work_done)++;
                        /* get new packet's rrs */
                        prrs = (struct atl1e_recv_ret_status *) (rx_page->addr +
                                                                 rx_page->read_offset);
                        /* check sequence number */
                        if (prrs->seq_num != rx_page_desc[que].rx_nxseq) {
                                netdev_err(netdev,
                                           "rx sequence number error (rx=%d) (expect=%d)\n",
                                           prrs->seq_num,
                                           rx_page_desc[que].rx_nxseq);
                                rx_page_desc[que].rx_nxseq++;
                                /* just for debug use */
                                AT_WRITE_REG(&adapter->hw, REG_DEBUG_DATA0,
                                             (((u32)prrs->seq_num) << 16) |
                                             rx_page_desc[que].rx_nxseq);
                                goto fatal_err;
                        }
                        rx_page_desc[que].rx_nxseq++;

                        /* error packet? */
                        if ((prrs->pkt_flag & RRS_IS_ERR_FRAME) &&
                            !(netdev->features & NETIF_F_RXALL)) {
                                if (prrs->err_flag & (RRS_ERR_BAD_CRC |
                                                      RRS_ERR_DRIBBLE | RRS_ERR_CODE |
                                                      RRS_ERR_TRUNC)) {
                                        /* hardware error, discard this packet */
                                        netdev_err(netdev,
                                                   "rx packet desc error %x\n",
                                                   *((u32 *)prrs + 1));
                                        goto skip_pkt;
                                }
                        }

                        packet_size = ((prrs->word1 >> RRS_PKT_SIZE_SHIFT) &
                                       RRS_PKT_SIZE_MASK);
                        if (likely(!(netdev->features & NETIF_F_RXFCS)))
                                packet_size -= 4; /* CRC */

                        skb = netdev_alloc_skb_ip_align(netdev, packet_size);
                        if (skb == NULL)
                                goto skip_pkt;

                        memcpy(skb->data, (u8 *)(prrs + 1), packet_size);
                        skb_put(skb, packet_size);
                        skb->protocol = eth_type_trans(skb, netdev);
                        atl1e_rx_checksum(adapter, skb, prrs);

                        if (prrs->pkt_flag & RRS_IS_VLAN_TAG) {
                                u16 vlan_tag = (prrs->vtag >> 4) |
                                               ((prrs->vtag & 7) << 13) |
                                               ((prrs->vtag & 8) << 9);
                                netdev_dbg(netdev,
                                           "RXD VLAN TAG<RRD>=0x%04x\n",
                                           prrs->vtag);
                                __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
                        }
                        napi_gro_receive(&adapter->napi, skb);

skip_pkt:
                        /* skip current packet whether it's ok or not. */
                        rx_page->read_offset +=
                                (((u32)((prrs->word1 >> RRS_PKT_SIZE_SHIFT) &
                                        RRS_PKT_SIZE_MASK) +
                                  sizeof(struct atl1e_recv_ret_status) + 31) &
                                 0xFFFFFFE0);

                        if (rx_page->read_offset >= rx_ring->page_size) {
                                /* mark this page clean */
                                u16 reg_addr;
                                u8  rx_using;

                                rx_page->read_offset =
                                        *(rx_page->write_offset_addr) = 0;
                                rx_using = rx_page_desc[que].rx_using;
                                reg_addr =
                                        atl1e_rx_page_vld_regs[que][rx_using];
                                AT_WRITE_REGB(&adapter->hw, reg_addr, 1);
                                rx_page_desc[que].rx_using ^= 1;
                                rx_page = atl1e_get_rx_page(adapter, que);
                        }
                        write_offset = *(rx_page->write_offset_addr);
                } while (rx_page->read_offset < write_offset);
        }

        return;

fatal_err:
        if (!test_bit(__AT_DOWN, &adapter->flags))
                schedule_work(&adapter->reset_task);
}
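/*
 * RX scheme recap: each queue owns two receive pages.  Hardware fills
 * the current page and advances *write_offset_addr (a CMB word updated
 * by DMA); the driver walks RRS-plus-packet records from read_offset up
 * to that mark, copying each frame into a fresh skb.  Once a page is
 * fully consumed, both offsets are reset, the page is handed back to
 * hardware through its VLD register, and rx_using flips to the other
 * page.
 */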
/**
 * atl1e_clean - NAPI Rx polling callback
 * @napi: napi struct embedded in the adapter private structure
 * @budget: number of packets the driver may process
 */
static int atl1e_clean(struct napi_struct *napi, int budget)
{
        struct atl1e_adapter *adapter =
                container_of(napi, struct atl1e_adapter, napi);
        u32 imr_data;
        int work_done = 0;

        /* Keep link state information with original netdev */
        if (!netif_carrier_ok(adapter->netdev))
                goto quit_polling;

        atl1e_clean_rx_irq(adapter, 0, &work_done, budget);

        /* If no Tx and not enough Rx work done, exit the polling mode */
        if (work_done < budget) {
quit_polling:
                napi_complete_done(napi, work_done);
                imr_data = AT_READ_REG(&adapter->hw, REG_IMR);
                AT_WRITE_REG(&adapter->hw, REG_IMR, imr_data | ISR_RX_EVENT);

                if (test_bit(__AT_DOWN, &adapter->flags)) {
                        atomic_dec(&adapter->irq_sem);
                        netdev_err(adapter->netdev,
                                   "atl1e_clean is called when AT_DOWN\n");
                }
                /* reenable RX intr */
                /*atl1e_irq_enable(adapter); */
        }
        return work_done;
}

#ifdef CONFIG_NET_POLL_CONTROLLER

/*
 * Polling 'interrupt' - used by things like netconsole to send skbs
 * without having to re-enable interrupts. It's not called while
 * the interrupt routine is executing.
 */
static void atl1e_netpoll(struct net_device *netdev)
{
        struct atl1e_adapter *adapter = netdev_priv(netdev);

        disable_irq(adapter->pdev->irq);
        atl1e_intr(adapter->pdev->irq, netdev);
        enable_irq(adapter->pdev->irq);
}
#endif

static inline u16 atl1e_tpd_avail(struct atl1e_adapter *adapter)
{
        struct atl1e_tx_ring *tx_ring = &adapter->tx_ring;
        u16 next_to_use = 0;
        u16 next_to_clean = 0;

        next_to_clean = atomic_read(&tx_ring->next_to_clean);
        next_to_use   = tx_ring->next_to_use;

        return (u16)(next_to_clean > next_to_use) ?
                (next_to_clean - next_to_use - 1) :
                (tx_ring->count + next_to_clean - next_to_use - 1);
}
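/*
 * Worked example for the expression above: with count = 64,
 * next_to_use = 60 and next_to_clean = 10, the producer has wrapped,
 * so 64 + 10 - 60 - 1 = 13 descriptors are free.  One slot is always
 * kept unused so that next_to_use == next_to_clean unambiguously means
 * "empty" rather than "full".
 */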
/*
 * get next usable tpd
 * Note: should call atl1e_tpd_avail to make sure
 * there is enough tpd to use
 */
static struct atl1e_tpd_desc *atl1e_get_tpd(struct atl1e_adapter *adapter)
{
        struct atl1e_tx_ring *tx_ring = &adapter->tx_ring;
        u16 next_to_use = 0;

        next_to_use = tx_ring->next_to_use;
        if (++tx_ring->next_to_use == tx_ring->count)
                tx_ring->next_to_use = 0;

        memset(&tx_ring->desc[next_to_use], 0, sizeof(struct atl1e_tpd_desc));
        return &tx_ring->desc[next_to_use];
}

static struct atl1e_tx_buffer *
atl1e_get_tx_buffer(struct atl1e_adapter *adapter, struct atl1e_tpd_desc *tpd)
{
        struct atl1e_tx_ring *tx_ring = &adapter->tx_ring;

        return &tx_ring->tx_buffer[tpd - tx_ring->desc];
}

/* Calculate the number of transmit packet descriptors (TPDs) needed */
static u16 atl1e_cal_tdp_req(const struct sk_buff *skb)
{
        u16 tpd_req = 1;
        u16 fg_size = 0;
        u16 proto_hdr_len = 0;
        int i;

        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
                fg_size = skb_frag_size(&skb_shinfo(skb)->frags[i]);
                tpd_req += ((fg_size + MAX_TX_BUF_LEN - 1) >> MAX_TX_BUF_SHIFT);
        }

        if (skb_is_gso(skb)) {
                if (skb->protocol == htons(ETH_P_IP) ||
                    (skb_shinfo(skb)->gso_type == SKB_GSO_TCPV6)) {
                        proto_hdr_len = skb_transport_offset(skb) +
                                        tcp_hdrlen(skb);
                        if (proto_hdr_len < skb_headlen(skb)) {
                                tpd_req += ((skb_headlen(skb) - proto_hdr_len +
                                             MAX_TX_BUF_LEN - 1) >>
                                            MAX_TX_BUF_SHIFT);
                        }
                }
        }
        return tpd_req;
}
static int atl1e_tso_csum(struct atl1e_adapter *adapter,
                          struct sk_buff *skb, struct atl1e_tpd_desc *tpd)
{
        unsigned short offload_type;
        u8 hdr_len;
        u32 real_len;

        if (skb_is_gso(skb)) {
                int err;

                err = skb_cow_head(skb, 0);
                if (err < 0)
                        return err;

                offload_type = skb_shinfo(skb)->gso_type;

                if (offload_type & SKB_GSO_TCPV4) {
                        real_len = (((unsigned char *)ip_hdr(skb) - skb->data)
                                    + ntohs(ip_hdr(skb)->tot_len));

                        if (real_len < skb->len)
                                pskb_trim(skb, real_len);

                        hdr_len = (skb_transport_offset(skb) + tcp_hdrlen(skb));
                        if (unlikely(skb->len == hdr_len)) {
                                /* only checksum offload is needed */
                                netdev_warn(adapter->netdev,
                                            "IPV4 tso with zero data??\n");
                                goto check_sum;
                        }
                        ip_hdr(skb)->check = 0;
                        ip_hdr(skb)->tot_len = 0;
                        tcp_hdr(skb)->check = ~csum_tcpudp_magic(
                                                ip_hdr(skb)->saddr,
                                                ip_hdr(skb)->daddr,
                                                0, IPPROTO_TCP, 0);
                        tpd->word3 |= (ip_hdr(skb)->ihl &
                                       TDP_V4_IPHL_MASK) <<
                                      TPD_V4_IPHL_SHIFT;
                        tpd->word3 |= ((tcp_hdrlen(skb) >> 2) &
                                       TPD_TCPHDRLEN_MASK) <<
                                      TPD_TCPHDRLEN_SHIFT;
                        tpd->word3 |= ((skb_shinfo(skb)->gso_size) &
                                       TPD_MSS_MASK) << TPD_MSS_SHIFT;
                        tpd->word3 |= 1 << TPD_SEGMENT_EN_SHIFT;
                }
                return 0;
        }

check_sum:
        if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) {
                u8 css, cso;

                cso = skb_checksum_start_offset(skb);
                if (unlikely(cso & 0x1)) {
                        netdev_err(adapter->netdev,
                                   "payload checksum start offset must be even\n");
                        return -1;
                }
                css = cso + skb->csum_offset;
                tpd->word3 |= (cso & TPD_PLOADOFFSET_MASK) <<
                              TPD_PLOADOFFSET_SHIFT;
                tpd->word3 |= (css & TPD_CCSUMOFFSET_MASK) <<
                              TPD_CCSUMOFFSET_SHIFT;
                tpd->word3 |= 1 << TPD_CC_SEGMENT_EN_SHIFT;
        }

        return 0;
}
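/*
 * TSO fixup recap: for IPv4 GSO the stack may pad the frame beyond
 * tot_len, so the skb is first trimmed back to the IP datagram length;
 * ip->check/tot_len are zeroed (hardware rewrites them per segment) and
 * the TCP checksum is seeded with the pseudo-header sum via
 * csum_tcpudp_magic().  For plain checksum offload only the checksum
 * start/insert offsets and an enable bit are written into word3.
 */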
static int atl1e_tx_map(struct atl1e_adapter *adapter,
                        struct sk_buff *skb, struct atl1e_tpd_desc *tpd)
{
        struct atl1e_tpd_desc *use_tpd = NULL;
        struct atl1e_tx_buffer *tx_buffer = NULL;
        u16 buf_len = skb_headlen(skb);
        u16 map_len = 0;
        u16 mapped_len = 0;
        u16 hdr_len = 0;
        u16 nr_frags;
        u16 f;
        int segment;
        int ring_start = adapter->tx_ring.next_to_use;
        int ring_end;

        nr_frags = skb_shinfo(skb)->nr_frags;
        segment = (tpd->word3 >> TPD_SEGMENT_EN_SHIFT) & TPD_SEGMENT_EN_MASK;
        if (segment) {
                /* TSO */
                map_len = hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
                use_tpd = tpd;

                tx_buffer = atl1e_get_tx_buffer(adapter, use_tpd);
                tx_buffer->length = map_len;
                tx_buffer->dma = pci_map_single(adapter->pdev,
                                                skb->data, hdr_len, PCI_DMA_TODEVICE);
                if (dma_mapping_error(&adapter->pdev->dev, tx_buffer->dma))
                        return -ENOSPC;

                ATL1E_SET_PCIMAP_TYPE(tx_buffer, ATL1E_TX_PCIMAP_SINGLE);
                mapped_len += map_len;
                use_tpd->buffer_addr = cpu_to_le64(tx_buffer->dma);
                use_tpd->word2 = (use_tpd->word2 & (~TPD_BUFLEN_MASK)) |
                                 ((cpu_to_le32(tx_buffer->length) &
                                   TPD_BUFLEN_MASK) << TPD_BUFLEN_SHIFT);
        }

        while (mapped_len < buf_len) {
                /* mapped_len == 0 means we should use the first tpd,
                   which is given by the caller */
                if (mapped_len == 0) {
                        use_tpd = tpd;
                } else {
                        use_tpd = atl1e_get_tpd(adapter);
                        memcpy(use_tpd, tpd, sizeof(struct atl1e_tpd_desc));
                }
                tx_buffer = atl1e_get_tx_buffer(adapter, use_tpd);
                tx_buffer->skb = NULL;

                tx_buffer->length = map_len =
                        ((buf_len - mapped_len) >= MAX_TX_BUF_LEN) ?
                        MAX_TX_BUF_LEN : (buf_len - mapped_len);
                tx_buffer->dma =
                        pci_map_single(adapter->pdev, skb->data + mapped_len,
                                       map_len, PCI_DMA_TODEVICE);

                if (dma_mapping_error(&adapter->pdev->dev, tx_buffer->dma)) {
                        /* We need to unwind the mappings we've done */
                        ring_end = adapter->tx_ring.next_to_use;
                        adapter->tx_ring.next_to_use = ring_start;
                        while (adapter->tx_ring.next_to_use != ring_end) {
                                tpd = atl1e_get_tpd(adapter);
                                tx_buffer = atl1e_get_tx_buffer(adapter, tpd);
                                pci_unmap_single(adapter->pdev, tx_buffer->dma,
                                                 tx_buffer->length, PCI_DMA_TODEVICE);
                        }
                        /* Reset the tx rings next pointer */
                        adapter->tx_ring.next_to_use = ring_start;
                        return -ENOSPC;
                }

                ATL1E_SET_PCIMAP_TYPE(tx_buffer, ATL1E_TX_PCIMAP_SINGLE);
                mapped_len += map_len;
                use_tpd->buffer_addr = cpu_to_le64(tx_buffer->dma);
                use_tpd->word2 = (use_tpd->word2 & (~TPD_BUFLEN_MASK)) |
                                 ((cpu_to_le32(tx_buffer->length) &
                                   TPD_BUFLEN_MASK) << TPD_BUFLEN_SHIFT);
        }

        for (f = 0; f < nr_frags; f++) {
                const struct skb_frag_struct *frag;
                u16 i;
                u16 seg_num;

                frag = &skb_shinfo(skb)->frags[f];
                buf_len = skb_frag_size(frag);

                seg_num = (buf_len + MAX_TX_BUF_LEN - 1) / MAX_TX_BUF_LEN;
                for (i = 0; i < seg_num; i++) {
                        use_tpd = atl1e_get_tpd(adapter);
                        memcpy(use_tpd, tpd, sizeof(struct atl1e_tpd_desc));

                        tx_buffer = atl1e_get_tx_buffer(adapter, use_tpd);
                        BUG_ON(tx_buffer->skb);

                        tx_buffer->skb = NULL;
                        tx_buffer->length =
                                (buf_len > MAX_TX_BUF_LEN) ?
                                MAX_TX_BUF_LEN : buf_len;
                        buf_len -= tx_buffer->length;

                        tx_buffer->dma = skb_frag_dma_map(&adapter->pdev->dev,
                                                          frag,
                                                          (i * MAX_TX_BUF_LEN),
                                                          tx_buffer->length,
                                                          DMA_TO_DEVICE);

                        if (dma_mapping_error(&adapter->pdev->dev, tx_buffer->dma)) {
                                /* We need to unwind the mappings we've done */
                                ring_end = adapter->tx_ring.next_to_use;
                                adapter->tx_ring.next_to_use = ring_start;
                                while (adapter->tx_ring.next_to_use != ring_end) {
                                        tpd = atl1e_get_tpd(adapter);
                                        tx_buffer = atl1e_get_tx_buffer(adapter, tpd);
                                        dma_unmap_page(&adapter->pdev->dev, tx_buffer->dma,
                                                       tx_buffer->length, DMA_TO_DEVICE);
                                }

                                /* Reset the ring next to use pointer */
                                adapter->tx_ring.next_to_use = ring_start;
                                return -ENOSPC;
                        }

                        ATL1E_SET_PCIMAP_TYPE(tx_buffer, ATL1E_TX_PCIMAP_PAGE);
                        use_tpd->buffer_addr = cpu_to_le64(tx_buffer->dma);
                        use_tpd->word2 = (use_tpd->word2 & (~TPD_BUFLEN_MASK)) |
                                         ((cpu_to_le32(tx_buffer->length) &
                                           TPD_BUFLEN_MASK) << TPD_BUFLEN_SHIFT);
                }
        }

        if ((tpd->word3 >> TPD_SEGMENT_EN_SHIFT) & TPD_SEGMENT_EN_MASK)
                /* note this one is a tcp header */
                tpd->word3 |= 1 << TPD_HDRFLAG_SHIFT;
        /* The last tpd */

        use_tpd->word3 |= 1 << TPD_EOP_SHIFT;
        /* The last buffer info contains the skb address,
           so it will be freed after unmap */
        tx_buffer->skb = skb;

        return 0;
}
static void atl1e_tx_queue(struct atl1e_adapter *adapter, u16 count,
                           struct atl1e_tpd_desc *tpd)
{
        struct atl1e_tx_ring *tx_ring = &adapter->tx_ring;
        /* Force memory writes to complete before letting h/w
         * know there are new descriptors to fetch.  (Only
         * applicable for weak-ordered memory model archs,
         * such as IA-64). */
        wmb();
        AT_WRITE_REG(&adapter->hw, REG_MB_TPD_PROD_IDX, tx_ring->next_to_use);
}

static netdev_tx_t atl1e_xmit_frame(struct sk_buff *skb,
                                    struct net_device *netdev)
{
        struct atl1e_adapter *adapter = netdev_priv(netdev);
        u16 tpd_req = 1;
        struct atl1e_tpd_desc *tpd;

        if (test_bit(__AT_DOWN, &adapter->flags)) {
                dev_kfree_skb_any(skb);
                return NETDEV_TX_OK;
        }

        if (unlikely(skb->len <= 0)) {
                dev_kfree_skb_any(skb);
                return NETDEV_TX_OK;
        }
        tpd_req = atl1e_cal_tdp_req(skb);

        if (atl1e_tpd_avail(adapter) < tpd_req) {
                /* not enough descriptors; just stop the queue */
                netif_stop_queue(netdev);
                return NETDEV_TX_BUSY;
        }

        tpd = atl1e_get_tpd(adapter);

        if (skb_vlan_tag_present(skb)) {
                u16 vlan_tag = skb_vlan_tag_get(skb);
                u16 atl1e_vlan_tag;

                tpd->word3 |= 1 << TPD_INS_VL_TAG_SHIFT;
                AT_VLAN_TAG_TO_TPD_TAG(vlan_tag, atl1e_vlan_tag);
                tpd->word2 |= (atl1e_vlan_tag & TPD_VLANTAG_MASK) <<
                              TPD_VLAN_SHIFT;
        }

        if (skb->protocol == htons(ETH_P_8021Q))
                tpd->word3 |= 1 << TPD_VL_TAGGED_SHIFT;

        if (skb_network_offset(skb) != ETH_HLEN)
                tpd->word3 |= 1 << TPD_ETHTYPE_SHIFT; /* 802.3 frame */

        /* do TSO and check sum */
        if (atl1e_tso_csum(adapter, skb, tpd) != 0) {
                dev_kfree_skb_any(skb);
                return NETDEV_TX_OK;
        }

        if (atl1e_tx_map(adapter, skb, tpd)) {
                dev_kfree_skb_any(skb);
                goto out;
        }

        atl1e_tx_queue(adapter, tpd_req, tpd);
out:
        return NETDEV_TX_OK;
}
static void atl1e_free_irq(struct atl1e_adapter *adapter)
{
        struct net_device *netdev = adapter->netdev;

        free_irq(adapter->pdev->irq, netdev);
}

static int atl1e_request_irq(struct atl1e_adapter *adapter)
{
        struct pci_dev    *pdev   = adapter->pdev;
        struct net_device *netdev = adapter->netdev;
        int err = 0;

        err = request_irq(pdev->irq, atl1e_intr, IRQF_SHARED, netdev->name,
                          netdev);
        if (err) {
                netdev_dbg(adapter->netdev,
                           "Unable to allocate interrupt Error: %d\n", err);
                return err;
        }
        netdev_dbg(netdev, "atl1e_request_irq OK\n");
        return err;
}

int atl1e_up(struct atl1e_adapter *adapter)
{
        struct net_device *netdev = adapter->netdev;
        int err = 0;
        u32 val;

        /* hardware has been reset, we need to reload some things */
        err = atl1e_init_hw(&adapter->hw);
        if (err) {
                err = -EIO;
                return err;
        }

        atl1e_init_ring_ptrs(adapter);
        atl1e_set_multi(netdev);
        atl1e_restore_vlan(adapter);

        if (atl1e_configure(adapter)) {
                err = -EIO;
                goto err_up;
        }

        clear_bit(__AT_DOWN, &adapter->flags);
        napi_enable(&adapter->napi);
        atl1e_irq_enable(adapter);
        val = AT_READ_REG(&adapter->hw, REG_MASTER_CTRL);
        AT_WRITE_REG(&adapter->hw, REG_MASTER_CTRL,
                     val | MASTER_CTRL_MANUAL_INT);

err_up:
        return err;
}
void atl1e_down(struct atl1e_adapter *adapter)
{
        struct net_device *netdev = adapter->netdev;

        /* signal that we're down so the interrupt handler does not
         * reschedule our watchdog timer */
        set_bit(__AT_DOWN, &adapter->flags);

        netif_stop_queue(netdev);

        /* reset MAC to disable all RX/TX */
        atl1e_reset_hw(&adapter->hw);
        msleep(1);

        napi_disable(&adapter->napi);
        atl1e_del_timer(adapter);
        atl1e_irq_disable(adapter);

        netif_carrier_off(netdev);
        adapter->link_speed = SPEED_0;
        adapter->link_duplex = -1;
        atl1e_clean_tx_ring(adapter);
        atl1e_clean_rx_ring(adapter);
}

/**
 * atl1e_open - Called when a network interface is made active
 * @netdev: network interface device structure
 *
 * Returns 0 on success, negative value on failure
 *
 * The open entry point is called when a network interface is made
 * active by the system (IFF_UP). At this point all resources needed
 * for transmit and receive operations are allocated, the interrupt
 * handler is registered with the OS, the watchdog timer is started,
 * and the stack is notified that the interface is ready.
 */
static int atl1e_open(struct net_device *netdev)
{
        struct atl1e_adapter *adapter = netdev_priv(netdev);
        int err;

        /* disallow open during test */
        if (test_bit(__AT_TESTING, &adapter->flags))
                return -EBUSY;

        /* allocate rx/tx dma buffer & descriptors */
        atl1e_init_ring_resources(adapter);
        err = atl1e_setup_ring_resources(adapter);
        if (unlikely(err))
                return err;

        err = atl1e_request_irq(adapter);
        if (unlikely(err))
                goto err_req_irq;

        err = atl1e_up(adapter);
        if (unlikely(err))
                goto err_up;

        return 0;

err_up:
        atl1e_free_irq(adapter);
err_req_irq:
        atl1e_free_ring_resources(adapter);
        atl1e_reset_hw(&adapter->hw);

        return err;
}

/**
 * atl1e_close - Disables a network interface
 * @netdev: network interface device structure
 *
 * Returns 0, this is not allowed to fail
 *
 * The close entry point is called when an interface is de-activated
 * by the OS. The hardware is still under the driver's control, but
 * needs to be disabled. A global MAC reset is issued to stop the
 * hardware, and all transmit and receive resources are freed.
 */
static int atl1e_close(struct net_device *netdev)
{
        struct atl1e_adapter *adapter = netdev_priv(netdev);

        WARN_ON(test_bit(__AT_RESETTING, &adapter->flags));
        atl1e_down(adapter);
        atl1e_free_irq(adapter);
        atl1e_free_ring_resources(adapter);

        return 0;
}
2069 static int atl1e_suspend(struct pci_dev *pdev, pm_message_t state)
2071 struct net_device *netdev = pci_get_drvdata(pdev);
2072 struct atl1e_adapter *adapter = netdev_priv(netdev);
2073 struct atl1e_hw *hw = &adapter->hw;
2075 u32 mac_ctrl_data = 0;
2076 u32 wol_ctrl_data = 0;
2077 u16 mii_advertise_data = 0;
2078 u16 mii_bmsr_data = 0;
2079 u16 mii_intr_status_data = 0;
2080 u32 wufc = adapter->wol;
2086 if (netif_running(netdev)) {
2087 WARN_ON(test_bit(__AT_RESETTING, &adapter->flags));
2088 atl1e_down(adapter);
2090 netif_device_detach(netdev);
2093 retval = pci_save_state(pdev);
	if (wufc) {
		/* get link status; BMSR_LSTATUS is latched low, so read
		 * the register twice to obtain the current state */
		atl1e_read_phy_reg(hw, MII_BMSR, &mii_bmsr_data);
		atl1e_read_phy_reg(hw, MII_BMSR, &mii_bmsr_data);

		mii_advertise_data = ADVERTISE_10HALF;

		if ((atl1e_write_phy_reg(hw, MII_CTRL1000, 0) != 0) ||
		    (atl1e_write_phy_reg(hw,
			   MII_ADVERTISE, mii_advertise_data) != 0) ||
		    (atl1e_phy_commit(hw)) != 0) {
			netdev_dbg(adapter->netdev, "set phy register failed\n");
			goto wol_dis;
		}

		hw->phy_configured = false; /* re-init PHY when resume */

		/* turn on magic packet wol */
		if (wufc & AT_WUFC_MAG)
			wol_ctrl_data |= WOL_MAGIC_EN | WOL_MAGIC_PME_EN;

		if (wufc & AT_WUFC_LNKC) {
			/* if the link was originally up, wait for it to
			 * come back before suspending */
			if (mii_bmsr_data & BMSR_LSTATUS) {
				for (i = 0; i < AT_SUSPEND_LINK_TIMEOUT; i++) {
					msleep(100);
					atl1e_read_phy_reg(hw, MII_BMSR,
							   &mii_bmsr_data);
					if (mii_bmsr_data & BMSR_LSTATUS)
						break;
				}

				if ((mii_bmsr_data & BMSR_LSTATUS) == 0)
					netdev_dbg(adapter->netdev,
						   "Link may change during suspend\n");
			}
			wol_ctrl_data |= WOL_LINK_CHG_EN | WOL_LINK_CHG_PME_EN;
			/* only a link-up event can wake the system */
			if (atl1e_write_phy_reg(hw, MII_INT_CTRL, 0x400) != 0) {
				netdev_dbg(adapter->netdev,
					   "write phy register failed\n");
				goto wol_dis;
			}
		}
		/* clear phy interrupt */
		atl1e_read_phy_reg(hw, MII_INT_STATUS, &mii_intr_status_data);
		/* Config MAC Ctrl register */
		mac_ctrl_data = MAC_CTRL_RX_EN;
		/* set to 10/100M half duplex */
		mac_ctrl_data |= MAC_CTRL_SPEED_10_100 << MAC_CTRL_SPEED_SHIFT;
		mac_ctrl_data |= (((u32)adapter->hw.preamble_len &
				   MAC_CTRL_PRMLEN_MASK) <<
				   MAC_CTRL_PRMLEN_SHIFT);

		__atl1e_vlan_mode(netdev->features, &mac_ctrl_data);

		/* a magic packet may be a broadcast, multicast or
		 * unicast frame */
		if (wufc & AT_WUFC_MAG)
			mac_ctrl_data |= MAC_CTRL_BC_EN;

		netdev_dbg(adapter->netdev, "suspend MAC=0x%x\n",
			   mac_ctrl_data);

		AT_WRITE_REG(hw, REG_WOL_CTRL, wol_ctrl_data);
		AT_WRITE_REG(hw, REG_MAC_CTRL, mac_ctrl_data);
		/* pcie patch */
		ctrl = AT_READ_REG(hw, REG_PCIE_PHYMISC);
		ctrl |= PCIE_PHYMISC_FORCE_RCV_DET;
		AT_WRITE_REG(hw, REG_PCIE_PHYMISC, ctrl);
		pci_enable_wake(pdev, pci_choose_state(pdev, state), 1);
		goto suspend_exit;
	}
wol_dis:
	/* WOL disabled */
	AT_WRITE_REG(hw, REG_WOL_CTRL, 0);

	/* pcie patch */
	ctrl = AT_READ_REG(hw, REG_PCIE_PHYMISC);
	ctrl |= PCIE_PHYMISC_FORCE_RCV_DET;
	AT_WRITE_REG(hw, REG_PCIE_PHYMISC, ctrl);

	atl1e_force_ps(hw);
	hw->phy_configured = false; /* re-init PHY when resume */

	pci_enable_wake(pdev, pci_choose_state(pdev, state), 0);

suspend_exit:
	if (netif_running(netdev))
		atl1e_free_irq(adapter);

	pci_disable_device(pdev);

	pci_set_power_state(pdev, pci_choose_state(pdev, state));

	return 0;
}
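/*
 * adapter->wol is normally configured from userspace through the driver's
 * ethtool ops before suspending, e.g.:
 *
 *	ethtool -s <iface> wol g	# request magic-packet wake
 *	ethtool -s <iface> wol d	# disable wake events
 */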
static int atl1e_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct atl1e_adapter *adapter = netdev_priv(netdev);
	int err;

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);

	err = pci_enable_device(pdev);
	if (err) {
		netdev_err(adapter->netdev,
			   "Cannot enable PCI device from suspend\n");
		return err;
	}

	pci_set_master(pdev);

	AT_READ_REG(&adapter->hw, REG_WOL_CTRL); /* clear WOL status */

	pci_enable_wake(pdev, PCI_D3hot, 0);
	pci_enable_wake(pdev, PCI_D3cold, 0);

	AT_WRITE_REG(&adapter->hw, REG_WOL_CTRL, 0);

	if (netif_running(netdev)) {
		err = atl1e_request_irq(adapter);
		if (err)
			return err;
	}

	atl1e_reset_hw(&adapter->hw);

	if (netif_running(netdev))
		atl1e_up(adapter);

	netif_device_attach(netdev);

	return 0;
}
static void atl1e_shutdown(struct pci_dev *pdev)
{
	atl1e_suspend(pdev, PMSG_SUSPEND);
}
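/*
 * Routing shutdown through the suspend path leaves any configured wake
 * events armed, so on platforms that keep PME active in soft-off a magic
 * packet can still power the machine back on.
 */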
static const struct net_device_ops atl1e_netdev_ops = {
	.ndo_open		= atl1e_open,
	.ndo_stop		= atl1e_close,
	.ndo_start_xmit		= atl1e_xmit_frame,
	.ndo_get_stats		= atl1e_get_stats,
	.ndo_set_rx_mode	= atl1e_set_multi,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= atl1e_set_mac_addr,
	.ndo_fix_features	= atl1e_fix_features,
	.ndo_set_features	= atl1e_set_features,
	.ndo_change_mtu		= atl1e_change_mtu,
	.ndo_do_ioctl		= atl1e_ioctl,
	.ndo_tx_timeout		= atl1e_tx_timeout,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= atl1e_netpoll,
#endif
};
static int atl1e_init_netdev(struct net_device *netdev, struct pci_dev *pdev)
{
	SET_NETDEV_DEV(netdev, &pdev->dev);
	pci_set_drvdata(pdev, netdev);

	netdev->netdev_ops = &atl1e_netdev_ops;
	netdev->watchdog_timeo = AT_TX_WATCHDOG;
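	/*
	 * The 42 - 8170 bounds below follow from the constants used:
	 * ETH_ZLEN (60) - ETH_HLEN (14) - VLAN_HLEN (4) = 42, and
	 * MAX_JUMBO_FRAME_SIZE (0x2000) - ETH_HLEN (14) -
	 * ETH_FCS_LEN (4) - VLAN_HLEN (4) = 8170.
	 */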
	/* MTU range: 42 - 8170 */
	netdev->min_mtu = ETH_ZLEN - (ETH_HLEN + VLAN_HLEN);
	netdev->max_mtu = MAX_JUMBO_FRAME_SIZE -
			  (ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);
	atl1e_set_ethtool_ops(netdev);

	netdev->hw_features = NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_TSO |
			      NETIF_F_HW_VLAN_CTAG_RX;
	netdev->features = netdev->hw_features | NETIF_F_HW_VLAN_CTAG_TX;
	/* not enabled by default */
	netdev->hw_features |= NETIF_F_RXALL | NETIF_F_RXFCS;

	return 0;
}
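/*
 * Only the flags in hw_features can be toggled from userspace (e.g. with
 * "ethtool -K"); NETIF_F_HW_VLAN_CTAG_TX is set in features alone, so
 * VLAN tag insertion stays enabled and cannot be switched off at runtime.
 */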
/**
 * atl1e_probe - Device Initialization Routine
 * @pdev: PCI device information struct
 * @ent: entry in atl1e_pci_tbl
 *
 * Returns 0 on success, negative on failure
 *
 * atl1e_probe initializes an adapter identified by a pci_dev structure.
 * The OS initialization, configuring of the adapter private structure,
 * and a hardware reset occur.
 */
static int atl1e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct net_device *netdev;
	struct atl1e_adapter *adapter = NULL;
	static int cards_found;
	int err = 0;

	err = pci_enable_device(pdev);
	if (err) {
		dev_err(&pdev->dev, "cannot enable PCI device\n");
		return err;
	}

	/*
	 * The atl1e chip can DMA to 64-bit addresses, but it uses a single
	 * shared register for the high 32 bits, so only a single, aligned,
	 * 4 GB physical address range can be used at a time.
	 *
	 * Supporting 64-bit DMA on this hardware is more trouble than it's
	 * worth. It is far easier to limit to 32-bit DMA than update
	 * various kernel subsystems to support the mechanics required by a
	 * fixed-high-32-bit system.
	 */
	if ((pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) ||
	    (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)) != 0)) {
		dev_err(&pdev->dev, "No usable DMA configuration, aborting\n");
		err = -EIO;
		goto err_dma;
	}

	err = pci_request_regions(pdev, atl1e_driver_name);
	if (err) {
		dev_err(&pdev->dev, "cannot obtain PCI resources\n");
		goto err_pci_reg;
	}

	pci_set_master(pdev);
	netdev = alloc_etherdev(sizeof(struct atl1e_adapter));
	if (netdev == NULL) {
		err = -ENOMEM;
		goto err_alloc_etherdev;
	}

	err = atl1e_init_netdev(netdev, pdev);
	if (err) {
		netdev_err(netdev, "init netdevice failed\n");
		goto err_init_netdev;
	}
	adapter = netdev_priv(netdev);
	adapter->bd_number = cards_found;
	adapter->netdev = netdev;
	adapter->pdev = pdev;
	adapter->hw.adapter = adapter;
	adapter->hw.hw_addr = pci_iomap(pdev, BAR_0, 0);
	if (!adapter->hw.hw_addr) {
		err = -EIO;
		netdev_err(netdev, "cannot map device registers\n");
		goto err_ioremap;
	}

	/* init mii data */
	adapter->mii.dev = netdev;
	adapter->mii.mdio_read = atl1e_mdio_read;
	adapter->mii.mdio_write = atl1e_mdio_write;
	adapter->mii.phy_id_mask = 0x1f;
	adapter->mii.reg_num_mask = MDIO_REG_ADDR_MASK;

	netif_napi_add(netdev, &adapter->napi, atl1e_clean, 64);

	timer_setup(&adapter->phy_config_timer, atl1e_phy_config, 0);

	/* get user settings */
	atl1e_check_options(adapter);
	/*
	 * Mark all PCI regions associated with PCI device
	 * pdev as being reserved by owner atl1e_driver_name.
	 * Enable bus-mastering on the device and call
	 * pcibios_set_master to do the needed arch-specific settings.
	 */
	atl1e_setup_pcicmd(pdev);
	/* setup the private structure */
	err = atl1e_sw_init(adapter);
	if (err) {
		netdev_err(netdev, "net device private data init failed\n");
		goto err_sw_init;
	}

	/* Init GPHY as early as possible due to power saving issue */
	atl1e_phy_init(&adapter->hw);
	/* reset the controller to
	 * put the device in a known good starting state */
	err = atl1e_reset_hw(&adapter->hw);
	if (err) {
		err = -EIO;
		goto err_reset;
	}

	if (atl1e_read_mac_addr(&adapter->hw) != 0) {
		err = -EIO;
		netdev_err(netdev, "get mac address failed\n");
		goto err_eeprom;
	}

	memcpy(netdev->dev_addr, adapter->hw.mac_addr, netdev->addr_len);
	netdev_dbg(netdev, "mac address : %pM\n", adapter->hw.mac_addr);

	INIT_WORK(&adapter->reset_task, atl1e_reset_task);
	INIT_WORK(&adapter->link_chg_task, atl1e_link_chg_task);
	netif_set_gso_max_size(netdev, MAX_TSO_SEG_SIZE);
	err = register_netdev(netdev);
	if (err) {
		netdev_err(netdev, "register netdevice failed\n");
		goto err_register;
	}

	/* assume we have no link for now */
	netif_stop_queue(netdev);
	netif_carrier_off(netdev);

	cards_found++;

	return 0;

err_reset:
err_register:
err_sw_init:
err_eeprom:
	pci_iounmap(pdev, adapter->hw.hw_addr);
err_init_netdev:
err_ioremap:
	free_netdev(netdev);
err_alloc_etherdev:
	pci_release_regions(pdev);
err_pci_reg:
err_dma:
	pci_disable_device(pdev);
	return err;
}
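/*
 * Note on the unwind above: the error labels release resources in the
 * reverse order of their acquisition, so each failure point skips the
 * cleanup for anything it never obtained.
 */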
/**
 * atl1e_remove - Device Removal Routine
 * @pdev: PCI device information struct
 *
 * atl1e_remove is called by the PCI subsystem to alert the driver
 * that it should release a PCI device. This could be caused by a
 * Hot-Plug event, or because the driver is going to be removed from
 * memory.
 */
static void atl1e_remove(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct atl1e_adapter *adapter = netdev_priv(netdev);

	/*
	 * flush_scheduled_work() may reschedule our watchdog task, so
	 * explicitly disable the watchdog tasks from being rescheduled
	 */
	set_bit(__AT_DOWN, &adapter->flags);

	atl1e_del_timer(adapter);
	atl1e_cancel_work(adapter);

	unregister_netdev(netdev);
	atl1e_free_ring_resources(adapter);
	atl1e_force_ps(&adapter->hw);
	pci_iounmap(pdev, adapter->hw.hw_addr);
	pci_release_regions(pdev);
	free_netdev(netdev);
	pci_disable_device(pdev);
}
/**
 * atl1e_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t
atl1e_io_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct atl1e_adapter *adapter = netdev_priv(netdev);

	netif_device_detach(netdev);

	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	if (netif_running(netdev))
		atl1e_down(adapter);

	pci_disable_device(pdev);

	/* Request a slot reset. */
	return PCI_ERS_RESULT_NEED_RESET;
}
/**
 * atl1e_io_slot_reset - called after the pci bus has been reset.
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot. Implementation
 * resembles the first half of the e1000_resume routine.
 */
static pci_ers_result_t atl1e_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct atl1e_adapter *adapter = netdev_priv(netdev);

	if (pci_enable_device(pdev)) {
		netdev_err(adapter->netdev,
			   "Cannot re-enable PCI device after reset\n");
		return PCI_ERS_RESULT_DISCONNECT;
	}
	pci_set_master(pdev);

	pci_enable_wake(pdev, PCI_D3hot, 0);
	pci_enable_wake(pdev, PCI_D3cold, 0);

	atl1e_reset_hw(&adapter->hw);

	return PCI_ERS_RESULT_RECOVERED;
}
/**
 * atl1e_io_resume - called when traffic can start flowing again.
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us that
 * it's OK to resume normal operation. Implementation resembles the
 * second half of the atl1e_resume routine.
 */
static void atl1e_io_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct atl1e_adapter *adapter = netdev_priv(netdev);

	if (netif_running(netdev)) {
		if (atl1e_up(adapter)) {
			netdev_err(adapter->netdev,
				   "can't bring device back up after reset\n");
			return;
		}
	}

	netif_device_attach(netdev);
}
static const struct pci_error_handlers atl1e_err_handler = {
	.error_detected = atl1e_io_error_detected,
	.slot_reset = atl1e_io_slot_reset,
	.resume = atl1e_io_resume,
};
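/*
 * The PCI error-recovery core calls these hooks in order: error_detected
 * when a bus error is reported, slot_reset after the link has been reset,
 * and resume once traffic may flow again.
 */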
static struct pci_driver atl1e_driver = {
	.name     = atl1e_driver_name,
	.id_table = atl1e_pci_tbl,
	.probe    = atl1e_probe,
	.remove   = atl1e_remove,
	/* Power Management Hooks */
#ifdef CONFIG_PM
	.suspend  = atl1e_suspend,
	.resume   = atl1e_resume,
#endif
	.shutdown = atl1e_shutdown,
	.err_handler = &atl1e_err_handler
};

module_pci_driver(atl1e_driver);
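/*
 * module_pci_driver() generates the module init/exit boilerplate. It
 * expands to roughly the following (a sketch of the helper macro from
 * <linux/pci.h>, shown for illustration):
 *
 *	static int __init atl1e_driver_init(void)
 *	{
 *		return pci_register_driver(&atl1e_driver);
 *	}
 *	module_init(atl1e_driver_init);
 *
 *	static void __exit atl1e_driver_exit(void)
 *	{
 *		pci_unregister_driver(&atl1e_driver);
 *	}
 *	module_exit(atl1e_driver_exit);
 */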