/*
 * Copyright (c) 2013 Johannes Berg <johannes@sipsolutions.net>
 *
 * This file is free software: you may copy, redistribute and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation, either version 2 of the License, or (at your
 * option) any later version.
 *
 * This file is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 *
 * This file incorporates work covered by the following copyright and
 * permission notice:
 *
 * Copyright (c) 2012 Qualcomm Atheros, Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <linux/module.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/if_vlan.h>
#include <linux/mdio.h>
#include <linux/aer.h>
#include <linux/bitops.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <net/ip6_checksum.h>
#include <linux/crc32.h>

#include "alx.h"
#include "hw.h"
#include "reg.h"

static const char alx_drv_name[] = "alx";

static void alx_free_txbuf(struct alx_tx_queue *txq, int entry)
{
	struct alx_buffer *txb = &txq->bufs[entry];

	if (dma_unmap_len(txb, size)) {
		dma_unmap_single(txq->dev,
				 dma_unmap_addr(txb, dma),
				 dma_unmap_len(txb, size),
				 DMA_TO_DEVICE);
		dma_unmap_len_set(txb, size, 0);
	}

	if (txb->skb) {
		dev_kfree_skb_any(txb->skb);
		txb->skb = NULL;
	}
}

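/* Refill the RX free-descriptor ring with fresh skbs. write_idx chases
 * read_idx: a slot is free while its skb pointer is NULL, and refilling
 * stops one slot short of read_idx so that a full ring and an empty ring
 * stay distinguishable.
 */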
static int alx_refill_rx_ring(struct alx_priv *alx, gfp_t gfp)
{
	struct alx_rx_queue *rxq = alx->qnapi[0]->rxq;
	struct sk_buff *skb;
	struct alx_buffer *cur_buf;
	dma_addr_t dma;
	u16 cur, next, count = 0;

	next = cur = rxq->write_idx;
	if (++next == alx->rx_ringsz)
		next = 0;
	cur_buf = &rxq->bufs[cur];

	while (!cur_buf->skb && next != rxq->read_idx) {
		struct alx_rfd *rfd = &rxq->rfd[cur];

		/*
		 * When the DMA RX address is set to something like
		 * 0x....fc0, it is very likely to cause a DMA RFD
		 * overflow issue.
		 *
		 * To work around it, we allocate the rx skb with 64
		 * bytes of extra space, and offset the address whenever
		 * 0x....fc0 is detected.
		 */
		skb = __netdev_alloc_skb(alx->dev, alx->rxbuf_size + 64, gfp);
		if (!skb)
			break;

		if (((unsigned long)skb->data & 0xfff) == 0xfc0)
			skb_reserve(skb, 64);

		dma = dma_map_single(&alx->hw.pdev->dev,
				     skb->data, alx->rxbuf_size,
				     DMA_FROM_DEVICE);
		if (dma_mapping_error(&alx->hw.pdev->dev, dma)) {
			dev_kfree_skb(skb);
			break;
		}

		/* Unfortunately, RX descriptor buffers must be 4-byte
		 * aligned, so we can't use IP alignment.
		 */
		if (WARN_ON(dma & 3)) {
			dev_kfree_skb(skb);
			break;
		}

		cur_buf->skb = skb;
		dma_unmap_len_set(cur_buf, size, alx->rxbuf_size);
		dma_unmap_addr_set(cur_buf, dma, dma);
		rfd->addr = cpu_to_le64(dma);

		cur = next;
		if (++next == alx->rx_ringsz)
			next = 0;
		cur_buf = &rxq->bufs[cur];
		count++;
	}

	if (count) {
		/* flush all updates before updating hardware */
		wmb();
		rxq->write_idx = cur;
		alx_write_mem16(&alx->hw, ALX_RFD_PIDX, cur);
	}

	return count;
}

static struct alx_tx_queue *alx_tx_queue_mapping(struct alx_priv *alx,
						 struct sk_buff *skb)
{
	unsigned int r_idx = skb->queue_mapping;

	if (r_idx >= alx->num_txq)
		r_idx = r_idx % alx->num_txq;

	return alx->qnapi[r_idx]->txq;
}

static struct netdev_queue *alx_get_tx_queue(const struct alx_tx_queue *txq)
{
	return netdev_get_tx_queue(txq->netdev, txq->queue_idx);
}

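/* Free TPD slots in the ring. One slot is always left unused so that
 * write_idx == read_idx unambiguously means "empty"; for example, with
 * count = 256, read_idx = 10 and write_idx = 250, 256 + 10 - 250 - 1 = 15
 * descriptors are still available.
 */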
static inline int alx_tpd_avail(struct alx_tx_queue *txq)
{
	if (txq->write_idx >= txq->read_idx)
		return txq->count + txq->read_idx - txq->write_idx - 1;
	return txq->read_idx - txq->write_idx - 1;
}

static bool alx_clean_tx_irq(struct alx_tx_queue *txq)
{
	struct alx_priv *alx;
	struct netdev_queue *tx_queue;
	u16 hw_read_idx, sw_read_idx;
	unsigned int total_bytes = 0, total_packets = 0;
	int budget = ALX_DEFAULT_TX_WORK;

	alx = netdev_priv(txq->netdev);
	tx_queue = alx_get_tx_queue(txq);

	sw_read_idx = txq->read_idx;
	hw_read_idx = alx_read_mem16(&alx->hw, txq->c_reg);

	if (sw_read_idx != hw_read_idx) {
		while (sw_read_idx != hw_read_idx && budget > 0) {
			struct sk_buff *skb;

			skb = txq->bufs[sw_read_idx].skb;
			if (skb) {
				total_bytes += skb->len;
				total_packets++;
				budget--;
			}

			alx_free_txbuf(txq, sw_read_idx);

			if (++sw_read_idx == txq->count)
				sw_read_idx = 0;
		}
		txq->read_idx = sw_read_idx;

		netdev_tx_completed_queue(tx_queue, total_packets, total_bytes);
	}

	if (netif_tx_queue_stopped(tx_queue) && netif_carrier_ok(alx->dev) &&
	    alx_tpd_avail(txq) > txq->count / 4)
		netif_tx_wake_queue(tx_queue);

	return sw_read_idx == hw_read_idx;
}

static void alx_schedule_link_check(struct alx_priv *alx)
{
	schedule_work(&alx->link_check_wk);
}

static void alx_schedule_reset(struct alx_priv *alx)
{
	schedule_work(&alx->reset_wk);
}

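/* RX completion. Each received packet consumes one free descriptor (RFD)
 * and produces one return descriptor (RRD). The RRD's start index must
 * match our read index and its descriptor count must be 1; anything else
 * means the rings are out of sync and a full reset is scheduled.
 */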
static int alx_clean_rx_irq(struct alx_rx_queue *rxq, int budget)
{
	struct alx_priv *alx;
	struct alx_rrd *rrd;
	struct alx_buffer *rxb;
	struct sk_buff *skb;
	u16 length, rfd_cleaned = 0;
	int work = 0;

	alx = netdev_priv(rxq->netdev);

	while (work < budget) {
		rrd = &rxq->rrd[rxq->rrd_read_idx];
		if (!(rrd->word3 & cpu_to_le32(1 << RRD_UPDATED_SHIFT)))
			break;
		rrd->word3 &= ~cpu_to_le32(1 << RRD_UPDATED_SHIFT);

		if (ALX_GET_FIELD(le32_to_cpu(rrd->word0),
				  RRD_SI) != rxq->read_idx ||
		    ALX_GET_FIELD(le32_to_cpu(rrd->word0),
				  RRD_NOR) != 1) {
			alx_schedule_reset(alx);
			return work;
		}

		rxb = &rxq->bufs[rxq->read_idx];
		dma_unmap_single(rxq->dev,
				 dma_unmap_addr(rxb, dma),
				 dma_unmap_len(rxb, size),
				 DMA_FROM_DEVICE);
		dma_unmap_len_set(rxb, size, 0);
		skb = rxb->skb;
		rxb->skb = NULL;

		if (rrd->word3 & cpu_to_le32(1 << RRD_ERR_RES_SHIFT) ||
		    rrd->word3 & cpu_to_le32(1 << RRD_ERR_LEN_SHIFT)) {
			rrd->word3 = 0;
			dev_kfree_skb_any(skb);
			goto next_pkt;
		}

		length = ALX_GET_FIELD(le32_to_cpu(rrd->word3),
				       RRD_PKTLEN) - ETH_FCS_LEN;
		skb_put(skb, length);
		skb->protocol = eth_type_trans(skb, rxq->netdev);

		skb_checksum_none_assert(skb);
		if (alx->dev->features & NETIF_F_RXCSUM &&
		    !(rrd->word3 & (cpu_to_le32(1 << RRD_ERR_L4_SHIFT) |
				    cpu_to_le32(1 << RRD_ERR_IPV4_SHIFT)))) {
			switch (ALX_GET_FIELD(le32_to_cpu(rrd->word2),
					      RRD_PID)) {
			case RRD_PID_IPV6UDP:
			case RRD_PID_IPV4UDP:
			case RRD_PID_IPV4TCP:
			case RRD_PID_IPV6TCP:
				skb->ip_summed = CHECKSUM_UNNECESSARY;
				break;
			}
		}

		napi_gro_receive(&rxq->np->napi, skb);
		work++;

next_pkt:
		if (++rxq->read_idx == rxq->count)
			rxq->read_idx = 0;
		if (++rxq->rrd_read_idx == rxq->count)
			rxq->rrd_read_idx = 0;

		if (++rfd_cleaned > ALX_RX_ALLOC_THRESH)
			rfd_cleaned -= alx_refill_rx_ring(alx, GFP_ATOMIC);
	}

	if (rfd_cleaned)
		alx_refill_rx_ring(alx, GFP_ATOMIC);

	return work;
}

static int alx_poll(struct napi_struct *napi, int budget)
{
	struct alx_napi *np = container_of(napi, struct alx_napi, napi);
	struct alx_priv *alx = np->alx;
	struct alx_hw *hw = &alx->hw;
	unsigned long flags;
	bool tx_complete = true;
	int work = 0;

	if (np->txq)
		tx_complete = alx_clean_tx_irq(np->txq);
	if (np->rxq)
		work = alx_clean_rx_irq(np->rxq, budget);

	if (!tx_complete || work == budget)
		return budget;

	napi_complete_done(&np->napi, work);

	/* enable interrupt */
	if (alx->hw.pdev->msix_enabled) {
		alx_mask_msix(hw, np->vec_idx, false);
	} else {
		spin_lock_irqsave(&alx->irq_lock, flags);
		alx->int_mask |= ALX_ISR_TX_Q0 | ALX_ISR_RX_Q0;
		alx_write_mem32(hw, ALX_IMR, alx->int_mask);
		spin_unlock_irqrestore(&alx->irq_lock, flags);
	}

	alx_post_write(hw);

	return work;
}

static bool alx_intr_handle_misc(struct alx_priv *alx, u32 intr)
{
	struct alx_hw *hw = &alx->hw;

	if (intr & ALX_ISR_FATAL) {
		netif_warn(alx, hw, alx->dev,
			   "fatal interrupt 0x%x, resetting\n", intr);
		alx_schedule_reset(alx);
		return true;
	}

	if (intr & ALX_ISR_ALERT)
		netdev_warn(alx->dev, "alert interrupt: 0x%x\n", intr);

	if (intr & ALX_ISR_PHY) {
		/* suppress the PHY interrupt: its source is internal to
		 * the PHY, and the interrupt status can only be cleared
		 * once the PHY's internal status has been cleared by the
		 * link check.
		 */
		alx->int_mask &= ~ALX_ISR_PHY;
		alx_write_mem32(hw, ALX_IMR, alx->int_mask);
		alx_schedule_link_check(alx);
	}

	return false;
}

static irqreturn_t alx_intr_handle(struct alx_priv *alx, u32 intr)
{
	struct alx_hw *hw = &alx->hw;

	spin_lock(&alx->irq_lock);

	/* ACK interrupt */
	alx_write_mem32(hw, ALX_ISR, intr | ALX_ISR_DIS);
	intr &= alx->int_mask;

	if (alx_intr_handle_misc(alx, intr))
		goto out;

	if (intr & (ALX_ISR_TX_Q0 | ALX_ISR_RX_Q0)) {
		napi_schedule(&alx->qnapi[0]->napi);
		/* mask rx/tx interrupts, re-enable them when napi completes */
		alx->int_mask &= ~ALX_ISR_ALL_QUEUES;
		alx_write_mem32(hw, ALX_IMR, alx->int_mask);
	}

	/* clear interrupt status */
	alx_write_mem32(hw, ALX_ISR, 0);

out:
	spin_unlock(&alx->irq_lock);
	return IRQ_HANDLED;
}

static irqreturn_t alx_intr_msix_ring(int irq, void *data)
{
	struct alx_napi *np = data;
	struct alx_hw *hw = &np->alx->hw;

	/* mask interrupt to ACK chip */
	alx_mask_msix(hw, np->vec_idx, true);
	/* clear interrupt status */
	alx_write_mem32(hw, ALX_ISR, np->vec_mask);

	napi_schedule(&np->napi);

	return IRQ_HANDLED;
}

static irqreturn_t alx_intr_msix_misc(int irq, void *data)
{
	struct alx_priv *alx = data;
	struct alx_hw *hw = &alx->hw;
	u32 intr;

	/* mask interrupt to ACK chip */
	alx_mask_msix(hw, 0, true);

	/* read interrupt status */
	intr = alx_read_mem32(hw, ALX_ISR);
	intr &= (alx->int_mask & ~ALX_ISR_ALL_QUEUES);

	if (alx_intr_handle_misc(alx, intr))
		return IRQ_HANDLED;

	/* clear interrupt status */
	alx_write_mem32(hw, ALX_ISR, intr);

	/* enable interrupt again */
	alx_mask_msix(hw, 0, false);

	return IRQ_HANDLED;
}

static irqreturn_t alx_intr_msi(int irq, void *data)
{
	struct alx_priv *alx = data;

	return alx_intr_handle(alx, alx_read_mem32(&alx->hw, ALX_ISR));
}

static irqreturn_t alx_intr_legacy(int irq, void *data)
{
	struct alx_priv *alx = data;
	struct alx_hw *hw = &alx->hw;
	u32 intr;

	intr = alx_read_mem32(hw, ALX_ISR);

	if (intr & ALX_ISR_DIS || !(intr & alx->int_mask))
		return IRQ_NONE;

	return alx_intr_handle(alx, intr);
}

static const u16 txring_header_reg[] = {ALX_TPD_PRI0_ADDR_LO,
					ALX_TPD_PRI1_ADDR_LO,
					ALX_TPD_PRI2_ADDR_LO,
					ALX_TPD_PRI3_ADDR_LO};

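/* Reset the software ring indices and program the ring base addresses.
 * Note the chip has only one high-32-bit base register each for TX and
 * RX, which is why all descriptor rings come from a single allocation.
 */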
static void alx_init_ring_ptrs(struct alx_priv *alx)
{
	struct alx_hw *hw = &alx->hw;
	u32 addr_hi = ((u64)alx->descmem.dma) >> 32;
	struct alx_napi *np;
	int i;

	for (i = 0; i < alx->num_napi; i++) {
		np = alx->qnapi[i];
		if (np->txq) {
			np->txq->read_idx = 0;
			np->txq->write_idx = 0;
			alx_write_mem32(hw,
					txring_header_reg[np->txq->queue_idx],
					np->txq->tpd_dma);
		}

		if (np->rxq) {
			np->rxq->read_idx = 0;
			np->rxq->write_idx = 0;
			np->rxq->rrd_read_idx = 0;
			alx_write_mem32(hw, ALX_RRD_ADDR_LO, np->rxq->rrd_dma);
			alx_write_mem32(hw, ALX_RFD_ADDR_LO, np->rxq->rfd_dma);
		}
	}

	alx_write_mem32(hw, ALX_TX_BASE_ADDR_HI, addr_hi);
	alx_write_mem32(hw, ALX_TPD_RING_SZ, alx->tx_ringsz);

	alx_write_mem32(hw, ALX_RX_BASE_ADDR_HI, addr_hi);
	alx_write_mem32(hw, ALX_RRD_RING_SZ, alx->rx_ringsz);
	alx_write_mem32(hw, ALX_RFD_RING_SZ, alx->rx_ringsz);
	alx_write_mem32(hw, ALX_RFD_BUF_SZ, alx->rxbuf_size);

	/* load these pointers into the chip */
	alx_write_mem32(hw, ALX_SRAM9, ALX_SRAM_LOAD_PTR);
}

static void alx_free_txring_buf(struct alx_tx_queue *txq)
{
	int i;

	if (!txq->bufs)
		return;

	for (i = 0; i < txq->count; i++)
		alx_free_txbuf(txq, i);

	memset(txq->bufs, 0, txq->count * sizeof(struct alx_buffer));
	memset(txq->tpd, 0, txq->count * sizeof(struct alx_txd));
	txq->write_idx = 0;
	txq->read_idx = 0;

	netdev_tx_reset_queue(alx_get_tx_queue(txq));
}

static void alx_free_rxring_buf(struct alx_rx_queue *rxq)
{
	struct alx_buffer *cur_buf;
	u16 i;

	if (!rxq->bufs)
		return;

	for (i = 0; i < rxq->count; i++) {
		cur_buf = rxq->bufs + i;
		if (cur_buf->skb) {
			dma_unmap_single(rxq->dev,
					 dma_unmap_addr(cur_buf, dma),
					 dma_unmap_len(cur_buf, size),
					 DMA_FROM_DEVICE);
			dev_kfree_skb(cur_buf->skb);
			cur_buf->skb = NULL;
			dma_unmap_len_set(cur_buf, size, 0);
			dma_unmap_addr_set(cur_buf, dma, 0);
		}
	}

	rxq->write_idx = 0;
	rxq->read_idx = 0;
	rxq->rrd_read_idx = 0;
}

static void alx_free_buffers(struct alx_priv *alx)
{
	int i;

	for (i = 0; i < alx->num_txq; i++)
		if (alx->qnapi[i] && alx->qnapi[i]->txq)
			alx_free_txring_buf(alx->qnapi[i]->txq);

	if (alx->qnapi[0] && alx->qnapi[0]->rxq)
		alx_free_rxring_buf(alx->qnapi[0]->rxq);
}

static int alx_reinit_rings(struct alx_priv *alx)
{
	alx_free_buffers(alx);

	alx_init_ring_ptrs(alx);

	if (!alx_refill_rx_ring(alx, GFP_KERNEL))
		return -ENOMEM;

	return 0;
}

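/* Hash a multicast address into the two 32-bit hash-table registers:
 * bit 31 of the CRC selects the register, bits 30..26 select the bit.
 * For example, crc32 = 0xDEADBEEF sets bit 23 of ALX_HASH_TBL1.
 */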
static void alx_add_mc_addr(struct alx_hw *hw, const u8 *addr, u32 *mc_hash)
{
	u32 crc32, bit, reg;

	crc32 = ether_crc(ETH_ALEN, addr);
	reg = (crc32 >> 31) & 0x1;
	bit = (crc32 >> 26) & 0x1F;

	mc_hash[reg] |= BIT(bit);
}

static void __alx_set_rx_mode(struct net_device *netdev)
{
	struct alx_priv *alx = netdev_priv(netdev);
	struct alx_hw *hw = &alx->hw;
	struct netdev_hw_addr *ha;
	u32 mc_hash[2] = {};

	if (!(netdev->flags & IFF_ALLMULTI)) {
		netdev_for_each_mc_addr(ha, netdev)
			alx_add_mc_addr(hw, ha->addr, mc_hash);

		alx_write_mem32(hw, ALX_HASH_TBL0, mc_hash[0]);
		alx_write_mem32(hw, ALX_HASH_TBL1, mc_hash[1]);
	}

	hw->rx_ctrl &= ~(ALX_MAC_CTRL_MULTIALL_EN | ALX_MAC_CTRL_PROMISC_EN);
	if (netdev->flags & IFF_PROMISC)
		hw->rx_ctrl |= ALX_MAC_CTRL_PROMISC_EN;
	if (netdev->flags & IFF_ALLMULTI)
		hw->rx_ctrl |= ALX_MAC_CTRL_MULTIALL_EN;

	alx_write_mem32(hw, ALX_MAC_CTRL, hw->rx_ctrl);
}

static void alx_set_rx_mode(struct net_device *netdev)
{
	__alx_set_rx_mode(netdev);
}

static int alx_set_mac_address(struct net_device *netdev, void *data)
{
	struct alx_priv *alx = netdev_priv(netdev);
	struct alx_hw *hw = &alx->hw;
	struct sockaddr *addr = data;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	if (netdev->addr_assign_type & NET_ADDR_RANDOM)
		netdev->addr_assign_type ^= NET_ADDR_RANDOM;

	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
	memcpy(hw->mac_addr, addr->sa_data, netdev->addr_len);
	alx_set_macaddr(hw, hw->mac_addr);

	return 0;
}

static int alx_alloc_tx_ring(struct alx_priv *alx, struct alx_tx_queue *txq,
			     int offset)
{
	txq->bufs = kcalloc(txq->count, sizeof(struct alx_buffer), GFP_KERNEL);
	if (!txq->bufs)
		return -ENOMEM;

	txq->tpd = alx->descmem.virt + offset;
	txq->tpd_dma = alx->descmem.dma + offset;
	offset += sizeof(struct alx_txd) * txq->count;

	return offset;
}

static int alx_alloc_rx_ring(struct alx_priv *alx, struct alx_rx_queue *rxq,
			     int offset)
{
	rxq->bufs = kcalloc(rxq->count, sizeof(struct alx_buffer), GFP_KERNEL);
	if (!rxq->bufs)
		return -ENOMEM;

	rxq->rrd = alx->descmem.virt + offset;
	rxq->rrd_dma = alx->descmem.dma + offset;
	offset += sizeof(struct alx_rrd) * rxq->count;

	rxq->rfd = alx->descmem.virt + offset;
	rxq->rfd_dma = alx->descmem.dma + offset;
	offset += sizeof(struct alx_rfd) * rxq->count;

	return offset;
}

static int alx_alloc_rings(struct alx_priv *alx)
{
	int i, offset = 0;

	/* physical tx/rx ring descriptors
	 *
	 * Allocate them as a single chunk because they must not cross a
	 * 4G boundary (hardware has a single register for the high 32 bits
	 * of addresses only)
	 */
	alx->descmem.size = sizeof(struct alx_txd) * alx->tx_ringsz *
			    alx->num_txq +
			    sizeof(struct alx_rrd) * alx->rx_ringsz +
			    sizeof(struct alx_rfd) * alx->rx_ringsz;
	alx->descmem.virt = dma_zalloc_coherent(&alx->hw.pdev->dev,
						alx->descmem.size,
						&alx->descmem.dma,
						GFP_KERNEL);
	if (!alx->descmem.virt)
		return -ENOMEM;

	/* alignment requirements */
	BUILD_BUG_ON(sizeof(struct alx_txd) % 8);
	BUILD_BUG_ON(sizeof(struct alx_rrd) % 8);

	for (i = 0; i < alx->num_txq; i++) {
		offset = alx_alloc_tx_ring(alx, alx->qnapi[i]->txq, offset);
		if (offset < 0) {
			netdev_err(alx->dev, "Allocation of tx buffer failed!\n");
			return -ENOMEM;
		}
	}

	offset = alx_alloc_rx_ring(alx, alx->qnapi[0]->rxq, offset);
	if (offset < 0) {
		netdev_err(alx->dev, "Allocation of rx buffer failed!\n");
		return -ENOMEM;
	}

	return 0;
}

static void alx_free_rings(struct alx_priv *alx)
{
	int i;

	alx_free_buffers(alx);

	for (i = 0; i < alx->num_txq; i++)
		if (alx->qnapi[i] && alx->qnapi[i]->txq)
			kfree(alx->qnapi[i]->txq->bufs);

	if (alx->qnapi[0] && alx->qnapi[0]->rxq)
		kfree(alx->qnapi[0]->rxq->bufs);

	if (alx->descmem.virt)
		dma_free_coherent(&alx->hw.pdev->dev,
				  alx->descmem.size,
				  alx->descmem.virt,
				  alx->descmem.dma);
}

static void alx_free_napis(struct alx_priv *alx)
{
	struct alx_napi *np;
	int i;

	for (i = 0; i < alx->num_napi; i++) {
		np = alx->qnapi[i];
		if (!np)
			continue;

		netif_napi_del(&np->napi);
		kfree(np->txq);
		kfree(np->rxq);
		kfree(np);
		alx->qnapi[i] = NULL;
	}
}

static const u16 tx_pidx_reg[] = {ALX_TPD_PRI0_PIDX, ALX_TPD_PRI1_PIDX,
				  ALX_TPD_PRI2_PIDX, ALX_TPD_PRI3_PIDX};
static const u16 tx_cidx_reg[] = {ALX_TPD_PRI0_CIDX, ALX_TPD_PRI1_CIDX,
				  ALX_TPD_PRI2_CIDX, ALX_TPD_PRI3_CIDX};
static const u32 tx_vect_mask[] = {ALX_ISR_TX_Q0, ALX_ISR_TX_Q1,
				   ALX_ISR_TX_Q2, ALX_ISR_TX_Q3};
static const u32 rx_vect_mask[] = {ALX_ISR_RX_Q0, ALX_ISR_RX_Q1,
				   ALX_ISR_RX_Q2, ALX_ISR_RX_Q3,
				   ALX_ISR_RX_Q4, ALX_ISR_RX_Q5,
				   ALX_ISR_RX_Q6, ALX_ISR_RX_Q7};

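/* MSI-X vector layout: vector 0 carries the misc/PHY interrupts, vectors
 * 1..num_napi serve the alx_napi instances; the masks above associate each
 * queue's ISR bit with its napi context.
 */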
static int alx_alloc_napis(struct alx_priv *alx)
{
	struct alx_napi *np;
	struct alx_rx_queue *rxq;
	struct alx_tx_queue *txq;
	int i;

	alx->int_mask &= ~ALX_ISR_ALL_QUEUES;

	/* allocate alx_napi structures */
	for (i = 0; i < alx->num_napi; i++) {
		np = kzalloc(sizeof(struct alx_napi), GFP_KERNEL);
		if (!np)
			goto err_out;

		np->alx = alx;
		netif_napi_add(alx->dev, &np->napi, alx_poll, 64);
		alx->qnapi[i] = np;
	}

	/* allocate tx queues */
	for (i = 0; i < alx->num_txq; i++) {
		np = alx->qnapi[i];
		txq = kzalloc(sizeof(*txq), GFP_KERNEL);
		if (!txq)
			goto err_out;

		np->txq = txq;
		txq->p_reg = tx_pidx_reg[i];
		txq->c_reg = tx_cidx_reg[i];
		txq->queue_idx = i;
		txq->count = alx->tx_ringsz;
		txq->netdev = alx->dev;
		txq->dev = &alx->hw.pdev->dev;
		np->vec_mask |= tx_vect_mask[i];
		alx->int_mask |= tx_vect_mask[i];
	}

	/* allocate rx queues */
	np = alx->qnapi[0];
	rxq = kzalloc(sizeof(*rxq), GFP_KERNEL);
	if (!rxq)
		goto err_out;

	np->rxq = rxq;
	rxq->np = alx->qnapi[0];
	rxq->queue_idx = 0;
	rxq->count = alx->rx_ringsz;
	rxq->netdev = alx->dev;
	rxq->dev = &alx->hw.pdev->dev;
	np->vec_mask |= rx_vect_mask[0];
	alx->int_mask |= rx_vect_mask[0];

	return 0;

err_out:
	netdev_err(alx->dev, "error allocating internal structures\n");
	alx_free_napis(alx);
	return -ENOMEM;
}

static const int txq_vec_mapping_shift[] = {
	0, ALX_MSI_MAP_TBL1_TXQ0_SHIFT,
	0, ALX_MSI_MAP_TBL1_TXQ1_SHIFT,
	1, ALX_MSI_MAP_TBL2_TXQ2_SHIFT,
	1, ALX_MSI_MAP_TBL2_TXQ3_SHIFT,
};

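/* The txq_vec_mapping_shift pairs above are (MSI map table index, field
 * shift): TX queues 0/1 are mapped through ALX_MSI_MAP_TBL1 and TX queues
 * 2/3 through ALX_MSI_MAP_TBL2.
 */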
static void alx_config_vector_mapping(struct alx_priv *alx)
{
	struct alx_hw *hw = &alx->hw;
	u32 tbl[2] = {0, 0};
	int i, vector, idx, shift;

	if (alx->hw.pdev->msix_enabled) {
		/* tx mappings */
		for (i = 0, vector = 1; i < alx->num_txq; i++, vector++) {
			idx = txq_vec_mapping_shift[i * 2];
			shift = txq_vec_mapping_shift[i * 2 + 1];
			tbl[idx] |= vector << shift;
		}

		/* rx mapping */
		tbl[0] |= 1 << ALX_MSI_MAP_TBL1_RXQ0_SHIFT;
	}

	alx_write_mem32(hw, ALX_MSI_MAP_TBL1, tbl[0]);
	alx_write_mem32(hw, ALX_MSI_MAP_TBL2, tbl[1]);
	alx_write_mem32(hw, ALX_MSI_ID_MAP, 0);
}

static int alx_enable_msix(struct alx_priv *alx)
{
	int err, num_vec, num_txq, num_rxq;

	num_txq = min_t(int, num_online_cpus(), ALX_MAX_TX_QUEUES);
	num_rxq = 1;
	num_vec = max_t(int, num_txq, num_rxq) + 1;

	err = pci_alloc_irq_vectors(alx->hw.pdev, num_vec, num_vec,
				    PCI_IRQ_MSIX);
	if (err < 0) {
		netdev_warn(alx->dev, "Enabling MSI-X interrupts failed!\n");
		return err;
	}

	alx->num_vec = num_vec;
	alx->num_napi = num_vec - 1;
	alx->num_txq = num_txq;
	alx->num_rxq = num_rxq;

	return err;
}

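/* Vector 0 is requested for the misc interrupt first; each alx_napi then
 * gets its own ring vector (1..num_napi), labelled by the queues it owns.
 */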
static int alx_request_msix(struct alx_priv *alx)
{
	struct net_device *netdev = alx->dev;
	int i, err, vector = 0, free_vector = 0;

	err = request_irq(pci_irq_vector(alx->hw.pdev, 0), alx_intr_msix_misc,
			  0, netdev->name, alx);
	if (err)
		goto out_err;

	for (i = 0; i < alx->num_napi; i++) {
		struct alx_napi *np = alx->qnapi[i];

		vector++;

		if (np->txq && np->rxq)
			sprintf(np->irq_lbl, "%s-TxRx-%u", netdev->name,
				np->txq->queue_idx);
		else if (np->txq)
			sprintf(np->irq_lbl, "%s-tx-%u", netdev->name,
				np->txq->queue_idx);
		else if (np->rxq)
			sprintf(np->irq_lbl, "%s-rx-%u", netdev->name,
				np->rxq->queue_idx);
		else
			sprintf(np->irq_lbl, "%s-unused", netdev->name);

		np->vec_idx = vector;
		err = request_irq(pci_irq_vector(alx->hw.pdev, vector),
				  alx_intr_msix_ring, 0, np->irq_lbl, np);
		if (err)
			goto out_free;
	}
	return 0;

out_free:
	free_irq(pci_irq_vector(alx->hw.pdev, free_vector++), alx);

	vector--;
	for (i = 0; i < vector; i++)
		free_irq(pci_irq_vector(alx->hw.pdev, free_vector++),
			 alx->qnapi[i]);

out_err:
	return err;
}

static int alx_init_intr(struct alx_priv *alx)
{
	int ret;

	ret = pci_alloc_irq_vectors(alx->hw.pdev, 1, 1,
				    PCI_IRQ_MSI | PCI_IRQ_LEGACY);
	if (ret < 0)
		return ret;

	alx->num_vec = 1;
	alx->num_napi = 1;
	alx->num_txq = 1;
	alx->num_rxq = 1;
	return 0;
}

static void alx_irq_enable(struct alx_priv *alx)
{
	struct alx_hw *hw = &alx->hw;
	int i;

	/* level-1 interrupt switch */
	alx_write_mem32(hw, ALX_ISR, 0);
	alx_write_mem32(hw, ALX_IMR, alx->int_mask);
	alx_post_write(hw);

	if (alx->hw.pdev->msix_enabled) {
		/* enable all msix irqs */
		for (i = 0; i < alx->num_vec; i++)
			alx_mask_msix(hw, i, false);
	}
}

static void alx_irq_disable(struct alx_priv *alx)
{
	struct alx_hw *hw = &alx->hw;
	int i;

	alx_write_mem32(hw, ALX_ISR, ALX_ISR_DIS);
	alx_write_mem32(hw, ALX_IMR, 0);
	alx_post_write(hw);

	if (alx->hw.pdev->msix_enabled) {
		for (i = 0; i < alx->num_vec; i++) {
			alx_mask_msix(hw, i, true);
			synchronize_irq(pci_irq_vector(alx->hw.pdev, i));
		}
	} else {
		synchronize_irq(pci_irq_vector(alx->hw.pdev, 0));
	}
}

static int alx_realloc_resources(struct alx_priv *alx)
{
	int err;

	alx_free_rings(alx);
	alx_free_napis(alx);
	pci_free_irq_vectors(alx->hw.pdev);

	err = alx_init_intr(alx);
	if (err)
		return err;

	err = alx_alloc_napis(alx);
	if (err)
		return err;

	err = alx_alloc_rings(alx);
	if (err)
		return err;

	return 0;
}

static int alx_request_irq(struct alx_priv *alx)
{
	struct pci_dev *pdev = alx->hw.pdev;
	struct alx_hw *hw = &alx->hw;
	int err;
	u32 msi_ctrl;

	msi_ctrl = (hw->imt >> 1) << ALX_MSI_RETRANS_TM_SHIFT;

	if (alx->hw.pdev->msix_enabled) {
		alx_write_mem32(hw, ALX_MSI_RETRANS_TIMER, msi_ctrl);
		err = alx_request_msix(alx);
		if (!err)
			goto out;

		/* msix request failed, realloc resources */
		err = alx_realloc_resources(alx);
		if (err)
			goto out;
	}

	if (alx->hw.pdev->msi_enabled) {
		alx_write_mem32(hw, ALX_MSI_RETRANS_TIMER,
				msi_ctrl | ALX_MSI_MASK_SEL_LINE);
		err = request_irq(pci_irq_vector(pdev, 0), alx_intr_msi, 0,
				  alx->dev->name, alx);
		if (!err)
			goto out;

		/* fall back to legacy interrupt */
		pci_free_irq_vectors(alx->hw.pdev);
	}

	alx_write_mem32(hw, ALX_MSI_RETRANS_TIMER, 0);
	err = request_irq(pci_irq_vector(pdev, 0), alx_intr_legacy, IRQF_SHARED,
			  alx->dev->name, alx);
out:
	if (!err)
		alx_config_vector_mapping(alx);
	if (err)
		netdev_err(alx->dev, "IRQ registration failed!\n");
	return err;
}

static void alx_free_irq(struct alx_priv *alx)
{
	struct pci_dev *pdev = alx->hw.pdev;
	int i;

	free_irq(pci_irq_vector(pdev, 0), alx);
	if (alx->hw.pdev->msix_enabled) {
		for (i = 0; i < alx->num_napi; i++)
			free_irq(pci_irq_vector(pdev, i + 1), alx->qnapi[i]);
	}

	pci_free_irq_vectors(pdev);
}

static int alx_identify_hw(struct alx_priv *alx)
{
	struct alx_hw *hw = &alx->hw;
	int rev = alx_hw_revision(hw);

	if (rev > ALX_REV_C0)
		return -EINVAL;

	hw->max_dma_chnl = rev >= ALX_REV_B0 ? 4 : 2;

	return 0;
}

static int alx_init_sw(struct alx_priv *alx)
{
	struct pci_dev *pdev = alx->hw.pdev;
	struct alx_hw *hw = &alx->hw;
	int err;

	err = alx_identify_hw(alx);
	if (err) {
		dev_err(&pdev->dev, "unrecognized chip, aborting\n");
		return err;
	}

	alx->hw.lnk_patch =
		pdev->device == ALX_DEV_ID_AR8161 &&
		pdev->subsystem_vendor == PCI_VENDOR_ID_ATTANSIC &&
		pdev->subsystem_device == 0x0091 &&
		pdev->revision == 0;

	hw->smb_timer = 400;
	hw->mtu = alx->dev->mtu;
	alx->rxbuf_size = ALX_MAX_FRAME_LEN(hw->mtu);
	/* MTU range: 34 - 9256 */
	alx->dev->min_mtu = 34;
	alx->dev->max_mtu = ALX_MAX_FRAME_LEN(ALX_MAX_FRAME_SIZE);
	alx->tx_ringsz = 256;
	alx->rx_ringsz = 512;
	hw->imt = 200;
	alx->int_mask = ALX_ISR_MISC;
	hw->dma_chnl = hw->max_dma_chnl;
	hw->ith_tpd = alx->tx_ringsz / 3;
	hw->link_speed = SPEED_UNKNOWN;
	hw->duplex = DUPLEX_UNKNOWN;
	hw->adv_cfg = ADVERTISED_Autoneg |
		      ADVERTISED_10baseT_Half |
		      ADVERTISED_10baseT_Full |
		      ADVERTISED_100baseT_Full |
		      ADVERTISED_100baseT_Half |
		      ADVERTISED_1000baseT_Full;
	hw->flowctrl = ALX_FC_ANEG | ALX_FC_RX | ALX_FC_TX;

	hw->rx_ctrl = ALX_MAC_CTRL_WOLSPED_SWEN |
		      ALX_MAC_CTRL_MHASH_ALG_HI5B |
		      ALX_MAC_CTRL_BRD_EN |
		      ALX_MAC_CTRL_PCRCE |
		      ALX_MAC_CTRL_CRCE |
		      ALX_MAC_CTRL_RXFC_EN |
		      ALX_MAC_CTRL_TXFC_EN |
		      7 << ALX_MAC_CTRL_PRMBLEN_SHIFT;

	return err;
}

static netdev_features_t alx_fix_features(struct net_device *netdev,
					  netdev_features_t features)
{
	if (netdev->mtu > ALX_MAX_TSO_PKT_SIZE)
		features &= ~(NETIF_F_TSO | NETIF_F_TSO6);

	return features;
}

static void alx_netif_stop(struct alx_priv *alx)
{
	int i;

	netif_trans_update(alx->dev);
	if (netif_carrier_ok(alx->dev)) {
		netif_carrier_off(alx->dev);
		netif_tx_disable(alx->dev);
		for (i = 0; i < alx->num_napi; i++)
			napi_disable(&alx->qnapi[i]->napi);
	}
}

static void alx_halt(struct alx_priv *alx)
{
	struct alx_hw *hw = &alx->hw;

	alx_netif_stop(alx);
	hw->link_speed = SPEED_UNKNOWN;
	hw->duplex = DUPLEX_UNKNOWN;

	alx_reset_mac(hw);

	/* disable l0s/l1 */
	alx_enable_aspm(hw, false, false);
	alx_irq_disable(alx);
	alx_free_buffers(alx);
}

static void alx_configure(struct alx_priv *alx)
{
	struct alx_hw *hw = &alx->hw;

	alx_configure_basic(hw);
	alx_disable_rss(hw);
	__alx_set_rx_mode(alx->dev);

	alx_write_mem32(hw, ALX_MAC_CTRL, hw->rx_ctrl);
}

static void alx_activate(struct alx_priv *alx)
{
	/* hardware setting lost, restore it */
	alx_reinit_rings(alx);
	alx_configure(alx);

	/* clear old interrupts */
	alx_write_mem32(&alx->hw, ALX_ISR, ~(u32)ALX_ISR_DIS);

	alx_irq_enable(alx);

	alx_schedule_link_check(alx);
}

static void alx_reinit(struct alx_priv *alx)
{
	alx_halt(alx);
	alx_activate(alx);
}

static int alx_change_mtu(struct net_device *netdev, int mtu)
{
	struct alx_priv *alx = netdev_priv(netdev);
	int max_frame = ALX_MAX_FRAME_LEN(mtu);

	netdev->mtu = mtu;
	alx->hw.mtu = mtu;
	alx->rxbuf_size = max(max_frame, ALX_DEF_RXBUF_SIZE);
	netdev_update_features(netdev);
	if (netif_running(netdev))
		alx_reinit(alx);
	return 0;
}

static void alx_netif_start(struct alx_priv *alx)
{
	int i;

	netif_tx_wake_all_queues(alx->dev);
	for (i = 0; i < alx->num_napi; i++)
		napi_enable(&alx->qnapi[i]->napi);
	netif_carrier_on(alx->dev);
}

static int __alx_open(struct alx_priv *alx, bool resume)
{
	int err;

	err = alx_enable_msix(alx);
	if (err < 0) {
		err = alx_init_intr(alx);
		if (err)
			return err;
	}

	if (!resume)
		netif_carrier_off(alx->dev);

	err = alx_alloc_napis(alx);
	if (err)
		goto out_disable_adv_intr;

	err = alx_alloc_rings(alx);
	if (err)
		goto out_free_rings;

	alx_configure(alx);

	err = alx_request_irq(alx);
	if (err)
		goto out_free_rings;

	/* must be called after alx_request_irq because the chip stops working
	 * if we copy the dma addresses in alx_init_ring_ptrs twice when
	 * requesting msi-x interrupts failed
	 */
	alx_reinit_rings(alx);

	netif_set_real_num_tx_queues(alx->dev, alx->num_txq);
	netif_set_real_num_rx_queues(alx->dev, alx->num_rxq);

	/* clear old interrupts */
	alx_write_mem32(&alx->hw, ALX_ISR, ~(u32)ALX_ISR_DIS);

	alx_irq_enable(alx);

	if (!resume)
		netif_tx_start_all_queues(alx->dev);

	alx_schedule_link_check(alx);
	return 0;

out_free_rings:
	alx_free_rings(alx);
	alx_free_napis(alx);
out_disable_adv_intr:
	pci_free_irq_vectors(alx->hw.pdev);
	return err;
}

static void __alx_stop(struct alx_priv *alx)
{
	alx_halt(alx);
	alx_free_irq(alx);

	alx_free_rings(alx);
	alx_free_napis(alx);
}

static const char *alx_speed_desc(struct alx_hw *hw)
{
	switch (alx_speed_to_ethadv(hw->link_speed, hw->duplex)) {
	case ADVERTISED_1000baseT_Full:
		return "1 Gbps Full";
	case ADVERTISED_100baseT_Full:
		return "100 Mbps Full";
	case ADVERTISED_100baseT_Half:
		return "100 Mbps Half";
	case ADVERTISED_10baseT_Full:
		return "10 Mbps Full";
	case ADVERTISED_10baseT_Half:
		return "10 Mbps Half";
	default:
		return "Unknown speed";
	}
}

static void alx_check_link(struct alx_priv *alx)
{
	struct alx_hw *hw = &alx->hw;
	unsigned long flags;
	int old_speed;
	int err;

	/* clear PHY internal interrupt status, otherwise the main
	 * interrupt status will be asserted forever
	 */
	alx_clear_phy_intr(hw);

	old_speed = hw->link_speed;
	err = alx_read_phy_link(hw);
	if (err < 0)
		goto reset;

	spin_lock_irqsave(&alx->irq_lock, flags);
	alx->int_mask |= ALX_ISR_PHY;
	alx_write_mem32(hw, ALX_IMR, alx->int_mask);
	spin_unlock_irqrestore(&alx->irq_lock, flags);

	if (old_speed == hw->link_speed)
		return;

	if (hw->link_speed != SPEED_UNKNOWN) {
		netif_info(alx, link, alx->dev,
			   "NIC Up: %s\n", alx_speed_desc(hw));
		alx_post_phy_link(hw);
		alx_enable_aspm(hw, true, true);
		alx_start_mac(hw);

		if (old_speed == SPEED_UNKNOWN)
			alx_netif_start(alx);
	} else {
		/* link is now down */
		alx_netif_stop(alx);
		netif_info(alx, link, alx->dev, "Link Down\n");
		err = alx_reset_mac(hw);
		if (err)
			goto reset;
		alx_irq_disable(alx);

		/* MAC reset causes all HW settings to be lost, restore all */
		err = alx_reinit_rings(alx);
		if (err)
			goto reset;
		alx_configure(alx);
		alx_enable_aspm(hw, false, true);
		alx_post_phy_link(hw);
		alx_irq_enable(alx);
	}

	return;

reset:
	alx_schedule_reset(alx);
}

static int alx_open(struct net_device *netdev)
{
	return __alx_open(netdev_priv(netdev), false);
}

static int alx_stop(struct net_device *netdev)
{
	__alx_stop(netdev_priv(netdev));
	return 0;
}

static void alx_link_check(struct work_struct *work)
{
	struct alx_priv *alx;

	alx = container_of(work, struct alx_priv, link_check_wk);

	rtnl_lock();
	alx_check_link(alx);
	rtnl_unlock();
}

static void alx_reset(struct work_struct *work)
{
	struct alx_priv *alx = container_of(work, struct alx_priv, reset_wk);

	rtnl_lock();
	alx_reinit(alx);
	rtnl_unlock();
}

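/* Worst-case number of TPDs needed for an skb: the head plus one per page
 * fragment (e.g. 3 fragments -> 4 TPDs), plus one more for the IPv6 LSOv2
 * length-only descriptor.
 */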
static int alx_tpd_req(struct sk_buff *skb)
{
	int num;

	num = skb_shinfo(skb)->nr_frags + 1;
	/* we need one extra descriptor for LSOv2 */
	if (skb_is_gso(skb) && skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
		num++;

	return num;
}

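/* Checksum offload: the TPD stores the checksum start and the result
 * offset in units of 2 bytes, hence the >> 1 shifts and the requirement
 * that the start offset be even.
 */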
static int alx_tx_csum(struct sk_buff *skb, struct alx_txd *first)
{
	u8 cso, css;

	if (skb->ip_summed != CHECKSUM_PARTIAL)
		return 0;

	cso = skb_checksum_start_offset(skb);
	if (cso & 1)
		return -EINVAL;

	css = cso + skb->csum_offset;
	first->word1 |= cpu_to_le32((cso >> 1) << TPD_CXSUMSTART_SHIFT);
	first->word1 |= cpu_to_le32((css >> 1) << TPD_CXSUMOFFSET_SHIFT);
	first->word1 |= cpu_to_le32(1 << TPD_CXSUM_EN_SHIFT);

	return 0;
}

static int alx_tso(struct sk_buff *skb, struct alx_txd *first)
{
	int err;

	if (skb->ip_summed != CHECKSUM_PARTIAL)
		return 0;

	if (!skb_is_gso(skb))
		return 0;

	err = skb_cow_head(skb, 0);
	if (err < 0)
		return err;

	if (skb->protocol == htons(ETH_P_IP)) {
		struct iphdr *iph = ip_hdr(skb);

		iph->check = 0;
		tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
							 0, IPPROTO_TCP, 0);
		first->word1 |= 1 << TPD_IPV4_SHIFT;
	} else if (skb_is_gso_v6(skb)) {
		ipv6_hdr(skb)->payload_len = 0;
		tcp_hdr(skb)->check = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
						       &ipv6_hdr(skb)->daddr,
						       0, IPPROTO_TCP, 0);
		/* LSOv2: the first TPD only provides the packet length */
		first->adrl.l.pkt_len = skb->len;
		first->word1 |= 1 << TPD_LSO_V2_SHIFT;
	}

	first->word1 |= 1 << TPD_LSO_EN_SHIFT;
	first->word1 |= (skb_transport_offset(skb) &
			 TPD_L4HDROFFSET_MASK) << TPD_L4HDROFFSET_SHIFT;
	first->word1 |= (skb_shinfo(skb)->gso_size &
			 TPD_MSS_MASK) << TPD_MSS_SHIFT;
	return 1;
}

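/* Map the skb head and all fragments to consecutive TPDs. For IPv6 LSOv2
 * the first TPD carries only the total packet length, so the buffer
 * descriptors start one slot later; on a DMA mapping error everything
 * mapped so far is unwound via alx_free_txbuf().
 */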
static int alx_map_tx_skb(struct alx_tx_queue *txq, struct sk_buff *skb)
{
	struct alx_txd *tpd, *first_tpd;
	dma_addr_t dma;
	int maplen, f, first_idx = txq->write_idx;

	first_tpd = &txq->tpd[txq->write_idx];
	tpd = first_tpd;

	if (tpd->word1 & (1 << TPD_LSO_V2_SHIFT)) {
		if (++txq->write_idx == txq->count)
			txq->write_idx = 0;

		tpd = &txq->tpd[txq->write_idx];
		tpd->len = first_tpd->len;
		tpd->vlan_tag = first_tpd->vlan_tag;
		tpd->word1 = first_tpd->word1;
	}

	maplen = skb_headlen(skb);
	dma = dma_map_single(txq->dev, skb->data, maplen,
			     DMA_TO_DEVICE);
	if (dma_mapping_error(txq->dev, dma))
		goto err_dma;

	dma_unmap_len_set(&txq->bufs[txq->write_idx], size, maplen);
	dma_unmap_addr_set(&txq->bufs[txq->write_idx], dma, dma);

	tpd->adrl.addr = cpu_to_le64(dma);
	tpd->len = cpu_to_le16(maplen);

	for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) {
		struct skb_frag_struct *frag;

		frag = &skb_shinfo(skb)->frags[f];

		if (++txq->write_idx == txq->count)
			txq->write_idx = 0;
		tpd = &txq->tpd[txq->write_idx];

		tpd->word1 = first_tpd->word1;

		maplen = skb_frag_size(frag);
		dma = skb_frag_dma_map(txq->dev, frag, 0,
				       maplen, DMA_TO_DEVICE);
		if (dma_mapping_error(txq->dev, dma))
			goto err_dma;

		dma_unmap_len_set(&txq->bufs[txq->write_idx], size, maplen);
		dma_unmap_addr_set(&txq->bufs[txq->write_idx], dma, dma);

		tpd->adrl.addr = cpu_to_le64(dma);
		tpd->len = cpu_to_le16(maplen);
	}

	/* last TPD, set EOP flag and store skb */
	tpd->word1 |= cpu_to_le32(1 << TPD_EOP_SHIFT);
	txq->bufs[txq->write_idx].skb = skb;

	if (++txq->write_idx == txq->count)
		txq->write_idx = 0;

	return 0;

err_dma:
	f = first_idx;
	while (f != txq->write_idx) {
		alx_free_txbuf(txq, f);
		if (++f == txq->count)
			f = 0;
	}
	return -ENOMEM;
}

static netdev_tx_t alx_start_xmit_ring(struct sk_buff *skb,
				       struct alx_tx_queue *txq)
{
	struct alx_priv *alx;
	struct alx_txd *first;
	int tso;

	alx = netdev_priv(txq->netdev);

	if (alx_tpd_avail(txq) < alx_tpd_req(skb)) {
		netif_tx_stop_queue(alx_get_tx_queue(txq));
		goto drop;
	}

	first = &txq->tpd[txq->write_idx];
	memset(first, 0, sizeof(*first));

	tso = alx_tso(skb, first);
	if (tso < 0)
		goto drop;
	else if (!tso && alx_tx_csum(skb, first))
		goto drop;

	if (alx_map_tx_skb(txq, skb) < 0)
		goto drop;

	netdev_tx_sent_queue(alx_get_tx_queue(txq), skb->len);

	/* flush updates before updating hardware */
	wmb();
	alx_write_mem16(&alx->hw, txq->p_reg, txq->write_idx);

	if (alx_tpd_avail(txq) < txq->count / 8)
		netif_tx_stop_queue(alx_get_tx_queue(txq));

	return NETDEV_TX_OK;

drop:
	dev_kfree_skb_any(skb);
	return NETDEV_TX_OK;
}

static netdev_tx_t alx_start_xmit(struct sk_buff *skb,
				  struct net_device *netdev)
{
	struct alx_priv *alx = netdev_priv(netdev);

	return alx_start_xmit_ring(skb, alx_tx_queue_mapping(alx, skb));
}

static void alx_tx_timeout(struct net_device *dev)
{
	struct alx_priv *alx = netdev_priv(dev);

	alx_schedule_reset(alx);
}

static int alx_mdio_read(struct net_device *netdev,
			 int prtad, int devad, u16 addr)
{
	struct alx_priv *alx = netdev_priv(netdev);
	struct alx_hw *hw = &alx->hw;
	u16 val;
	int err;

	if (prtad != hw->mdio.prtad)
		return -EINVAL;

	if (devad == MDIO_DEVAD_NONE)
		err = alx_read_phy_reg(hw, addr, &val);
	else
		err = alx_read_phy_ext(hw, devad, addr, &val);

	if (err)
		return err;

	return val;
}

static int alx_mdio_write(struct net_device *netdev,
			  int prtad, int devad, u16 addr, u16 val)
{
	struct alx_priv *alx = netdev_priv(netdev);
	struct alx_hw *hw = &alx->hw;

	if (prtad != hw->mdio.prtad)
		return -EINVAL;

	if (devad == MDIO_DEVAD_NONE)
		return alx_write_phy_reg(hw, addr, val);

	return alx_write_phy_ext(hw, devad, addr, val);
}

static int alx_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
{
	struct alx_priv *alx = netdev_priv(netdev);

	if (!netif_running(netdev))
		return -EAGAIN;

	return mdio_mii_ioctl(&alx->hw.mdio, if_mii(ifr), cmd);
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void alx_poll_controller(struct net_device *netdev)
{
	struct alx_priv *alx = netdev_priv(netdev);
	int i;

	if (alx->hw.pdev->msix_enabled) {
		alx_intr_msix_misc(0, alx);
		for (i = 0; i < alx->num_txq; i++)
			alx_intr_msix_ring(0, alx->qnapi[i]);
	} else if (alx->hw.pdev->msi_enabled)
		alx_intr_msi(0, alx);
	else
		alx_intr_legacy(0, alx);
}
#endif

static void alx_get_stats64(struct net_device *dev,
			    struct rtnl_link_stats64 *net_stats)
{
	struct alx_priv *alx = netdev_priv(dev);
	struct alx_hw_stats *hw_stats = &alx->hw.stats;

	spin_lock(&alx->stats_lock);

	alx_update_hw_stats(&alx->hw);

	net_stats->tx_bytes = hw_stats->tx_byte_cnt;
	net_stats->rx_bytes = hw_stats->rx_byte_cnt;
	net_stats->multicast = hw_stats->rx_mcast;
	net_stats->collisions = hw_stats->tx_single_col +
				hw_stats->tx_multi_col +
				hw_stats->tx_late_col +
				hw_stats->tx_abort_col;

	net_stats->rx_errors = hw_stats->rx_frag +
			       hw_stats->rx_fcs_err +
			       hw_stats->rx_len_err +
			       hw_stats->rx_ov_sz +
			       hw_stats->rx_ov_rrd +
			       hw_stats->rx_align_err +
			       hw_stats->rx_ov_rxf;

	net_stats->rx_fifo_errors = hw_stats->rx_ov_rxf;
	net_stats->rx_length_errors = hw_stats->rx_len_err;
	net_stats->rx_crc_errors = hw_stats->rx_fcs_err;
	net_stats->rx_frame_errors = hw_stats->rx_align_err;
	net_stats->rx_dropped = hw_stats->rx_ov_rrd;

	net_stats->tx_errors = hw_stats->tx_late_col +
			       hw_stats->tx_abort_col +
			       hw_stats->tx_underrun +
			       hw_stats->tx_trunc;

	net_stats->tx_aborted_errors = hw_stats->tx_abort_col;
	net_stats->tx_fifo_errors = hw_stats->tx_underrun;
	net_stats->tx_window_errors = hw_stats->tx_late_col;

	net_stats->tx_packets = hw_stats->tx_ok + net_stats->tx_errors;
	net_stats->rx_packets = hw_stats->rx_ok + net_stats->rx_errors;

	spin_unlock(&alx->stats_lock);
}

static const struct net_device_ops alx_netdev_ops = {
	.ndo_open               = alx_open,
	.ndo_stop               = alx_stop,
	.ndo_start_xmit         = alx_start_xmit,
	.ndo_get_stats64        = alx_get_stats64,
	.ndo_set_rx_mode        = alx_set_rx_mode,
	.ndo_validate_addr      = eth_validate_addr,
	.ndo_set_mac_address    = alx_set_mac_address,
	.ndo_change_mtu         = alx_change_mtu,
	.ndo_do_ioctl           = alx_ioctl,
	.ndo_tx_timeout         = alx_tx_timeout,
	.ndo_fix_features       = alx_fix_features,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller    = alx_poll_controller,
#endif
};

static int alx_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct net_device *netdev;
	struct alx_priv *alx;
	struct alx_hw *hw;
	bool phy_configured;
	int err;

	err = pci_enable_device_mem(pdev);
	if (err)
		return err;

	/* The alx chip can DMA to 64-bit addresses, but it uses a single
	 * shared register for the high 32 bits, so only a single, aligned,
	 * 4 GB physical address range can be used for descriptors.
	 */
	if (!dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64))) {
		dev_dbg(&pdev->dev, "DMA to 64-BIT addresses\n");
	} else {
		err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
		if (err) {
			dev_err(&pdev->dev, "No usable DMA config, aborting\n");
			goto out_pci_disable;
		}
	}

	err = pci_request_mem_regions(pdev, alx_drv_name);
	if (err) {
		dev_err(&pdev->dev,
			"pci_request_mem_regions failed\n");
		goto out_pci_disable;
	}

	pci_enable_pcie_error_reporting(pdev);
	pci_set_master(pdev);

	if (!pdev->pm_cap) {
		dev_err(&pdev->dev,
			"Can't find power management capability, aborting\n");
		err = -EIO;
		goto out_pci_release;
	}

	netdev = alloc_etherdev_mqs(sizeof(*alx),
				    ALX_MAX_TX_QUEUES, 1);
	if (!netdev) {
		err = -ENOMEM;
		goto out_pci_release;
	}

	SET_NETDEV_DEV(netdev, &pdev->dev);
	alx = netdev_priv(netdev);
	spin_lock_init(&alx->hw.mdio_lock);
	spin_lock_init(&alx->irq_lock);
	spin_lock_init(&alx->stats_lock);
	alx->dev = netdev;
	alx->hw.pdev = pdev;
	alx->msg_enable = NETIF_MSG_LINK | NETIF_MSG_HW | NETIF_MSG_IFUP |
			  NETIF_MSG_TX_ERR | NETIF_MSG_RX_ERR | NETIF_MSG_WOL;
	hw = &alx->hw;
	pci_set_drvdata(pdev, alx);

	hw->hw_addr = pci_ioremap_bar(pdev, 0);
	if (!hw->hw_addr) {
		dev_err(&pdev->dev, "cannot map device registers\n");
		err = -EIO;
		goto out_free_netdev;
	}

	netdev->netdev_ops = &alx_netdev_ops;
	netdev->ethtool_ops = &alx_ethtool_ops;
	netdev->irq = pci_irq_vector(pdev, 0);
	netdev->watchdog_timeo = ALX_WATCHDOG_TIME;

	if (ent->driver_data & ALX_DEV_QUIRK_MSI_INTX_DISABLE_BUG)
		pdev->dev_flags |= PCI_DEV_FLAGS_MSI_INTX_DISABLE_BUG;

	err = alx_init_sw(alx);
	if (err) {
		dev_err(&pdev->dev, "net device private data init failed\n");
		goto out_unmap;
	}

	alx_reset_pcie(hw);

	phy_configured = alx_phy_configured(hw);

	if (!phy_configured)
		alx_reset_phy(hw);

	err = alx_reset_mac(hw);
	if (err) {
		dev_err(&pdev->dev, "MAC Reset failed, error = %d\n", err);
		goto out_unmap;
	}

	/* setup link to put it in a known good starting state */
	if (!phy_configured) {
		err = alx_setup_speed_duplex(hw, hw->adv_cfg, hw->flowctrl);
		if (err) {
			dev_err(&pdev->dev,
				"failed to configure PHY speed/duplex (err=%d)\n",
				err);
			goto out_unmap;
		}
	}

	netdev->hw_features = NETIF_F_SG |
			      NETIF_F_HW_CSUM |
			      NETIF_F_RXCSUM |
			      NETIF_F_TSO |
			      NETIF_F_TSO6;

	if (alx_get_perm_macaddr(hw, hw->perm_addr)) {
		dev_warn(&pdev->dev,
			 "Invalid permanent address programmed, using random one\n");
		eth_hw_addr_random(netdev);
		memcpy(hw->perm_addr, netdev->dev_addr, netdev->addr_len);
	}

	memcpy(hw->mac_addr, hw->perm_addr, ETH_ALEN);
	memcpy(netdev->dev_addr, hw->mac_addr, ETH_ALEN);
	memcpy(netdev->perm_addr, hw->perm_addr, ETH_ALEN);

	hw->mdio.prtad = 0;
	hw->mdio.mmds = 0;
	hw->mdio.dev = netdev;
	hw->mdio.mode_support = MDIO_SUPPORTS_C45 |
				MDIO_SUPPORTS_C22 |
				MDIO_EMULATE_C22;
	hw->mdio.mdio_read = alx_mdio_read;
	hw->mdio.mdio_write = alx_mdio_write;

	if (!alx_get_phy_info(hw)) {
		dev_err(&pdev->dev, "failed to identify PHY\n");
		err = -EIO;
		goto out_unmap;
	}

	INIT_WORK(&alx->link_check_wk, alx_link_check);
	INIT_WORK(&alx->reset_wk, alx_reset);
	netif_carrier_off(netdev);

	err = register_netdev(netdev);
	if (err) {
		dev_err(&pdev->dev, "register netdevice failed\n");
		goto out_unmap;
	}

	netdev_info(netdev,
		    "Qualcomm Atheros AR816x/AR817x Ethernet [%pM]\n",
		    netdev->dev_addr);

	return 0;

out_unmap:
	iounmap(hw->hw_addr);
out_free_netdev:
	free_netdev(netdev);
out_pci_release:
	pci_release_mem_regions(pdev);
out_pci_disable:
	pci_disable_device(pdev);
	return err;
}

static void alx_remove(struct pci_dev *pdev)
{
	struct alx_priv *alx = pci_get_drvdata(pdev);
	struct alx_hw *hw = &alx->hw;

	cancel_work_sync(&alx->link_check_wk);
	cancel_work_sync(&alx->reset_wk);

	/* restore permanent mac address */
	alx_set_macaddr(hw, hw->perm_addr);

	unregister_netdev(alx->dev);
	iounmap(hw->hw_addr);
	pci_release_mem_regions(pdev);

	pci_disable_pcie_error_reporting(pdev);
	pci_disable_device(pdev);

	free_netdev(alx->dev);
}

#ifdef CONFIG_PM_SLEEP
static int alx_suspend(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct alx_priv *alx = pci_get_drvdata(pdev);

	if (!netif_running(alx->dev))
		return 0;
	netif_device_detach(alx->dev);
	__alx_stop(alx);
	return 0;
}

static int alx_resume(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct alx_priv *alx = pci_get_drvdata(pdev);
	struct alx_hw *hw = &alx->hw;
	int err;

	alx_reset_phy(hw);

	if (!netif_running(alx->dev))
		return 0;
	netif_device_attach(alx->dev);

	rtnl_lock();
	err = __alx_open(alx, true);
	rtnl_unlock();

	return err;
}

static SIMPLE_DEV_PM_OPS(alx_pm_ops, alx_suspend, alx_resume);
#define ALX_PM_OPS      (&alx_pm_ops)
#else
#define ALX_PM_OPS      NULL
#endif

static pci_ers_result_t alx_pci_error_detected(struct pci_dev *pdev,
					       pci_channel_state_t state)
{
	struct alx_priv *alx = pci_get_drvdata(pdev);
	struct net_device *netdev = alx->dev;
	pci_ers_result_t rc = PCI_ERS_RESULT_NEED_RESET;

	dev_info(&pdev->dev, "pci error detected\n");

	rtnl_lock();

	if (netif_running(netdev)) {
		netif_device_detach(netdev);
		alx_halt(alx);
	}

	if (state == pci_channel_io_perm_failure)
		rc = PCI_ERS_RESULT_DISCONNECT;
	else
		pci_disable_device(pdev);

	rtnl_unlock();

	return rc;
}

static pci_ers_result_t alx_pci_error_slot_reset(struct pci_dev *pdev)
{
	struct alx_priv *alx = pci_get_drvdata(pdev);
	struct alx_hw *hw = &alx->hw;
	pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;

	dev_info(&pdev->dev, "pci error slot reset\n");

	rtnl_lock();

	if (pci_enable_device(pdev)) {
		dev_err(&pdev->dev, "Failed to re-enable PCI device after reset\n");
		goto out;
	}

	pci_set_master(pdev);

	alx_reset_pcie(hw);
	if (!alx_reset_mac(hw))
		rc = PCI_ERS_RESULT_RECOVERED;
out:
	rtnl_unlock();

	return rc;
}

static void alx_pci_error_resume(struct pci_dev *pdev)
{
	struct alx_priv *alx = pci_get_drvdata(pdev);
	struct net_device *netdev = alx->dev;

	dev_info(&pdev->dev, "pci error resume\n");

	rtnl_lock();

	if (netif_running(netdev)) {
		alx_activate(alx);
		netif_device_attach(netdev);
	}

	rtnl_unlock();
}

static const struct pci_error_handlers alx_err_handlers = {
	.error_detected = alx_pci_error_detected,
	.slot_reset     = alx_pci_error_slot_reset,
	.resume         = alx_pci_error_resume,
};

static const struct pci_device_id alx_pci_tbl[] = {
	{ PCI_VDEVICE(ATTANSIC, ALX_DEV_ID_AR8161),
	  .driver_data = ALX_DEV_QUIRK_MSI_INTX_DISABLE_BUG },
	{ PCI_VDEVICE(ATTANSIC, ALX_DEV_ID_E2200),
	  .driver_data = ALX_DEV_QUIRK_MSI_INTX_DISABLE_BUG },
	{ PCI_VDEVICE(ATTANSIC, ALX_DEV_ID_E2400),
	  .driver_data = ALX_DEV_QUIRK_MSI_INTX_DISABLE_BUG },
	{ PCI_VDEVICE(ATTANSIC, ALX_DEV_ID_E2500),
	  .driver_data = ALX_DEV_QUIRK_MSI_INTX_DISABLE_BUG },
	{ PCI_VDEVICE(ATTANSIC, ALX_DEV_ID_AR8162),
	  .driver_data = ALX_DEV_QUIRK_MSI_INTX_DISABLE_BUG },
	{ PCI_VDEVICE(ATTANSIC, ALX_DEV_ID_AR8171) },
	{ PCI_VDEVICE(ATTANSIC, ALX_DEV_ID_AR8172) },
	{}
};

static struct pci_driver alx_driver = {
	.name        = alx_drv_name,
	.id_table    = alx_pci_tbl,
	.probe       = alx_probe,
	.remove      = alx_remove,
	.err_handler = &alx_err_handlers,
	.driver.pm   = ALX_PM_OPS,
};

module_pci_driver(alx_driver);
MODULE_DEVICE_TABLE(pci, alx_pci_tbl);
MODULE_AUTHOR("Johannes Berg <johannes@sipsolutions.net>");
MODULE_AUTHOR("Qualcomm Corporation, <nic-devel@qualcomm.com>");
MODULE_DESCRIPTION(
	"Qualcomm Atheros(R) AR816x/AR817x PCI-E Ethernet Network Driver");
MODULE_LICENSE("GPL");