// SPDX-License-Identifier: GPL-2.0
#define pr_fmt(fmt)		"bcmasp_intf: " fmt

#include <asm/byteorder.h>
#include <linux/brcmphy.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/etherdevice.h>
#include <linux/netdevice.h>
#include <linux/of_net.h>
#include <linux/of_mdio.h>
#include <linux/phy.h>
#include <linux/phy_fixed.h>
#include <linux/ptp_classify.h>
#include <linux/platform_device.h>
#include <net/ip.h>
#include <net/ipv6.h>

#include "bcmasp.h"
#include "bcmasp_intf_defs.h"

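/* The TX and RX descriptor rings are fixed-size arrays of struct
 * bcmasp_desc. The helpers below advance either a ring index (incr_ring)
 * or a hardware byte pointer into the ring (incr_first_byte points at the
 * first byte of a descriptor, incr_last_byte at its last byte), wrapping
 * back to the ring base once the end is reached.
 */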
static int incr_ring(int index, int ring_count)
{
	index++;
	if (index == ring_count)
		return 0;

	return index;
}

/* Points to last byte of descriptor */
static dma_addr_t incr_last_byte(dma_addr_t addr, dma_addr_t beg,
				 int ring_count)
{
	dma_addr_t end = beg + (ring_count * DESC_SIZE);

	addr += DESC_SIZE;
	if (addr > end)
		return beg + DESC_SIZE - 1;

	return addr;
}

/* Points to first byte of descriptor */
static dma_addr_t incr_first_byte(dma_addr_t addr, dma_addr_t beg,
				  int ring_count)
{
	dma_addr_t end = beg + (ring_count * DESC_SIZE);

	addr += DESC_SIZE;
	if (addr >= end)
		return beg;

	return addr;
}

static void bcmasp_enable_tx(struct bcmasp_intf *intf, int en)
{
	if (en) {
		tx_spb_ctrl_wl(intf, TX_SPB_CTRL_ENABLE_EN, TX_SPB_CTRL_ENABLE);
		tx_epkt_core_wl(intf, (TX_EPKT_C_CFG_MISC_EN |
				TX_EPKT_C_CFG_MISC_PT |
				(intf->port << TX_EPKT_C_CFG_MISC_PS_SHIFT)),
				TX_EPKT_C_CFG_MISC);
	} else {
		tx_spb_ctrl_wl(intf, 0x0, TX_SPB_CTRL_ENABLE);
		tx_epkt_core_wl(intf, 0x0, TX_EPKT_C_CFG_MISC);
	}
}

static void bcmasp_enable_rx(struct bcmasp_intf *intf, int en)
{
	if (en)
		rx_edpkt_cfg_wl(intf, RX_EDPKT_CFG_ENABLE_EN,
				RX_EDPKT_CFG_ENABLE);
	else
		rx_edpkt_cfg_wl(intf, 0x0, RX_EDPKT_CFG_ENABLE);
}

static void bcmasp_set_rx_mode(struct net_device *dev)
{
	unsigned char mask[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
	struct bcmasp_intf *intf = netdev_priv(dev);
	struct netdev_hw_addr *ha;
	int ret;

	spin_lock_bh(&intf->parent->mda_lock);

	bcmasp_disable_all_filters(intf);

	if (dev->flags & IFF_PROMISC)
		goto set_promisc;

	bcmasp_set_promisc(intf, 0);

	bcmasp_set_broad(intf, 1);

	bcmasp_set_oaddr(intf, dev->dev_addr, 1);

	if (dev->flags & IFF_ALLMULTI) {
		bcmasp_set_allmulti(intf, 1);
	} else {
		bcmasp_set_allmulti(intf, 0);

		netdev_for_each_mc_addr(ha, dev) {
			ret = bcmasp_set_en_mda_filter(intf, ha->addr, mask);
			if (ret) {
				intf->mib.mc_filters_full_cnt++;
				goto set_promisc;
			}
		}
	}

	netdev_for_each_uc_addr(ha, dev) {
		ret = bcmasp_set_en_mda_filter(intf, ha->addr, mask);
		if (ret) {
			intf->mib.uc_filters_full_cnt++;
			goto set_promisc;
		}
	}

	spin_unlock_bh(&intf->parent->mda_lock);
	return;

set_promisc:
	bcmasp_set_promisc(intf, 1);
	intf->mib.promisc_filters_cnt++;

	/* disable all filters used by this port */
	bcmasp_disable_all_filters(intf);

	spin_unlock_bh(&intf->parent->mda_lock);
}

static void bcmasp_clean_txcb(struct bcmasp_intf *intf, int index)
{
	struct bcmasp_tx_cb *txcb = &intf->tx_cbs[index];

	txcb->skb = NULL;
	dma_unmap_addr_set(txcb, dma_addr, 0);
	dma_unmap_len_set(txcb, dma_len, 0);
	txcb->last = false;
}

static int tx_spb_ring_full(struct bcmasp_intf *intf, int cnt)
{
	int next_index, i;

	/* Check if we have enough room for cnt descriptors */
	for (i = 0; i < cnt; i++) {
		next_index = incr_ring(intf->tx_spb_index, DESC_RING_COUNT);
		if (next_index == intf->tx_spb_clean_index)
			return 1;
	}

	return 0;
}

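/* Hardware checksum offload: the ASP consumes a block of offload opcodes
 * (struct bcmasp_pkt_offload) pushed in front of the Ethernet frame. The
 * words encode the L2/L3/L4 header sizes so the engine can compute and
 * insert the TCP/UDP checksum for IPv4/IPv6. Anything the hardware cannot
 * handle falls back to skb_checksum_help().
 */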
static struct sk_buff *bcmasp_csum_offload(struct net_device *dev,
					   struct sk_buff *skb,
					   bool *csum_hw)
{
	struct bcmasp_intf *intf = netdev_priv(dev);
	u32 header = 0, header2 = 0, epkt = 0;
	struct bcmasp_pkt_offload *offload;
	unsigned int header_cnt = 0;
	u8 ip_proto;
	int ret;

	if (skb->ip_summed != CHECKSUM_PARTIAL)
		return skb;

	ret = skb_cow_head(skb, sizeof(*offload));
	if (ret < 0) {
		intf->mib.tx_realloc_offload_failed++;
		goto help;
	}

	switch (skb->protocol) {
	case htons(ETH_P_IP):
		header |= PKT_OFFLOAD_HDR_SIZE_2((ip_hdrlen(skb) >> 8) & 0xf);
		header2 |= PKT_OFFLOAD_HDR2_SIZE_2(ip_hdrlen(skb) & 0xff);
		epkt |= PKT_OFFLOAD_EPKT_IP(0) | PKT_OFFLOAD_EPKT_CSUM_L2;
		ip_proto = ip_hdr(skb)->protocol;
		header_cnt += 2;
		break;
	case htons(ETH_P_IPV6):
		header |= PKT_OFFLOAD_HDR_SIZE_2((IP6_HLEN >> 8) & 0xf);
		header2 |= PKT_OFFLOAD_HDR2_SIZE_2(IP6_HLEN & 0xff);
		epkt |= PKT_OFFLOAD_EPKT_IP(1) | PKT_OFFLOAD_EPKT_CSUM_L2;
		ip_proto = ipv6_hdr(skb)->nexthdr;
		header_cnt += 2;
		break;
	default:
		goto help;
	}

	switch (ip_proto) {
	case IPPROTO_TCP:
		header2 |= PKT_OFFLOAD_HDR2_SIZE_3(tcp_hdrlen(skb));
		epkt |= PKT_OFFLOAD_EPKT_TP(0) | PKT_OFFLOAD_EPKT_CSUM_L3;
		header_cnt++;
		break;
	case IPPROTO_UDP:
		header2 |= PKT_OFFLOAD_HDR2_SIZE_3(UDP_HLEN);
		epkt |= PKT_OFFLOAD_EPKT_TP(1) | PKT_OFFLOAD_EPKT_CSUM_L3;
		header_cnt++;
		break;
	default:
		goto help;
	}

	offload = (struct bcmasp_pkt_offload *)skb_push(skb, sizeof(*offload));

	header |= PKT_OFFLOAD_HDR_OP | PKT_OFFLOAD_HDR_COUNT(header_cnt) |
		  PKT_OFFLOAD_HDR_SIZE_1(ETH_HLEN);
	epkt |= PKT_OFFLOAD_EPKT_OP;

	offload->nop = htonl(PKT_OFFLOAD_NOP);
	offload->header = htonl(header);
	offload->header2 = htonl(header2);
	offload->epkt = htonl(epkt);
	offload->end = htonl(PKT_OFFLOAD_END_OP);
	*csum_hw = true;

	return skb;

help:
	skb_checksum_help(skb);

	return skb;
}

static unsigned long bcmasp_rx_edpkt_dma_rq(struct bcmasp_intf *intf)
{
	return rx_edpkt_dma_rq(intf, RX_EDPKT_DMA_VALID);
}

static void bcmasp_rx_edpkt_cfg_wq(struct bcmasp_intf *intf, dma_addr_t addr)
{
	rx_edpkt_cfg_wq(intf, addr, RX_EDPKT_RING_BUFFER_READ);
}

static void bcmasp_rx_edpkt_dma_wq(struct bcmasp_intf *intf, dma_addr_t addr)
{
	rx_edpkt_dma_wq(intf, addr, RX_EDPKT_DMA_READ);
}

static unsigned long bcmasp_tx_spb_dma_rq(struct bcmasp_intf *intf)
{
	return tx_spb_dma_rq(intf, TX_SPB_DMA_READ);
}

static void bcmasp_tx_spb_dma_wq(struct bcmasp_intf *intf, dma_addr_t addr)
{
	tx_spb_dma_wq(intf, addr, TX_SPB_DMA_VALID);
}

static const struct bcmasp_intf_ops bcmasp_intf_ops = {
	.rx_desc_read = bcmasp_rx_edpkt_dma_rq,
	.rx_buffer_write = bcmasp_rx_edpkt_cfg_wq,
	.rx_desc_write = bcmasp_rx_edpkt_dma_wq,
	.tx_read = bcmasp_tx_spb_dma_rq,
	.tx_write = bcmasp_tx_spb_dma_wq,
};

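/* Transmit path: each skb fragment gets one SPB descriptor. The first
 * descriptor carries DESC_SOF (plus DESC_EPKT_CMD when a csum offload
 * block was prepended), the last one DESC_EOF. The hardware is kicked
 * by writing the updated "valid" byte pointer.
 */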
static netdev_tx_t bcmasp_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct bcmasp_intf *intf = netdev_priv(dev);
	unsigned int total_bytes, size;
	int spb_index, nr_frags, i, j;
	struct bcmasp_tx_cb *txcb;
	dma_addr_t mapping, valid;
	struct bcmasp_desc *desc;
	bool csum_hw = false;
	struct device *kdev;
	skb_frag_t *frag;

	kdev = &intf->parent->pdev->dev;

	nr_frags = skb_shinfo(skb)->nr_frags;

	if (tx_spb_ring_full(intf, nr_frags + 1)) {
		netif_stop_queue(dev);
		if (net_ratelimit())
			netdev_err(dev, "Tx Ring Full!\n");
		return NETDEV_TX_BUSY;
	}

	/* Save skb len before adding csum offload header */
	total_bytes = skb->len;
	skb = bcmasp_csum_offload(dev, skb, &csum_hw);
	if (!skb)
		return NETDEV_TX_OK;

	spb_index = intf->tx_spb_index;
	valid = intf->tx_spb_dma_valid;
	for (i = 0; i <= nr_frags; i++) {
		if (i == 0) {
			size = skb_headlen(skb);
			if (!nr_frags && size < (ETH_ZLEN + ETH_FCS_LEN)) {
				if (skb_put_padto(skb, ETH_ZLEN + ETH_FCS_LEN))
					return NETDEV_TX_OK;
				size = skb->len;
			}
			mapping = dma_map_single(kdev, skb->data, size,
						 DMA_TO_DEVICE);
		} else {
			frag = &skb_shinfo(skb)->frags[i - 1];
			size = skb_frag_size(frag);
			mapping = skb_frag_dma_map(kdev, frag, 0, size,
						   DMA_TO_DEVICE);
		}

		if (dma_mapping_error(kdev, mapping)) {
			intf->mib.tx_dma_failed++;
			spb_index = intf->tx_spb_index;
			for (j = 0; j < i; j++) {
				bcmasp_clean_txcb(intf, spb_index);
				spb_index = incr_ring(spb_index,
						      DESC_RING_COUNT);
			}
			/* Rewind so we do not have a hole */
			spb_index = intf->tx_spb_index;
			return NETDEV_TX_OK;
		}

		txcb = &intf->tx_cbs[spb_index];
		desc = &intf->tx_spb_cpu[spb_index];
		memset(desc, 0, sizeof(*desc));
		txcb->skb = skb;
		txcb->bytes_sent = total_bytes;
		dma_unmap_addr_set(txcb, dma_addr, mapping);
		dma_unmap_len_set(txcb, dma_len, size);
		if (!i) {
			desc->flags |= DESC_SOF;
			if (csum_hw)
				desc->flags |= DESC_EPKT_CMD;
		}

		if (i == nr_frags) {
			desc->flags |= DESC_EOF;
			txcb->last = true;
		}

		desc->buf = mapping;
		desc->size = size;
		desc->flags |= DESC_INT_EN;

		netif_dbg(intf, tx_queued, dev,
			  "%s dma_buf=%pad dma_len=0x%x flags=0x%x index=0x%x\n",
			  __func__, &mapping, desc->size, desc->flags,
			  spb_index);

		spb_index = incr_ring(spb_index, DESC_RING_COUNT);
		valid = incr_last_byte(valid, intf->tx_spb_dma_addr,
				       DESC_RING_COUNT);
	}

	/* Ensure all descriptors have been written to DRAM for the
	 * hardware to see up-to-date contents.
	 */
	wmb();

	intf->tx_spb_index = spb_index;
	intf->tx_spb_dma_valid = valid;
	bcmasp_intf_tx_write(intf, intf->tx_spb_dma_valid);

	if (tx_spb_ring_full(intf, MAX_SKB_FRAGS + 1))
		netif_stop_queue(dev);

	return NETDEV_TX_OK;
}

static void bcmasp_netif_start(struct net_device *dev)
{
	struct bcmasp_intf *intf = netdev_priv(dev);

	bcmasp_set_rx_mode(dev);
	napi_enable(&intf->tx_napi);
	napi_enable(&intf->rx_napi);

	bcmasp_enable_rx_irq(intf, 1);
	bcmasp_enable_tx_irq(intf, 1);
	bcmasp_enable_phy_irq(intf, 1);

	phy_start(dev->phydev);
}

static void umac_reset(struct bcmasp_intf *intf)
{
	umac_wl(intf, 0x0, UMC_CMD);
	umac_wl(intf, UMC_CMD_SW_RESET, UMC_CMD);
	usleep_range(10, 100);
	umac_wl(intf, 0x0, UMC_CMD);
}

static void umac_set_hw_addr(struct bcmasp_intf *intf,
			     const unsigned char *addr)
{
	u32 mac0 = (addr[0] << 24) | (addr[1] << 16) | (addr[2] << 8) |
		    addr[3];
	u32 mac1 = (addr[4] << 8) | addr[5];

	umac_wl(intf, mac0, UMC_MAC0);
	umac_wl(intf, mac1, UMC_MAC1);
}

static void umac_enable_set(struct bcmasp_intf *intf, u32 mask,
			    unsigned int enable)
{
	u32 reg;

	reg = umac_rl(intf, UMC_CMD);
	if (enable)
		reg |= mask;
	else
		reg &= ~mask;
	umac_wl(intf, reg, UMC_CMD);

	/* UniMAC stops on a packet boundary, wait for a full-sized packet
	 * to be processed (1 msec).
	 */
	if (enable == 0)
		usleep_range(1000, 2000);
}

static void umac_init(struct bcmasp_intf *intf)
{
	umac_wl(intf, 0x800, UMC_FRM_LEN);
	umac_wl(intf, 0xffff, UMC_PAUSE_CNTRL);
	umac_wl(intf, 0x800, UMC_RX_MAX_PKT_SZ);
	umac_enable_set(intf, UMC_CMD_PROMISC, 1);
}

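/* TX completion: walk the ring from the last cleaned entry up to the
 * hardware read pointer, unmapping buffers as we go and freeing the skb
 * on its final descriptor (txcb->last).
 */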
static int bcmasp_tx_poll(struct napi_struct *napi, int budget)
{
	struct bcmasp_intf *intf =
		container_of(napi, struct bcmasp_intf, tx_napi);
	struct bcmasp_intf_stats64 *stats = &intf->stats64;
	struct device *kdev = &intf->parent->pdev->dev;
	unsigned long read, released = 0;
	struct bcmasp_tx_cb *txcb;
	struct bcmasp_desc *desc;
	dma_addr_t mapping;

	read = bcmasp_intf_tx_read(intf);
	while (intf->tx_spb_dma_read != read) {
		txcb = &intf->tx_cbs[intf->tx_spb_clean_index];
		mapping = dma_unmap_addr(txcb, dma_addr);

		dma_unmap_single(kdev, mapping,
				 dma_unmap_len(txcb, dma_len),
				 DMA_TO_DEVICE);

		if (txcb->last) {
			dev_consume_skb_any(txcb->skb);

			u64_stats_update_begin(&stats->syncp);
			u64_stats_inc(&stats->tx_packets);
			u64_stats_add(&stats->tx_bytes, txcb->bytes_sent);
			u64_stats_update_end(&stats->syncp);
		}

		desc = &intf->tx_spb_cpu[intf->tx_spb_clean_index];

		netif_dbg(intf, tx_done, intf->ndev,
			  "%s dma_buf=%pad dma_len=0x%x flags=0x%x c_index=0x%x\n",
			  __func__, &mapping, desc->size, desc->flags,
			  intf->tx_spb_clean_index);

		bcmasp_clean_txcb(intf, intf->tx_spb_clean_index);
		released++;

		intf->tx_spb_clean_index = incr_ring(intf->tx_spb_clean_index,
						     DESC_RING_COUNT);
		intf->tx_spb_dma_read = incr_first_byte(intf->tx_spb_dma_read,
							intf->tx_spb_dma_addr,
							DESC_RING_COUNT);
	}

	/* Ensure all descriptors have been written to DRAM for the hardware
	 * to see updated contents.
	 */
	wmb();

	napi_complete(&intf->tx_napi);

	bcmasp_enable_tx_irq(intf, 1);

	if (released)
		netif_wake_queue(intf->ndev);

	return 0;
}

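/* RX is copy-based: the hardware writes frames into one contiguous
 * DMA-mapped ring buffer and describes them with EDPKT descriptors.
 * Each received frame is copied into a freshly allocated skb, then the
 * buffer read pointer is handed back to the hardware.
 */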
static int bcmasp_rx_poll(struct napi_struct *napi, int budget)
{
	struct bcmasp_intf *intf =
		container_of(napi, struct bcmasp_intf, rx_napi);
	struct bcmasp_intf_stats64 *stats = &intf->stats64;
	struct device *kdev = &intf->parent->pdev->dev;
	unsigned long processed = 0;
	struct bcmasp_desc *desc;
	struct sk_buff *skb;
	dma_addr_t valid;
	void *data;
	u64 flags;
	u32 len;

	valid = bcmasp_intf_rx_desc_read(intf) + 1;
	if (valid == intf->rx_edpkt_dma_addr + DESC_RING_SIZE)
		valid = intf->rx_edpkt_dma_addr;

	while ((processed < budget) && (valid != intf->rx_edpkt_dma_read)) {
		desc = &intf->rx_edpkt_cpu[intf->rx_edpkt_index];

		/* Ensure that descriptor has been fully written to DRAM by
		 * hardware before reading by the CPU
		 */
		rmb();

		/* Calculate virt addr by offsetting from physical addr */
		data = intf->rx_ring_cpu +
			(DESC_ADDR(desc->buf) - intf->rx_ring_dma);

		flags = DESC_FLAGS(desc->buf);
		if (unlikely(flags & (DESC_CRC_ERR | DESC_RX_SYM_ERR))) {
			if (net_ratelimit()) {
				netif_err(intf, rx_status, intf->ndev,
					  "flags=0x%llx\n", flags);
			}

			u64_stats_update_begin(&stats->syncp);
			if (flags & DESC_CRC_ERR)
				u64_stats_inc(&stats->rx_crc_errs);
			if (flags & DESC_RX_SYM_ERR)
				u64_stats_inc(&stats->rx_sym_errs);
			u64_stats_update_end(&stats->syncp);

			goto next;
		}

		dma_sync_single_for_cpu(kdev, DESC_ADDR(desc->buf), desc->size,
					DMA_FROM_DEVICE);

		len = desc->size;

		skb = napi_alloc_skb(napi, len);
		if (!skb) {
			u64_stats_update_begin(&stats->syncp);
			u64_stats_inc(&stats->rx_dropped);
			u64_stats_update_end(&stats->syncp);
			intf->mib.alloc_rx_skb_failed++;

			goto next;
		}

		skb_put(skb, len);
		memcpy(skb->data, data, len);

		skb_pull(skb, 2);
		len -= 2;
		if (likely(intf->crc_fwd)) {
			skb_trim(skb, len - ETH_FCS_LEN);
			len -= ETH_FCS_LEN;
		}

		if ((intf->ndev->features & NETIF_F_RXCSUM) &&
		    (desc->buf & DESC_CHKSUM))
			skb->ip_summed = CHECKSUM_UNNECESSARY;

		skb->protocol = eth_type_trans(skb, intf->ndev);

		napi_gro_receive(napi, skb);

		u64_stats_update_begin(&stats->syncp);
		u64_stats_inc(&stats->rx_packets);
		u64_stats_add(&stats->rx_bytes, len);
		u64_stats_update_end(&stats->syncp);

next:
		bcmasp_intf_rx_buffer_write(intf, (DESC_ADDR(desc->buf) +
					    desc->size));

		processed++;
		intf->rx_edpkt_dma_read =
			incr_first_byte(intf->rx_edpkt_dma_read,
					intf->rx_edpkt_dma_addr,
					DESC_RING_COUNT);
		intf->rx_edpkt_index = incr_ring(intf->rx_edpkt_index,
						 DESC_RING_COUNT);
	}

	bcmasp_intf_rx_desc_write(intf, intf->rx_edpkt_dma_read);

	if (processed < budget) {
		napi_complete_done(&intf->rx_napi, processed);
		bcmasp_enable_rx_irq(intf, 1);
	}

	return processed;
}

static void bcmasp_adj_link(struct net_device *dev)
{
	struct bcmasp_intf *intf = netdev_priv(dev);
	struct phy_device *phydev = dev->phydev;
	u32 cmd_bits = 0, reg;
	int changed = 0;
	bool active;

	if (intf->old_link != phydev->link) {
		changed = 1;
		intf->old_link = phydev->link;
	}

	if (intf->old_duplex != phydev->duplex) {
		changed = 1;
		intf->old_duplex = phydev->duplex;
	}

	switch (phydev->speed) {
	case SPEED_2500:
		cmd_bits = UMC_CMD_SPEED_2500;
		break;
	case SPEED_1000:
		cmd_bits = UMC_CMD_SPEED_1000;
		break;
	case SPEED_100:
		cmd_bits = UMC_CMD_SPEED_100;
		break;
	case SPEED_10:
		cmd_bits = UMC_CMD_SPEED_10;
		break;
	default:
		break;
	}
	cmd_bits <<= UMC_CMD_SPEED_SHIFT;

	if (phydev->duplex == DUPLEX_HALF)
		cmd_bits |= UMC_CMD_HD_EN;

	if (intf->old_pause != phydev->pause) {
		changed = 1;
		intf->old_pause = phydev->pause;
	}

	if (!phydev->pause)
		cmd_bits |= UMC_CMD_RX_PAUSE_IGNORE | UMC_CMD_TX_PAUSE_IGNORE;

	if (!changed)
		return;

	if (phydev->link) {
		reg = umac_rl(intf, UMC_CMD);
		reg &= ~((UMC_CMD_SPEED_MASK << UMC_CMD_SPEED_SHIFT) |
			UMC_CMD_HD_EN | UMC_CMD_RX_PAUSE_IGNORE |
			UMC_CMD_TX_PAUSE_IGNORE);
		reg |= cmd_bits;
		umac_wl(intf, reg, UMC_CMD);

		active = phy_init_eee(phydev, 0) >= 0;
		bcmasp_eee_enable_set(intf, active);
	}

	reg = rgmii_rl(intf, RGMII_OOB_CNTRL);
	if (phydev->link)
		reg |= RGMII_LINK;
	else
		reg &= ~RGMII_LINK;
	rgmii_wl(intf, reg, RGMII_OOB_CNTRL);

	if (changed)
		phy_print_status(phydev);
}

static int bcmasp_alloc_buffers(struct bcmasp_intf *intf)
{
	struct device *kdev = &intf->parent->pdev->dev;
	struct page *buffer_pg;

	/* Alloc RX */
	intf->rx_buf_order = get_order(RING_BUFFER_SIZE);
	buffer_pg = alloc_pages(GFP_KERNEL, intf->rx_buf_order);
	if (!buffer_pg)
		return -ENOMEM;

	intf->rx_ring_cpu = page_to_virt(buffer_pg);
	intf->rx_ring_dma = dma_map_page(kdev, buffer_pg, 0, RING_BUFFER_SIZE,
					 DMA_FROM_DEVICE);
	if (dma_mapping_error(kdev, intf->rx_ring_dma))
		goto free_rx_buffer;

	intf->rx_edpkt_cpu = dma_alloc_coherent(kdev, DESC_RING_SIZE,
						&intf->rx_edpkt_dma_addr, GFP_KERNEL);
	if (!intf->rx_edpkt_cpu)
		goto free_rx_buffer_dma;

	/* Alloc TX */
	intf->tx_spb_cpu = dma_alloc_coherent(kdev, DESC_RING_SIZE,
					      &intf->tx_spb_dma_addr, GFP_KERNEL);
	if (!intf->tx_spb_cpu)
		goto free_rx_edpkt_dma;

	intf->tx_cbs = kcalloc(DESC_RING_COUNT, sizeof(struct bcmasp_tx_cb),
			       GFP_KERNEL);
	if (!intf->tx_cbs)
		goto free_tx_spb_dma;

	return 0;

free_tx_spb_dma:
	dma_free_coherent(kdev, DESC_RING_SIZE, intf->tx_spb_cpu,
			  intf->tx_spb_dma_addr);
free_rx_edpkt_dma:
	dma_free_coherent(kdev, DESC_RING_SIZE, intf->rx_edpkt_cpu,
			  intf->rx_edpkt_dma_addr);
free_rx_buffer_dma:
	dma_unmap_page(kdev, intf->rx_ring_dma, RING_BUFFER_SIZE,
		       DMA_FROM_DEVICE);
free_rx_buffer:
	__free_pages(buffer_pg, intf->rx_buf_order);

	return -ENOMEM;
}

static void bcmasp_reclaim_free_buffers(struct bcmasp_intf *intf)
{
	struct device *kdev = &intf->parent->pdev->dev;

	/* RX buffers */
	dma_free_coherent(kdev, DESC_RING_SIZE, intf->rx_edpkt_cpu,
			  intf->rx_edpkt_dma_addr);
	dma_unmap_page(kdev, intf->rx_ring_dma, RING_BUFFER_SIZE,
		       DMA_FROM_DEVICE);
	__free_pages(virt_to_page(intf->rx_ring_cpu), intf->rx_buf_order);

	/* TX buffers */
	dma_free_coherent(kdev, DESC_RING_SIZE, intf->tx_spb_cpu,
			  intf->tx_spb_dma_addr);
	kfree(intf->tx_cbs);
}

static void bcmasp_init_rx(struct bcmasp_intf *intf)
{
	/* Restart from index 0 */
	intf->rx_ring_dma_valid = intf->rx_ring_dma + RING_BUFFER_SIZE - 1;
	intf->rx_edpkt_dma_valid = intf->rx_edpkt_dma_addr + (DESC_RING_SIZE - 1);
	intf->rx_edpkt_dma_read = intf->rx_edpkt_dma_addr;
	intf->rx_edpkt_index = 0;

	/* Make sure channels are disabled */
	rx_edpkt_cfg_wl(intf, 0x0, RX_EDPKT_CFG_ENABLE);

	/* Rx SPB */
	rx_edpkt_cfg_wq(intf, intf->rx_ring_dma, RX_EDPKT_RING_BUFFER_READ);
	rx_edpkt_cfg_wq(intf, intf->rx_ring_dma, RX_EDPKT_RING_BUFFER_WRITE);
	rx_edpkt_cfg_wq(intf, intf->rx_ring_dma, RX_EDPKT_RING_BUFFER_BASE);
	rx_edpkt_cfg_wq(intf, intf->rx_ring_dma_valid,
			RX_EDPKT_RING_BUFFER_END);
	rx_edpkt_cfg_wq(intf, intf->rx_ring_dma_valid,
			RX_EDPKT_RING_BUFFER_VALID);

	/* EDPKT */
	rx_edpkt_cfg_wl(intf, (RX_EDPKT_CFG_CFG0_RBUF_4K <<
			RX_EDPKT_CFG_CFG0_DBUF_SHIFT) |
		       (RX_EDPKT_CFG_CFG0_64_ALN <<
			RX_EDPKT_CFG_CFG0_BALN_SHIFT) |
		       (RX_EDPKT_CFG_CFG0_EFRM_STUF),
			RX_EDPKT_CFG_CFG0);
	rx_edpkt_dma_wq(intf, intf->rx_edpkt_dma_addr, RX_EDPKT_DMA_WRITE);
	rx_edpkt_dma_wq(intf, intf->rx_edpkt_dma_addr, RX_EDPKT_DMA_READ);
	rx_edpkt_dma_wq(intf, intf->rx_edpkt_dma_addr, RX_EDPKT_DMA_BASE);
	rx_edpkt_dma_wq(intf, intf->rx_edpkt_dma_valid, RX_EDPKT_DMA_END);
	rx_edpkt_dma_wq(intf, intf->rx_edpkt_dma_valid, RX_EDPKT_DMA_VALID);

	umac2fb_wl(intf, UMAC2FB_CFG_DEFAULT_EN | ((intf->channel + 11) <<
		   UMAC2FB_CFG_CHID_SHIFT) | (0xd << UMAC2FB_CFG_OK_SEND_SHIFT),
		   UMAC2FB_CFG);
}

static void bcmasp_init_tx(struct bcmasp_intf *intf)
{
	/* Restart from index 0 */
	intf->tx_spb_dma_valid = intf->tx_spb_dma_addr + DESC_RING_SIZE - 1;
	intf->tx_spb_dma_read = intf->tx_spb_dma_addr;
	intf->tx_spb_index = 0;
	intf->tx_spb_clean_index = 0;

	/* Make sure channels are disabled */
	tx_spb_ctrl_wl(intf, 0x0, TX_SPB_CTRL_ENABLE);
	tx_epkt_core_wl(intf, 0x0, TX_EPKT_C_CFG_MISC);

	/* Tx SPB */
	tx_spb_ctrl_wl(intf, ((intf->channel + 8) << TX_SPB_CTRL_XF_BID_SHIFT),
		       TX_SPB_CTRL_XF_CTRL2);
	tx_pause_ctrl_wl(intf, (1 << (intf->channel + 8)), TX_PAUSE_MAP_VECTOR);
	tx_spb_top_wl(intf, 0x1e, TX_SPB_TOP_BLKOUT);
	tx_spb_top_wl(intf, 0x0, TX_SPB_TOP_SPRE_BW_CTRL);

	tx_spb_dma_wq(intf, intf->tx_spb_dma_addr, TX_SPB_DMA_READ);
	tx_spb_dma_wq(intf, intf->tx_spb_dma_addr, TX_SPB_DMA_BASE);
	tx_spb_dma_wq(intf, intf->tx_spb_dma_valid, TX_SPB_DMA_END);
	tx_spb_dma_wq(intf, intf->tx_spb_dma_valid, TX_SPB_DMA_VALID);
}

static void bcmasp_ephy_enable_set(struct bcmasp_intf *intf, bool enable)
{
	u32 mask = RGMII_EPHY_CFG_IDDQ_BIAS | RGMII_EPHY_CFG_EXT_PWRDOWN |
		   RGMII_EPHY_CFG_IDDQ_GLOBAL;
	u32 reg;

	reg = rgmii_rl(intf, RGMII_EPHY_CNTRL);
	if (enable) {
		reg &= ~RGMII_EPHY_CK25_DIS;
		rgmii_wl(intf, reg, RGMII_EPHY_CNTRL);
		mdelay(1);

		reg &= ~mask;
		reg |= RGMII_EPHY_RESET;
		rgmii_wl(intf, reg, RGMII_EPHY_CNTRL);
		mdelay(1);

		reg &= ~RGMII_EPHY_RESET;
	} else {
		reg |= mask | RGMII_EPHY_RESET;
		rgmii_wl(intf, reg, RGMII_EPHY_CNTRL);
		mdelay(1);
		reg |= RGMII_EPHY_CK25_DIS;
	}
	rgmii_wl(intf, reg, RGMII_EPHY_CNTRL);
	mdelay(1);

	/* Set or clear the LED control override to avoid lighting up LEDs
	 * while the EPHY is powered off and drawing unnecessary current.
	 */
	reg = rgmii_rl(intf, RGMII_SYS_LED_CNTRL);
	if (enable)
		reg &= ~RGMII_SYS_LED_CNTRL_LINK_OVRD;
	else
		reg |= RGMII_SYS_LED_CNTRL_LINK_OVRD;
	rgmii_wl(intf, reg, RGMII_SYS_LED_CNTRL);
}

static void bcmasp_rgmii_mode_en_set(struct bcmasp_intf *intf, bool enable)
{
	u32 reg;

	reg = rgmii_rl(intf, RGMII_OOB_CNTRL);
	reg &= ~RGMII_OOB_DIS;
	if (enable)
		reg |= RGMII_MODE_EN;
	else
		reg &= ~RGMII_MODE_EN;
	rgmii_wl(intf, reg, RGMII_OOB_CNTRL);
}

static void bcmasp_netif_deinit(struct net_device *dev)
{
	struct bcmasp_intf *intf = netdev_priv(dev);
	u32 reg, timeout = 1000;

	napi_disable(&intf->tx_napi);

	bcmasp_enable_tx(intf, 0);

	/* Flush any TX packets in the pipe */
	tx_spb_dma_wl(intf, TX_SPB_DMA_FIFO_FLUSH, TX_SPB_DMA_FIFO_CTRL);
	do {
		reg = tx_spb_dma_rl(intf, TX_SPB_DMA_FIFO_STATUS);
		if (!(reg & TX_SPB_DMA_FIFO_FLUSH))
			break;
		usleep_range(1000, 2000);
	} while (timeout-- > 0);
	tx_spb_dma_wl(intf, 0x0, TX_SPB_DMA_FIFO_CTRL);

	umac_enable_set(intf, UMC_CMD_TX_EN, 0);

	phy_stop(dev->phydev);

	umac_enable_set(intf, UMC_CMD_RX_EN, 0);

	bcmasp_flush_rx_port(intf);
	usleep_range(1000, 2000);
	bcmasp_enable_rx(intf, 0);

	napi_disable(&intf->rx_napi);

	/* Disable interrupts */
	bcmasp_enable_tx_irq(intf, 0);
	bcmasp_enable_rx_irq(intf, 0);
	bcmasp_enable_phy_irq(intf, 0);

	netif_napi_del(&intf->tx_napi);
	netif_napi_del(&intf->rx_napi);
}

static int bcmasp_stop(struct net_device *dev)
{
	struct bcmasp_intf *intf = netdev_priv(dev);

	netif_dbg(intf, ifdown, dev, "bcmasp stop\n");

	/* Stop tx from updating HW */
	netif_tx_disable(dev);

	bcmasp_netif_deinit(dev);

	bcmasp_reclaim_free_buffers(intf);

	phy_disconnect(dev->phydev);

	/* Disable internal EPHY or external PHY */
	if (intf->internal_phy)
		bcmasp_ephy_enable_set(intf, false);
	else
		bcmasp_rgmii_mode_en_set(intf, false);

	/* Disable the interface clocks */
	bcmasp_core_clock_set_intf(intf, false);

	clk_disable_unprepare(intf->parent->clk);

	return 0;
}

static void bcmasp_configure_port(struct bcmasp_intf *intf)
{
	u32 reg, id_mode_dis = 0;

	reg = rgmii_rl(intf, RGMII_PORT_CNTRL);
	reg &= ~RGMII_PORT_MODE_MASK;

	switch (intf->phy_interface) {
	case PHY_INTERFACE_MODE_RGMII:
		/* RGMII_NO_ID: TXC transitions at the same time as TXD
		 *		(requires PCB or receiver-side delay)
		 * RGMII:	Add 2ns delay on TXC (90 degree shift)
		 *
		 * ID is implicitly disabled for 100Mbps (RG)MII operation.
		 */
		id_mode_dis = RGMII_ID_MODE_DIS;
		fallthrough;
	case PHY_INTERFACE_MODE_RGMII_TXID:
		reg |= RGMII_PORT_MODE_EXT_GPHY;
		break;
	case PHY_INTERFACE_MODE_MII:
		reg |= RGMII_PORT_MODE_EXT_EPHY;
		break;
	default:
		break;
	}

	if (intf->internal_phy)
		reg |= RGMII_PORT_MODE_EPHY;

	rgmii_wl(intf, reg, RGMII_PORT_CNTRL);

	reg = rgmii_rl(intf, RGMII_OOB_CNTRL);
	reg &= ~RGMII_ID_MODE_DIS;
	reg |= id_mode_dis;
	rgmii_wl(intf, reg, RGMII_OOB_CNTRL);
}

static int bcmasp_netif_init(struct net_device *dev, bool phy_connect)
{
	struct bcmasp_intf *intf = netdev_priv(dev);
	phy_interface_t phy_iface = intf->phy_interface;
	u32 phy_flags = PHY_BRCM_AUTO_PWRDWN_ENABLE |
			PHY_BRCM_DIS_TXCRXC_NOENRGY |
			PHY_BRCM_IDDQ_SUSPEND;
	struct phy_device *phydev = NULL;
	int ret;

	/* Always enable interface clocks */
	bcmasp_core_clock_set_intf(intf, true);

	/* Enable internal PHY or external PHY before any MAC activity */
	if (intf->internal_phy)
		bcmasp_ephy_enable_set(intf, true);
	else
		bcmasp_rgmii_mode_en_set(intf, true);
	bcmasp_configure_port(intf);

	/* This is an ugly quirk but we have not been correctly
	 * interpreting the phy_interface values and we have done that
	 * across different drivers, so at least we are consistent in
	 * our mistakes.
	 *
	 * When the Generic PHY driver is in use either the PHY has
	 * been strapped or programmed correctly by the boot loader so
	 * we should stick to our incorrect interpretation since we
	 * have validated it.
	 *
	 * Now when a dedicated PHY driver is in use, we need to
	 * reverse the meaning of the phy_interface_mode values to
	 * something that the PHY driver will interpret and act on such
	 * that we have two mistakes canceling themselves so to speak.
	 * We only do this for the two modes that GENET driver
	 * officially supports on Broadcom STB chips:
	 * PHY_INTERFACE_MODE_RGMII and PHY_INTERFACE_MODE_RGMII_TXID.
	 * Other modes are not *officially* supported with the boot
	 * loader and the scripted environment generating Device Tree
	 * blobs for those platforms.
	 *
	 * Note that internal PHY and fixed-link configurations are not
	 * affected because they use different phy_interface_t values
	 * or the Generic PHY driver.
	 */
	switch (phy_iface) {
	case PHY_INTERFACE_MODE_RGMII:
		phy_iface = PHY_INTERFACE_MODE_RGMII_ID;
		break;
	case PHY_INTERFACE_MODE_RGMII_TXID:
		phy_iface = PHY_INTERFACE_MODE_RGMII_RXID;
		break;
	default:
		break;
	}

	if (phy_connect) {
		phydev = of_phy_connect(dev, intf->phy_dn,
					bcmasp_adj_link, phy_flags,
					phy_iface);
		if (!phydev) {
			ret = -ENODEV;
			netdev_err(dev, "could not attach to PHY\n");
			goto err_phy_disable;
		}

		if (intf->internal_phy)
			dev->phydev->irq = PHY_MAC_INTERRUPT;

		/* Indicate that the MAC is responsible for PHY PM */
		phydev->mac_managed_pm = true;
	} else if (!intf->wolopts) {
		ret = phy_resume(dev->phydev);
		if (ret)
			goto err_phy_disable;
	}

	umac_reset(intf);

	umac_init(intf);

	/* Disable the UniMAC RX/TX */
	umac_enable_set(intf, (UMC_CMD_RX_EN | UMC_CMD_TX_EN), 0);

	umac_set_hw_addr(intf, dev->dev_addr);

	intf->old_duplex = -1;
	intf->old_link = -1;
	intf->old_pause = -1;

	bcmasp_init_tx(intf);
	netif_napi_add_tx(intf->ndev, &intf->tx_napi, bcmasp_tx_poll);
	bcmasp_enable_tx(intf, 1);

	bcmasp_init_rx(intf);
	netif_napi_add(intf->ndev, &intf->rx_napi, bcmasp_rx_poll);
	bcmasp_enable_rx(intf, 1);

	/* Turn on UniMAC TX/RX */
	umac_enable_set(intf, (UMC_CMD_RX_EN | UMC_CMD_TX_EN), 1);

	intf->crc_fwd = !!(umac_rl(intf, UMC_CMD) & UMC_CMD_CRC_FWD);

	bcmasp_netif_start(dev);

	netif_start_queue(dev);

	return 0;

err_phy_disable:
	if (intf->internal_phy)
		bcmasp_ephy_enable_set(intf, false);
	else
		bcmasp_rgmii_mode_en_set(intf, false);
	return ret;
}

static int bcmasp_open(struct net_device *dev)
{
	struct bcmasp_intf *intf = netdev_priv(dev);
	int ret;

	netif_dbg(intf, ifup, dev, "bcmasp open\n");

	ret = bcmasp_alloc_buffers(intf);
	if (ret)
		return ret;

	ret = clk_prepare_enable(intf->parent->clk);
	if (ret)
		goto err_free_mem;

	ret = bcmasp_netif_init(dev, true);
	if (ret) {
		clk_disable_unprepare(intf->parent->clk);
		goto err_free_mem;
	}

	return ret;

err_free_mem:
	bcmasp_reclaim_free_buffers(intf);

	return ret;
}

static void bcmasp_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
	struct bcmasp_intf *intf = netdev_priv(dev);

	netif_dbg(intf, tx_err, dev, "transmit timeout!\n");
	intf->mib.tx_timeout_cnt++;
}

static int bcmasp_get_phys_port_name(struct net_device *dev,
				     char *name, size_t len)
{
	struct bcmasp_intf *intf = netdev_priv(dev);

	if (snprintf(name, len, "p%d", intf->port) >= len)
		return -EINVAL;

	return 0;
}

static void bcmasp_get_stats64(struct net_device *dev,
			       struct rtnl_link_stats64 *stats)
{
	struct bcmasp_intf *intf = netdev_priv(dev);
	struct bcmasp_intf_stats64 *lstats;
	unsigned int start;

	lstats = &intf->stats64;

	do {
		start = u64_stats_fetch_begin(&lstats->syncp);
		stats->rx_packets = u64_stats_read(&lstats->rx_packets);
		stats->rx_bytes = u64_stats_read(&lstats->rx_bytes);
		stats->rx_dropped = u64_stats_read(&lstats->rx_dropped);
		stats->rx_crc_errors = u64_stats_read(&lstats->rx_crc_errs);
		stats->rx_frame_errors = u64_stats_read(&lstats->rx_sym_errs);
		stats->rx_errors = stats->rx_crc_errors + stats->rx_frame_errors;

		stats->tx_packets = u64_stats_read(&lstats->tx_packets);
		stats->tx_bytes = u64_stats_read(&lstats->tx_bytes);
	} while (u64_stats_fetch_retry(&lstats->syncp, start));
}

static const struct net_device_ops bcmasp_netdev_ops = {
	.ndo_open		= bcmasp_open,
	.ndo_stop		= bcmasp_stop,
	.ndo_start_xmit		= bcmasp_xmit,
	.ndo_tx_timeout		= bcmasp_tx_timeout,
	.ndo_set_rx_mode	= bcmasp_set_rx_mode,
	.ndo_get_phys_port_name	= bcmasp_get_phys_port_name,
	.ndo_eth_ioctl		= phy_do_ioctl_running,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_get_stats64	= bcmasp_get_stats64,
};

static void bcmasp_map_res(struct bcmasp_priv *priv, struct bcmasp_intf *intf)
{
	/* Per port */
	intf->res.umac = priv->base + UMC_OFFSET(intf);
	intf->res.umac2fb = priv->base + (priv->hw_info->umac2fb +
					  (intf->port * 0x4));
	intf->res.rgmii = priv->base + RGMII_OFFSET(intf);

	/* Per ch */
	intf->tx_spb_dma = priv->base + TX_SPB_DMA_OFFSET(intf);
	intf->res.tx_spb_ctrl = priv->base + TX_SPB_CTRL_OFFSET(intf);
	intf->res.tx_spb_top = priv->base + TX_SPB_TOP_OFFSET(intf);
	intf->res.tx_epkt_core = priv->base + TX_EPKT_C_OFFSET(intf);
	intf->res.tx_pause_ctrl = priv->base + TX_PAUSE_CTRL_OFFSET(intf);

	intf->rx_edpkt_dma = priv->base + RX_EDPKT_DMA_OFFSET(intf);
	intf->rx_edpkt_cfg = priv->base + RX_EDPKT_CFG_OFFSET(intf);
}

#define MAX_IRQ_STR_LEN		64
struct bcmasp_intf *bcmasp_interface_create(struct bcmasp_priv *priv,
					    struct device_node *ndev_dn, int i)
{
	struct device *dev = &priv->pdev->dev;
	struct bcmasp_intf *intf;
	struct net_device *ndev;
	int ch, port, ret;

	if (of_property_read_u32(ndev_dn, "reg", &port)) {
		dev_warn(dev, "%s: invalid port number\n", ndev_dn->name);
		goto err;
	}

	if (of_property_read_u32(ndev_dn, "brcm,channel", &ch)) {
		dev_warn(dev, "%s: invalid ch number\n", ndev_dn->name);
		goto err;
	}

	ndev = alloc_etherdev(sizeof(struct bcmasp_intf));
	if (!ndev) {
		dev_warn(dev, "%s: unable to alloc ndev\n", ndev_dn->name);
		goto err;
	}
	intf = netdev_priv(ndev);

	intf->parent = priv;
	intf->ndev = ndev;
	intf->channel = ch;
	intf->port = port;
	intf->ndev_dn = ndev_dn;
	intf->index = i;

	ret = of_get_phy_mode(ndev_dn, &intf->phy_interface);
	if (ret < 0) {
		dev_err(dev, "invalid PHY mode property\n");
		goto err_free_netdev;
	}

	if (intf->phy_interface == PHY_INTERFACE_MODE_INTERNAL)
		intf->internal_phy = true;

	intf->phy_dn = of_parse_phandle(ndev_dn, "phy-handle", 0);
	if (!intf->phy_dn && of_phy_is_fixed_link(ndev_dn)) {
		ret = of_phy_register_fixed_link(ndev_dn);
		if (ret) {
			dev_warn(dev, "%s: failed to register fixed PHY\n",
				 ndev_dn->name);
			goto err_free_netdev;
		}
		intf->phy_dn = ndev_dn;
	}

	/* Map resource */
	bcmasp_map_res(priv, intf);

	if ((!phy_interface_mode_is_rgmii(intf->phy_interface) &&
	     intf->phy_interface != PHY_INTERFACE_MODE_MII &&
	     intf->phy_interface != PHY_INTERFACE_MODE_INTERNAL) ||
	    (intf->port != 1 && intf->internal_phy)) {
		netdev_err(intf->ndev, "invalid PHY mode: %s for port %d\n",
			   phy_modes(intf->phy_interface), intf->port);
		ret = -EINVAL;
		goto err_free_netdev;
	}

	ret = of_get_ethdev_address(ndev_dn, ndev);
	if (ret) {
		netdev_warn(ndev, "using random Ethernet MAC\n");
		eth_hw_addr_random(ndev);
	}

	SET_NETDEV_DEV(ndev, dev);
	intf->ops = &bcmasp_intf_ops;
	ndev->netdev_ops = &bcmasp_netdev_ops;
	ndev->ethtool_ops = &bcmasp_ethtool_ops;
	intf->msg_enable = netif_msg_init(-1, NETIF_MSG_DRV |
					  NETIF_MSG_PROBE |
					  NETIF_MSG_LINK);
	ndev->features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_SG |
			  NETIF_F_RXCSUM;
	ndev->hw_features |= ndev->features;
	ndev->needed_headroom += sizeof(struct bcmasp_pkt_offload);

	return intf;

err_free_netdev:
	free_netdev(ndev);
err:
	return NULL;
}

void bcmasp_interface_destroy(struct bcmasp_intf *intf)
{
	if (intf->ndev->reg_state == NETREG_REGISTERED)
		unregister_netdev(intf->ndev);
	if (of_phy_is_fixed_link(intf->ndev_dn))
		of_phy_deregister_fixed_link(intf->ndev_dn);
	free_netdev(intf->ndev);
}

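/* Wake-on-LAN: keep the UniMAC receiver running and arm magic-packet
 * detection (optionally gated by a SecureOn password) before the system
 * suspends; bcmasp_resume_from_wol() undoes this on resume.
 */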
static void bcmasp_suspend_to_wol(struct bcmasp_intf *intf)
{
	struct net_device *ndev = intf->ndev;
	u32 reg;

	reg = umac_rl(intf, UMC_MPD_CTRL);
	if (intf->wolopts & (WAKE_MAGIC | WAKE_MAGICSECURE))
		reg |= UMC_MPD_CTRL_MPD_EN;
	reg &= ~UMC_MPD_CTRL_PSW_EN;
	if (intf->wolopts & WAKE_MAGICSECURE) {
		/* Program the SecureOn password */
		umac_wl(intf, get_unaligned_be16(&intf->sopass[0]),
			UMC_PSW_MS);
		umac_wl(intf, get_unaligned_be32(&intf->sopass[2]),
			UMC_PSW_LS);
		reg |= UMC_MPD_CTRL_PSW_EN;
	}
	umac_wl(intf, reg, UMC_MPD_CTRL);

	if (intf->wolopts & WAKE_FILTER)
		bcmasp_netfilt_suspend(intf);

	/* UniMAC receive needs to be turned on */
	umac_enable_set(intf, UMC_CMD_RX_EN, 1);

	if (intf->parent->wol_irq > 0) {
		wakeup_intr2_core_wl(intf->parent, 0xffffffff,
				     ASP_WAKEUP_INTR2_MASK_CLEAR);
	}

	if (intf->eee.eee_enabled && intf->parent->eee_fixup)
		intf->parent->eee_fixup(intf, true);

	netif_dbg(intf, wol, ndev, "entered WOL mode\n");
}

int bcmasp_interface_suspend(struct bcmasp_intf *intf)
{
	struct device *kdev = &intf->parent->pdev->dev;
	struct net_device *dev = intf->ndev;
	int ret = 0;

	if (!netif_running(dev))
		return 0;

	netif_device_detach(dev);

	bcmasp_netif_deinit(dev);

	if (!intf->wolopts) {
		ret = phy_suspend(dev->phydev);
		if (ret)
			goto out;

		if (intf->internal_phy)
			bcmasp_ephy_enable_set(intf, false);
		else
			bcmasp_rgmii_mode_en_set(intf, false);

		/* If Wake-on-LAN is disabled, we can safely
		 * disable the network interface clocks.
		 */
		bcmasp_core_clock_set_intf(intf, false);
	}

	if (device_may_wakeup(kdev) && intf->wolopts)
		bcmasp_suspend_to_wol(intf);

	clk_disable_unprepare(intf->parent->clk);

	return ret;

out:
	bcmasp_netif_init(dev, false);
	return ret;
}

static void bcmasp_resume_from_wol(struct bcmasp_intf *intf)
{
	u32 reg;

	if (intf->eee.eee_enabled && intf->parent->eee_fixup)
		intf->parent->eee_fixup(intf, false);

	reg = umac_rl(intf, UMC_MPD_CTRL);
	reg &= ~UMC_MPD_CTRL_MPD_EN;
	umac_wl(intf, reg, UMC_MPD_CTRL);

	if (intf->parent->wol_irq > 0) {
		wakeup_intr2_core_wl(intf->parent, 0xffffffff,
				     ASP_WAKEUP_INTR2_MASK_SET);
	}
}

int bcmasp_interface_resume(struct bcmasp_intf *intf)
{
	struct net_device *dev = intf->ndev;
	int ret;

	if (!netif_running(dev))
		return 0;

	ret = clk_prepare_enable(intf->parent->clk);
	if (ret)
		return ret;

	ret = bcmasp_netif_init(dev, false);
	if (ret)
		goto out;

	bcmasp_resume_from_wol(intf);

	if (intf->eee.eee_enabled)
		bcmasp_eee_enable_set(intf, true);

	netif_device_attach(dev);

	return 0;

out:
	clk_disable_unprepare(intf->parent->clk);
	return ret;
}