/* Broadcom BCM7xxx System Port Ethernet MAC driver
 *
 * Copyright (C) 2014 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#define pr_fmt(fmt)	KBUILD_MODNAME ": " fmt

#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/platform_device.h>
#include <linux/of.h>
#include <linux/of_net.h>
#include <linux/of_mdio.h>
#include <linux/phy.h>
#include <linux/phy_fixed.h>
#include <net/dsa.h>
#include <net/ip.h>
#include <net/ipv6.h>

#include "bcmsysport.h"

/* I/O register accessor helpers */
#define BCM_SYSPORT_IO_MACRO(name, offset)				\
static inline u32 name##_readl(struct bcm_sysport_priv *priv, u32 off)	\
{									\
	u32 reg = readl_relaxed(priv->base + offset + off);		\
	return reg;							\
}									\
static inline void name##_writel(struct bcm_sysport_priv *priv,	\
				 u32 val, u32 off)			\
{									\
	writel_relaxed(val, priv->base + offset + off);			\
}

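/* Each BCM_SYSPORT_IO_MACRO() instantiation below expands to a pair of
 * accessors scoped to one register block; e.g. the umac instance yields
 *
 *   u32 umac_readl(struct bcm_sysport_priv *priv, u32 off);
 *   void umac_writel(struct bcm_sysport_priv *priv, u32 val, u32 off);
 *
 * so callers pass block-relative register offsets and the macro adds the
 * per-block base offset.
 */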
BCM_SYSPORT_IO_MACRO(intrl2_0, SYS_PORT_INTRL2_0_OFFSET);
BCM_SYSPORT_IO_MACRO(intrl2_1, SYS_PORT_INTRL2_1_OFFSET);
BCM_SYSPORT_IO_MACRO(umac, SYS_PORT_UMAC_OFFSET);
BCM_SYSPORT_IO_MACRO(gib, SYS_PORT_GIB_OFFSET);
BCM_SYSPORT_IO_MACRO(tdma, SYS_PORT_TDMA_OFFSET);
BCM_SYSPORT_IO_MACRO(rxchk, SYS_PORT_RXCHK_OFFSET);
BCM_SYSPORT_IO_MACRO(txchk, SYS_PORT_TXCHK_OFFSET);
BCM_SYSPORT_IO_MACRO(rbuf, SYS_PORT_RBUF_OFFSET);
BCM_SYSPORT_IO_MACRO(tbuf, SYS_PORT_TBUF_OFFSET);
BCM_SYSPORT_IO_MACRO(topctrl, SYS_PORT_TOPCTRL_OFFSET);

/* On SYSTEMPORT Lite, any register after RDMA_STATUS has the exact
 * same layout, except it has been moved up by 4 bytes, *sigh*
 */
static inline u32 rdma_readl(struct bcm_sysport_priv *priv, u32 off)
{
	if (priv->is_lite && off >= RDMA_STATUS)
		off += 4;
	return readl_relaxed(priv->base + SYS_PORT_RDMA_OFFSET + off);
}

static inline void rdma_writel(struct bcm_sysport_priv *priv, u32 val, u32 off)
{
	if (priv->is_lite && off >= RDMA_STATUS)
		off += 4;
	writel_relaxed(val, priv->base + SYS_PORT_RDMA_OFFSET + off);
}

static inline u32 tdma_control_bit(struct bcm_sysport_priv *priv, u32 bit)
{
	if (!priv->is_lite) {
		return BIT(bit);
	} else {
		/* On SYSTEMPORT Lite, TDMA_CONTROL bits at and above
		 * ACB_ALGO are shifted up by one position
		 */
		if (bit >= ACB_ALGO)
			bit += 1;
		return BIT(bit);
	}
}

/* L2-interrupt masking/unmasking helpers, which automatically save the
 * applied mask in a software copy to avoid CPU_MASK_STATUS reads in hot-paths.
 */
#define BCM_SYSPORT_INTR_L2(which)	\
static inline void intrl2_##which##_mask_clear(struct bcm_sysport_priv *priv, \
						u32 mask)		\
{									\
	priv->irq##which##_mask &= ~(mask);				\
	intrl2_##which##_writel(priv, mask, INTRL2_CPU_MASK_CLEAR);	\
}									\
static inline void intrl2_##which##_mask_set(struct bcm_sysport_priv *priv, \
						u32 mask)		\
{									\
	intrl2_##which##_writel(priv, mask, INTRL2_CPU_MASK_SET);	\
	priv->irq##which##_mask |= (mask);				\
}

BCM_SYSPORT_INTR_L2(0)
BCM_SYSPORT_INTR_L2(1)

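/* The two instantiations above generate intrl2_0_mask_{set,clear}() and
 * intrl2_1_mask_{set,clear}() used throughout the driver; the cached
 * priv->irq{0,1}_mask copy tracks the currently applied mask so hot paths
 * never need to read CPU_MASK_STATUS back from the hardware.
 */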
/* Register accesses to GISB/RBUS registers are expensive (a few hundred
 * nanoseconds), so keep the check for 64-bit explicit here to save
 * one register write per packet on 32-bit platforms.
 */
static inline void dma_desc_set_addr(struct bcm_sysport_priv *priv,
				     void __iomem *d,
				     dma_addr_t addr)
{
#ifdef CONFIG_PHYS_ADDR_T_64BIT
	writel_relaxed(upper_32_bits(addr) & DESC_ADDR_HI_MASK,
		       d + DESC_ADDR_HI_STATUS_LEN);
#endif
	writel_relaxed(lower_32_bits(addr), d + DESC_ADDR_LO);
}

static inline void tdma_port_write_desc_addr(struct bcm_sysport_priv *priv,
					     struct dma_desc *desc,
					     unsigned int port)
{
	/* Ports are latched, so write upper address first */
	tdma_writel(priv, desc->addr_status_len, TDMA_WRITE_PORT_HI(port));
	tdma_writel(priv, desc->addr_lo, TDMA_WRITE_PORT_LO(port));
}

/* Ethtool operations */
static void bcm_sysport_set_rx_csum(struct net_device *dev,
				    netdev_features_t wanted)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	u32 reg;

	priv->rx_chk_en = !!(wanted & NETIF_F_RXCSUM);
	reg = rxchk_readl(priv, RXCHK_CONTROL);
	if (priv->rx_chk_en)
		reg |= RXCHK_EN;
	else
		reg &= ~RXCHK_EN;

	/* If UniMAC forwards CRC, we need to skip over it to get
	 * a valid CHK bit set in the per-packet status word
	 */
	if (priv->rx_chk_en && priv->crc_fwd)
		reg |= RXCHK_SKIP_FCS;
	else
		reg &= ~RXCHK_SKIP_FCS;

	/* If Broadcom tags are enabled (e.g. when using a switch), make
	 * sure we tell the RXCHK hardware to expect a 4-byte Broadcom
	 * tag after the Ethernet MAC Source Address.
	 */
	if (netdev_uses_dsa(dev))
		reg |= RXCHK_BRCM_TAG_EN;
	else
		reg &= ~RXCHK_BRCM_TAG_EN;

	rxchk_writel(priv, reg, RXCHK_CONTROL);
}

static void bcm_sysport_set_tx_csum(struct net_device *dev,
				    netdev_features_t wanted)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	u32 reg;

	/* Hardware transmit checksum requires us to enable the Transmit status
	 * block prepended to the packet contents
	 */
	priv->tsb_en = !!(wanted & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM));
	reg = tdma_readl(priv, TDMA_CONTROL);
	if (priv->tsb_en)
		reg |= tdma_control_bit(priv, TSB_EN);
	else
		reg &= ~tdma_control_bit(priv, TSB_EN);
	tdma_writel(priv, reg, TDMA_CONTROL);
}

static int bcm_sysport_set_features(struct net_device *dev,
				    netdev_features_t features)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);

	/* Read CRC forward */
	if (!priv->is_lite)
		priv->crc_fwd = !!(umac_readl(priv, UMAC_CMD) & CMD_CRC_FWD);
	else
		priv->crc_fwd = !((gib_readl(priv, GIB_CONTROL) &
				   GIB_FCS_STRIP) >> GIB_FCS_STRIP_SHIFT);

	bcm_sysport_set_rx_csum(dev, features);
	bcm_sysport_set_tx_csum(dev, features);

	return 0;
}

/* Hardware counters must be kept in sync because the order/offset
 * is important here (order in structure declaration = order in hardware)
 */
static const struct bcm_sysport_stats bcm_sysport_gstrings_stats[] = {
	/* general stats */
	STAT_NETDEV64(rx_packets),
	STAT_NETDEV64(tx_packets),
	STAT_NETDEV64(rx_bytes),
	STAT_NETDEV64(tx_bytes),
	STAT_NETDEV(rx_errors),
	STAT_NETDEV(tx_errors),
	STAT_NETDEV(rx_dropped),
	STAT_NETDEV(tx_dropped),
	STAT_NETDEV(multicast),
	/* UniMAC RSV counters */
	STAT_MIB_RX("rx_64_octets", mib.rx.pkt_cnt.cnt_64),
	STAT_MIB_RX("rx_65_127_oct", mib.rx.pkt_cnt.cnt_127),
	STAT_MIB_RX("rx_128_255_oct", mib.rx.pkt_cnt.cnt_255),
	STAT_MIB_RX("rx_256_511_oct", mib.rx.pkt_cnt.cnt_511),
	STAT_MIB_RX("rx_512_1023_oct", mib.rx.pkt_cnt.cnt_1023),
	STAT_MIB_RX("rx_1024_1518_oct", mib.rx.pkt_cnt.cnt_1518),
	STAT_MIB_RX("rx_vlan_1519_1522_oct", mib.rx.pkt_cnt.cnt_mgv),
	STAT_MIB_RX("rx_1522_2047_oct", mib.rx.pkt_cnt.cnt_2047),
	STAT_MIB_RX("rx_2048_4095_oct", mib.rx.pkt_cnt.cnt_4095),
	STAT_MIB_RX("rx_4096_9216_oct", mib.rx.pkt_cnt.cnt_9216),
	STAT_MIB_RX("rx_pkts", mib.rx.pkt),
	STAT_MIB_RX("rx_bytes", mib.rx.bytes),
	STAT_MIB_RX("rx_multicast", mib.rx.mca),
	STAT_MIB_RX("rx_broadcast", mib.rx.bca),
	STAT_MIB_RX("rx_fcs", mib.rx.fcs),
	STAT_MIB_RX("rx_control", mib.rx.cf),
	STAT_MIB_RX("rx_pause", mib.rx.pf),
	STAT_MIB_RX("rx_unknown", mib.rx.uo),
	STAT_MIB_RX("rx_align", mib.rx.aln),
	STAT_MIB_RX("rx_outrange", mib.rx.flr),
	STAT_MIB_RX("rx_code", mib.rx.cde),
	STAT_MIB_RX("rx_carrier", mib.rx.fcr),
	STAT_MIB_RX("rx_oversize", mib.rx.ovr),
	STAT_MIB_RX("rx_jabber", mib.rx.jbr),
	STAT_MIB_RX("rx_mtu_err", mib.rx.mtue),
	STAT_MIB_RX("rx_good_pkts", mib.rx.pok),
	STAT_MIB_RX("rx_unicast", mib.rx.uc),
	STAT_MIB_RX("rx_ppp", mib.rx.ppp),
	STAT_MIB_RX("rx_crc", mib.rx.rcrc),
	/* UniMAC TSV counters */
	STAT_MIB_TX("tx_64_octets", mib.tx.pkt_cnt.cnt_64),
	STAT_MIB_TX("tx_65_127_oct", mib.tx.pkt_cnt.cnt_127),
	STAT_MIB_TX("tx_128_255_oct", mib.tx.pkt_cnt.cnt_255),
	STAT_MIB_TX("tx_256_511_oct", mib.tx.pkt_cnt.cnt_511),
	STAT_MIB_TX("tx_512_1023_oct", mib.tx.pkt_cnt.cnt_1023),
	STAT_MIB_TX("tx_1024_1518_oct", mib.tx.pkt_cnt.cnt_1518),
	STAT_MIB_TX("tx_vlan_1519_1522_oct", mib.tx.pkt_cnt.cnt_mgv),
	STAT_MIB_TX("tx_1522_2047_oct", mib.tx.pkt_cnt.cnt_2047),
	STAT_MIB_TX("tx_2048_4095_oct", mib.tx.pkt_cnt.cnt_4095),
	STAT_MIB_TX("tx_4096_9216_oct", mib.tx.pkt_cnt.cnt_9216),
	STAT_MIB_TX("tx_pkts", mib.tx.pkts),
	STAT_MIB_TX("tx_multicast", mib.tx.mca),
	STAT_MIB_TX("tx_broadcast", mib.tx.bca),
	STAT_MIB_TX("tx_pause", mib.tx.pf),
	STAT_MIB_TX("tx_control", mib.tx.cf),
	STAT_MIB_TX("tx_fcs_err", mib.tx.fcs),
	STAT_MIB_TX("tx_oversize", mib.tx.ovr),
	STAT_MIB_TX("tx_defer", mib.tx.drf),
	STAT_MIB_TX("tx_excess_defer", mib.tx.edf),
	STAT_MIB_TX("tx_single_col", mib.tx.scl),
	STAT_MIB_TX("tx_multi_col", mib.tx.mcl),
	STAT_MIB_TX("tx_late_col", mib.tx.lcl),
	STAT_MIB_TX("tx_excess_col", mib.tx.ecl),
	STAT_MIB_TX("tx_frags", mib.tx.frg),
	STAT_MIB_TX("tx_total_col", mib.tx.ncl),
	STAT_MIB_TX("tx_jabber", mib.tx.jbr),
	STAT_MIB_TX("tx_bytes", mib.tx.bytes),
	STAT_MIB_TX("tx_good_pkts", mib.tx.pok),
	STAT_MIB_TX("tx_unicast", mib.tx.uc),
	/* UniMAC RUNT counters */
	STAT_RUNT("rx_runt_pkts", mib.rx_runt_cnt),
	STAT_RUNT("rx_runt_valid_fcs", mib.rx_runt_fcs),
	STAT_RUNT("rx_runt_inval_fcs_align", mib.rx_runt_fcs_align),
	STAT_RUNT("rx_runt_bytes", mib.rx_runt_bytes),
	/* RXCHK misc statistics */
	STAT_RXCHK("rxchk_bad_csum", mib.rxchk_bad_csum, RXCHK_BAD_CSUM_CNTR),
	STAT_RXCHK("rxchk_other_pkt_disc", mib.rxchk_other_pkt_disc,
		   RXCHK_OTHER_DISC_CNTR),
	/* RBUF misc statistics */
	STAT_RBUF("rbuf_ovflow_cnt", mib.rbuf_ovflow_cnt, RBUF_OVFL_DISC_CNTR),
	STAT_RBUF("rbuf_err_cnt", mib.rbuf_err_cnt, RBUF_ERR_PKT_CNTR),
	STAT_MIB_SOFT("alloc_rx_buff_failed", mib.alloc_rx_buff_failed),
	STAT_MIB_SOFT("rx_dma_failed", mib.rx_dma_failed),
	STAT_MIB_SOFT("tx_dma_failed", mib.tx_dma_failed),
	STAT_MIB_SOFT("tx_realloc_tsb", mib.tx_realloc_tsb),
	STAT_MIB_SOFT("tx_realloc_tsb_failed", mib.tx_realloc_tsb_failed),
	/* Per TX-queue statistics are dynamically appended */
};

#define BCM_SYSPORT_STATS_LEN	ARRAY_SIZE(bcm_sysport_gstrings_stats)

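/* Worked example of how the table above is consumed: for a
 * BCM_SYSPORT_STAT_MIB_RX entry such as "rx_64_octets", the stat offset
 * recorded by STAT_MIB_RX() points into priv->mib (here
 * mib.rx.pkt_cnt.cnt_64), and bcm_sysport_update_mib_counters() below fills
 * it from the matching UMAC MIB register at UMAC_MIB_START plus the running
 * offset j accumulated from the preceding entries.
 */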
static void bcm_sysport_get_drvinfo(struct net_device *dev,
				    struct ethtool_drvinfo *info)
{
	strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
	strlcpy(info->version, "0.1", sizeof(info->version));
	strlcpy(info->bus_info, "platform", sizeof(info->bus_info));
}

static u32 bcm_sysport_get_msglvl(struct net_device *dev)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);

	return priv->msg_enable;
}

static void bcm_sysport_set_msglvl(struct net_device *dev, u32 enable)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);

	priv->msg_enable = enable;
}

static inline bool bcm_sysport_lite_stat_valid(enum bcm_sysport_stat_type type)
{
	switch (type) {
	case BCM_SYSPORT_STAT_NETDEV:
	case BCM_SYSPORT_STAT_NETDEV64:
	case BCM_SYSPORT_STAT_RXCHK:
	case BCM_SYSPORT_STAT_RBUF:
	case BCM_SYSPORT_STAT_SOFT:
		return true;
	default:
		return false;
	}
}

static int bcm_sysport_get_sset_count(struct net_device *dev, int string_set)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	const struct bcm_sysport_stats *s;
	unsigned int i, j;

	switch (string_set) {
	case ETH_SS_STATS:
		for (i = 0, j = 0; i < BCM_SYSPORT_STATS_LEN; i++) {
			s = &bcm_sysport_gstrings_stats[i];
			if (priv->is_lite &&
			    !bcm_sysport_lite_stat_valid(s->type))
				continue;
			j++;
		}
		/* Include per-queue statistics */
		return j + dev->num_tx_queues * NUM_SYSPORT_TXQ_STAT;
	default:
		return -EOPNOTSUPP;
	}
}

static void bcm_sysport_get_strings(struct net_device *dev,
				    u32 stringset, u8 *data)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	const struct bcm_sysport_stats *s;
	char buf[128];
	int i, j;

	switch (stringset) {
	case ETH_SS_STATS:
		for (i = 0, j = 0; i < BCM_SYSPORT_STATS_LEN; i++) {
			s = &bcm_sysport_gstrings_stats[i];
			if (priv->is_lite &&
			    !bcm_sysport_lite_stat_valid(s->type))
				continue;

			memcpy(data + j * ETH_GSTRING_LEN, s->stat_string,
			       ETH_GSTRING_LEN);
			j++;
		}

		for (i = 0; i < dev->num_tx_queues; i++) {
			snprintf(buf, sizeof(buf), "txq%d_packets", i);
			memcpy(data + j * ETH_GSTRING_LEN, buf,
			       ETH_GSTRING_LEN);
			j++;

			snprintf(buf, sizeof(buf), "txq%d_bytes", i);
			memcpy(data + j * ETH_GSTRING_LEN, buf,
			       ETH_GSTRING_LEN);
			j++;
		}
		break;
	default:
		break;
	}
}

static void bcm_sysport_update_mib_counters(struct bcm_sysport_priv *priv)
{
	int i, j = 0;

	for (i = 0; i < BCM_SYSPORT_STATS_LEN; i++) {
		const struct bcm_sysport_stats *s;
		u8 offset = 0;
		u32 val = 0;
		char *p;

		s = &bcm_sysport_gstrings_stats[i];
		switch (s->type) {
		case BCM_SYSPORT_STAT_NETDEV:
		case BCM_SYSPORT_STAT_NETDEV64:
		case BCM_SYSPORT_STAT_SOFT:
			continue;
		case BCM_SYSPORT_STAT_MIB_RX:
		case BCM_SYSPORT_STAT_MIB_TX:
		case BCM_SYSPORT_STAT_RUNT:
			if (priv->is_lite)
				continue;

			if (s->type != BCM_SYSPORT_STAT_MIB_RX)
				offset = UMAC_MIB_STAT_OFFSET;
			val = umac_readl(priv, UMAC_MIB_START + j + offset);
			break;
		case BCM_SYSPORT_STAT_RXCHK:
			val = rxchk_readl(priv, s->reg_offset);
			if (val == ~0)
				rxchk_writel(priv, 0, s->reg_offset);
			break;
		case BCM_SYSPORT_STAT_RBUF:
			val = rbuf_readl(priv, s->reg_offset);
			if (val == ~0)
				rbuf_writel(priv, 0, s->reg_offset);
			break;
		}

		j += s->stat_sizeof;
		p = (char *)priv + s->stat_offset;
		*(u32 *)p = val;
	}

	netif_dbg(priv, hw, priv->netdev, "updated MIB counters\n");
}

static void bcm_sysport_update_tx_stats(struct bcm_sysport_priv *priv,
					u64 *tx_bytes, u64 *tx_packets)
{
	struct bcm_sysport_tx_ring *ring;
	u64 bytes = 0, packets = 0;
	unsigned int start;
	unsigned int q;

	for (q = 0; q < priv->netdev->num_tx_queues; q++) {
		ring = &priv->tx_rings[q];
		do {
			start = u64_stats_fetch_begin_irq(&priv->syncp);
			bytes = ring->bytes;
			packets = ring->packets;
		} while (u64_stats_fetch_retry_irq(&priv->syncp, start));

		*tx_bytes += bytes;
		*tx_packets += packets;
	}
}

static void bcm_sysport_get_stats(struct net_device *dev,
				  struct ethtool_stats *stats, u64 *data)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	struct bcm_sysport_stats64 *stats64 = &priv->stats64;
	struct u64_stats_sync *syncp = &priv->syncp;
	struct bcm_sysport_tx_ring *ring;
	u64 tx_bytes = 0, tx_packets = 0;
	unsigned int start;
	int i, j;

	if (netif_running(dev)) {
		bcm_sysport_update_mib_counters(priv);
		bcm_sysport_update_tx_stats(priv, &tx_bytes, &tx_packets);
		stats64->tx_bytes = tx_bytes;
		stats64->tx_packets = tx_packets;
	}

	for (i = 0, j = 0; i < BCM_SYSPORT_STATS_LEN; i++) {
		const struct bcm_sysport_stats *s;
		char *p;

		s = &bcm_sysport_gstrings_stats[i];
		if (s->type == BCM_SYSPORT_STAT_NETDEV)
			p = (char *)&dev->stats;
		else if (s->type == BCM_SYSPORT_STAT_NETDEV64)
			p = (char *)stats64;
		else
			p = (char *)priv;

		if (priv->is_lite && !bcm_sysport_lite_stat_valid(s->type))
			continue;
		p += s->stat_offset;

		if (s->stat_sizeof == sizeof(u64) &&
		    s->type == BCM_SYSPORT_STAT_NETDEV64) {
			do {
				start = u64_stats_fetch_begin_irq(syncp);
				data[i] = *(u64 *)p;
			} while (u64_stats_fetch_retry_irq(syncp, start));
		} else
			data[i] = *(u32 *)p;
		j++;
	}

	/* For SYSTEMPORT Lite since we have holes in our statistics, j would
	 * be equal to BCM_SYSPORT_STATS_LEN at the end of the loop, but it
	 * needs to point to how many total statistics we have minus the
	 * number of per TX queue statistics
	 */
	j = bcm_sysport_get_sset_count(dev, ETH_SS_STATS) -
	    dev->num_tx_queues * NUM_SYSPORT_TXQ_STAT;

	for (i = 0; i < dev->num_tx_queues; i++) {
		ring = &priv->tx_rings[i];
		data[j] = ring->packets;
		j++;
		data[j] = ring->bytes;
		j++;
	}
}

static void bcm_sysport_get_wol(struct net_device *dev,
				struct ethtool_wolinfo *wol)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	u32 reg;

	wol->supported = WAKE_MAGIC | WAKE_MAGICSECURE | WAKE_FILTER;
	wol->wolopts = priv->wolopts;

	if (!(priv->wolopts & WAKE_MAGICSECURE))
		return;

	/* Return the programmed SecureOn password */
	reg = umac_readl(priv, UMAC_PSW_MS);
	put_unaligned_be16(reg, &wol->sopass[0]);
	reg = umac_readl(priv, UMAC_PSW_LS);
	put_unaligned_be32(reg, &wol->sopass[2]);
}

static int bcm_sysport_set_wol(struct net_device *dev,
			       struct ethtool_wolinfo *wol)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	struct device *kdev = &priv->pdev->dev;
	u32 supported = WAKE_MAGIC | WAKE_MAGICSECURE | WAKE_FILTER;

	if (!device_can_wakeup(kdev))
		return -ENOTSUPP;

	if (wol->wolopts & ~supported)
		return -EINVAL;

	/* Program the SecureOn password */
	if (wol->wolopts & WAKE_MAGICSECURE) {
		umac_writel(priv, get_unaligned_be16(&wol->sopass[0]),
			    UMAC_PSW_MS);
		umac_writel(priv, get_unaligned_be32(&wol->sopass[2]),
			    UMAC_PSW_LS);
	}

	/* Flag the device and relevant IRQ as wakeup capable */
	if (wol->wolopts) {
		device_set_wakeup_enable(kdev, 1);
		if (priv->wol_irq_disabled)
			enable_irq_wake(priv->wol_irq);
		priv->wol_irq_disabled = 0;
	} else {
		device_set_wakeup_enable(kdev, 0);
		/* Avoid unbalanced disable_irq_wake calls */
		if (!priv->wol_irq_disabled)
			disable_irq_wake(priv->wol_irq);
		priv->wol_irq_disabled = 1;
	}

	priv->wolopts = wol->wolopts;

	return 0;
}

static void bcm_sysport_set_rx_coalesce(struct bcm_sysport_priv *priv,
					unsigned int usecs,
					unsigned int pkts)
{
	u32 reg;

	reg = rdma_readl(priv, RDMA_MBDONE_INTR);
	reg &= ~(RDMA_INTR_THRESH_MASK |
		 RDMA_TIMEOUT_MASK << RDMA_TIMEOUT_SHIFT);
	reg |= pkts;
	reg |= DIV_ROUND_UP(usecs * 1000, 8192) << RDMA_TIMEOUT_SHIFT;
	rdma_writel(priv, reg, RDMA_MBDONE_INTR);
}

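/* Worked example of the timeout conversion above: one RDMA timeout tick is
 * the 125 MHz system clock divided by 1024, i.e. 1024 / 125000000 s =
 * 8.192 us (see the comment in bcm_sysport_set_coalesce()). For
 * usecs = 100, DIV_ROUND_UP(100 * 1000, 8192) = 13 ticks, which re-expands
 * to roughly 13 * 8.192 = 106.5 us, so programmed timeouts round up to the
 * next whole tick.
 */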
static void bcm_sysport_set_tx_coalesce(struct bcm_sysport_tx_ring *ring,
					struct ethtool_coalesce *ec)
{
	struct bcm_sysport_priv *priv = ring->priv;
	u32 reg;

	reg = tdma_readl(priv, TDMA_DESC_RING_INTR_CONTROL(ring->index));
	reg &= ~(RING_INTR_THRESH_MASK |
		 RING_TIMEOUT_MASK << RING_TIMEOUT_SHIFT);
	reg |= ec->tx_max_coalesced_frames;
	reg |= DIV_ROUND_UP(ec->tx_coalesce_usecs * 1000, 8192) <<
			    RING_TIMEOUT_SHIFT;
	tdma_writel(priv, reg, TDMA_DESC_RING_INTR_CONTROL(ring->index));
}

static int bcm_sysport_get_coalesce(struct net_device *dev,
				    struct ethtool_coalesce *ec)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	u32 reg;

	reg = tdma_readl(priv, TDMA_DESC_RING_INTR_CONTROL(0));

	ec->tx_coalesce_usecs = (reg >> RING_TIMEOUT_SHIFT) * 8192 / 1000;
	ec->tx_max_coalesced_frames = reg & RING_INTR_THRESH_MASK;

	reg = rdma_readl(priv, RDMA_MBDONE_INTR);

	ec->rx_coalesce_usecs = (reg >> RDMA_TIMEOUT_SHIFT) * 8192 / 1000;
	ec->rx_max_coalesced_frames = reg & RDMA_INTR_THRESH_MASK;
	ec->use_adaptive_rx_coalesce = priv->dim.use_dim;

	return 0;
}

static int bcm_sysport_set_coalesce(struct net_device *dev,
				    struct ethtool_coalesce *ec)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	struct net_dim_cq_moder moder;
	u32 usecs, pkts;
	unsigned int i;

	/* Base system clock is 125 MHz, DMA timeout is this reference clock
	 * divided by 1024, which yields roughly 8.192 us; our maximum value
	 * has to fit in the RING_TIMEOUT_MASK (16 bits).
	 */
	if (ec->tx_max_coalesced_frames > RING_INTR_THRESH_MASK ||
	    ec->tx_coalesce_usecs > (RING_TIMEOUT_MASK * 8) + 1 ||
	    ec->rx_max_coalesced_frames > RDMA_INTR_THRESH_MASK ||
	    ec->rx_coalesce_usecs > (RDMA_TIMEOUT_MASK * 8) + 1)
		return -EINVAL;

	if ((ec->tx_coalesce_usecs == 0 && ec->tx_max_coalesced_frames == 0) ||
	    (ec->rx_coalesce_usecs == 0 && ec->rx_max_coalesced_frames == 0) ||
	    ec->use_adaptive_tx_coalesce)
		return -EINVAL;

	for (i = 0; i < dev->num_tx_queues; i++)
		bcm_sysport_set_tx_coalesce(&priv->tx_rings[i], ec);

	priv->rx_coalesce_usecs = ec->rx_coalesce_usecs;
	priv->rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
	usecs = priv->rx_coalesce_usecs;
	pkts = priv->rx_max_coalesced_frames;

	if (ec->use_adaptive_rx_coalesce && !priv->dim.use_dim) {
		moder = net_dim_get_def_rx_moderation(priv->dim.dim.mode);
		usecs = moder.usec;
		pkts = moder.pkts;
	}

	priv->dim.use_dim = ec->use_adaptive_rx_coalesce;

	/* Apply desired coalescing parameters */
	bcm_sysport_set_rx_coalesce(priv, usecs, pkts);

	return 0;
}

static void bcm_sysport_free_cb(struct bcm_sysport_cb *cb)
{
	dev_consume_skb_any(cb->skb);
	cb->skb = NULL;
	dma_unmap_addr_set(cb, dma_addr, 0);
}

static struct sk_buff *bcm_sysport_rx_refill(struct bcm_sysport_priv *priv,
					     struct bcm_sysport_cb *cb)
{
	struct device *kdev = &priv->pdev->dev;
	struct net_device *ndev = priv->netdev;
	struct sk_buff *skb, *rx_skb;
	dma_addr_t mapping;

	/* Allocate a new SKB for a new packet */
	skb = netdev_alloc_skb(priv->netdev, RX_BUF_LENGTH);
	if (!skb) {
		priv->mib.alloc_rx_buff_failed++;
		netif_err(priv, rx_err, ndev, "SKB alloc failed\n");
		return NULL;
	}

	mapping = dma_map_single(kdev, skb->data,
				 RX_BUF_LENGTH, DMA_FROM_DEVICE);
	if (dma_mapping_error(kdev, mapping)) {
		priv->mib.rx_dma_failed++;
		dev_kfree_skb_any(skb);
		netif_err(priv, rx_err, ndev, "DMA mapping failure\n");
		return NULL;
	}

	/* Grab the current SKB on the ring */
	rx_skb = cb->skb;
	if (likely(rx_skb))
		dma_unmap_single(kdev, dma_unmap_addr(cb, dma_addr),
				 RX_BUF_LENGTH, DMA_FROM_DEVICE);

	/* Put the new SKB on the ring */
	cb->skb = skb;
	dma_unmap_addr_set(cb, dma_addr, mapping);
	dma_desc_set_addr(priv, cb->bd_addr, mapping);

	netif_dbg(priv, rx_status, ndev, "RX refill\n");

	/* Return the current SKB to the caller */
	return rx_skb;
}

static int bcm_sysport_alloc_rx_bufs(struct bcm_sysport_priv *priv)
{
	struct bcm_sysport_cb *cb;
	struct sk_buff *skb;
	unsigned int i;

	for (i = 0; i < priv->num_rx_bds; i++) {
		cb = &priv->rx_cbs[i];
		/* rx_refill returns the previous SKB on the ring, which is
		 * NULL at initialization time
		 */
		skb = bcm_sysport_rx_refill(priv, cb);
		if (skb)
			dev_kfree_skb(skb);
		if (!cb->skb)
			return -ENOMEM;
	}

	return 0;
}

/* Poll the hardware for up to budget packets to process */
static unsigned int bcm_sysport_desc_rx(struct bcm_sysport_priv *priv,
					unsigned int budget)
{
	struct bcm_sysport_stats64 *stats64 = &priv->stats64;
	struct net_device *ndev = priv->netdev;
	unsigned int processed = 0, to_process;
	unsigned int processed_bytes = 0;
	struct bcm_sysport_cb *cb;
	struct sk_buff *skb;
	unsigned int p_index;
	u16 len, status;
	struct bcm_rsb *rsb;

	/* Clear status before servicing to reduce spurious interrupts */
	intrl2_0_writel(priv, INTRL2_0_RDMA_MBDONE, INTRL2_CPU_CLEAR);

	/* Determine how much we should process since last call, SYSTEMPORT Lite
	 * groups the producer and consumer indexes into the same 32-bit
	 * register which we access using RDMA_CONS_INDEX
	 */
	if (!priv->is_lite)
		p_index = rdma_readl(priv, RDMA_PROD_INDEX);
	else
		p_index = rdma_readl(priv, RDMA_CONS_INDEX);
	p_index &= RDMA_PROD_INDEX_MASK;

	to_process = (p_index - priv->rx_c_index) & RDMA_CONS_INDEX_MASK;
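	/* The subtraction above is done modulo the index mask, so a single
	 * unsigned expression also covers the case where the hardware
	 * producer index has wrapped past the software consumer index;
	 * e.g. with a 16-bit index space, p_index = 2 and rx_c_index = 0xfffe
	 * yields 4 pending buffers.
	 */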
	netif_dbg(priv, rx_status, ndev,
		  "p_index=%d rx_c_index=%d to_process=%d\n",
		  p_index, priv->rx_c_index, to_process);

	while ((processed < to_process) && (processed < budget)) {
		cb = &priv->rx_cbs[priv->rx_read_ptr];
		skb = bcm_sysport_rx_refill(priv, cb);

		/* We do not have a backing SKB, so we do not have a
		 * corresponding DMA mapping for this incoming packet since
		 * bcm_sysport_rx_refill always either has both an skb and a
		 * mapping, or none.
		 */
		if (unlikely(!skb)) {
			netif_err(priv, rx_err, ndev, "out of memory!\n");
			ndev->stats.rx_dropped++;
			ndev->stats.rx_errors++;
			goto next;
		}

		/* Extract the Receive Status Block prepended */
		rsb = (struct bcm_rsb *)skb->data;
		len = (rsb->rx_status_len >> DESC_LEN_SHIFT) & DESC_LEN_MASK;
		status = (rsb->rx_status_len >> DESC_STATUS_SHIFT) &
			  DESC_STATUS_MASK;

		netif_dbg(priv, rx_status, ndev,
			  "p=%d, c=%d, rd_ptr=%d, len=%d, flag=0x%04x\n",
			  p_index, priv->rx_c_index, priv->rx_read_ptr,
			  len, status);

		if (unlikely(len > RX_BUF_LENGTH)) {
			netif_err(priv, rx_status, ndev, "oversized packet\n");
			ndev->stats.rx_length_errors++;
			ndev->stats.rx_errors++;
			dev_kfree_skb_any(skb);
			goto next;
		}

		if (unlikely(!(status & DESC_EOP) || !(status & DESC_SOP))) {
			netif_err(priv, rx_status, ndev, "fragmented packet!\n");
			ndev->stats.rx_dropped++;
			ndev->stats.rx_errors++;
			dev_kfree_skb_any(skb);
			goto next;
		}

		if (unlikely(status & (RX_STATUS_ERR | RX_STATUS_OVFLOW))) {
			netif_err(priv, rx_err, ndev, "error packet\n");
			if (status & RX_STATUS_OVFLOW)
				ndev->stats.rx_over_errors++;
			ndev->stats.rx_dropped++;
			ndev->stats.rx_errors++;
			dev_kfree_skb_any(skb);
			goto next;
		}

		skb_put(skb, len);

		/* Hardware validated our checksum */
		if (likely(status & DESC_L4_CSUM))
			skb->ip_summed = CHECKSUM_UNNECESSARY;

		/* Hardware pre-pends packets with 2 bytes before the Ethernet
		 * header, plus we have the Receive Status Block; strip off all
		 * of this from the SKB.
		 */
		skb_pull(skb, sizeof(*rsb) + 2);
		len -= (sizeof(*rsb) + 2);
		processed_bytes += len;

		/* UniMAC may forward CRC */
		if (priv->crc_fwd) {
			skb_trim(skb, len - ETH_FCS_LEN);
			len -= ETH_FCS_LEN;
		}

		skb->protocol = eth_type_trans(skb, ndev);
		ndev->stats.rx_packets++;
		ndev->stats.rx_bytes += len;
		u64_stats_update_begin(&priv->syncp);
		stats64->rx_packets++;
		stats64->rx_bytes += len;
		u64_stats_update_end(&priv->syncp);

		napi_gro_receive(&priv->napi, skb);
next:
		processed++;
		priv->rx_read_ptr++;

		if (priv->rx_read_ptr == priv->num_rx_bds)
			priv->rx_read_ptr = 0;
	}

	priv->dim.packets = processed;
	priv->dim.bytes = processed_bytes;

	return processed;
}

static void bcm_sysport_tx_reclaim_one(struct bcm_sysport_tx_ring *ring,
				       struct bcm_sysport_cb *cb,
				       unsigned int *bytes_compl,
				       unsigned int *pkts_compl)
{
	struct bcm_sysport_priv *priv = ring->priv;
	struct device *kdev = &priv->pdev->dev;

	if (cb->skb) {
		*bytes_compl += cb->skb->len;
		dma_unmap_single(kdev, dma_unmap_addr(cb, dma_addr),
				 dma_unmap_len(cb, dma_len),
				 DMA_TO_DEVICE);
		(*pkts_compl)++;
		bcm_sysport_free_cb(cb);
	/* SKB fragment */
	} else if (dma_unmap_addr(cb, dma_addr)) {
		*bytes_compl += dma_unmap_len(cb, dma_len);
		dma_unmap_page(kdev, dma_unmap_addr(cb, dma_addr),
			       dma_unmap_len(cb, dma_len), DMA_TO_DEVICE);
		dma_unmap_addr_set(cb, dma_addr, 0);
	}
}

/* Reclaim queued SKBs for transmission completion, lockless version */
static unsigned int __bcm_sysport_tx_reclaim(struct bcm_sysport_priv *priv,
					     struct bcm_sysport_tx_ring *ring)
{
	unsigned int pkts_compl = 0, bytes_compl = 0;
	struct net_device *ndev = priv->netdev;
	unsigned int txbds_processed = 0;
	struct bcm_sysport_cb *cb;
	unsigned int txbds_ready;
	unsigned int c_index;
	u32 hw_ind;

	/* Clear status before servicing to reduce spurious interrupts */
	if (!ring->priv->is_lite)
		intrl2_1_writel(ring->priv, BIT(ring->index), INTRL2_CPU_CLEAR);
	else
		intrl2_0_writel(ring->priv, BIT(ring->index +
				INTRL2_0_TDMA_MBDONE_SHIFT), INTRL2_CPU_CLEAR);

	/* Compute how many descriptors have been processed since last call */
	hw_ind = tdma_readl(priv, TDMA_DESC_RING_PROD_CONS_INDEX(ring->index));
	c_index = (hw_ind >> RING_CONS_INDEX_SHIFT) & RING_CONS_INDEX_MASK;
	txbds_ready = (c_index - ring->c_index) & RING_CONS_INDEX_MASK;

	netif_dbg(priv, tx_done, ndev,
		  "ring=%d old_c_index=%u c_index=%u txbds_ready=%u\n",
		  ring->index, ring->c_index, c_index, txbds_ready);

	while (txbds_processed < txbds_ready) {
		cb = &ring->cbs[ring->clean_index];
		bcm_sysport_tx_reclaim_one(ring, cb, &bytes_compl, &pkts_compl);

		ring->desc_count++;
		txbds_processed++;

		if (likely(ring->clean_index < ring->size - 1))
			ring->clean_index++;
		else
			ring->clean_index = 0;
	}

	u64_stats_update_begin(&priv->syncp);
	ring->packets += pkts_compl;
	ring->bytes += bytes_compl;
	u64_stats_update_end(&priv->syncp);

	ring->c_index = c_index;

	netif_dbg(priv, tx_done, ndev,
		  "ring=%d c_index=%d pkts_compl=%d, bytes_compl=%d\n",
		  ring->index, ring->c_index, pkts_compl, bytes_compl);

	return pkts_compl;
}

/* Locked version of the per-ring TX reclaim routine */
static unsigned int bcm_sysport_tx_reclaim(struct bcm_sysport_priv *priv,
					   struct bcm_sysport_tx_ring *ring)
{
	struct netdev_queue *txq;
	unsigned int released;
	unsigned long flags;

	txq = netdev_get_tx_queue(priv->netdev, ring->index);

	spin_lock_irqsave(&ring->lock, flags);
	released = __bcm_sysport_tx_reclaim(priv, ring);
	if (released)
		netif_tx_wake_queue(txq);

	spin_unlock_irqrestore(&ring->lock, flags);

	return released;
}

/* Locked version of the per-ring TX reclaim, but does not wake the queue */
static void bcm_sysport_tx_clean(struct bcm_sysport_priv *priv,
				 struct bcm_sysport_tx_ring *ring)
{
	unsigned long flags;

	spin_lock_irqsave(&ring->lock, flags);
	__bcm_sysport_tx_reclaim(priv, ring);
	spin_unlock_irqrestore(&ring->lock, flags);
}

static int bcm_sysport_tx_poll(struct napi_struct *napi, int budget)
{
	struct bcm_sysport_tx_ring *ring =
		container_of(napi, struct bcm_sysport_tx_ring, napi);
	unsigned int work_done = 0;

	work_done = bcm_sysport_tx_reclaim(ring->priv, ring);

	if (work_done == 0) {
		napi_complete(napi);
		/* re-enable TX interrupt */
		if (!ring->priv->is_lite)
			intrl2_1_mask_clear(ring->priv, BIT(ring->index));
		else
			intrl2_0_mask_clear(ring->priv, BIT(ring->index +
					    INTRL2_0_TDMA_MBDONE_SHIFT));

		return 0;
	}

	return budget;
}

static void bcm_sysport_tx_reclaim_all(struct bcm_sysport_priv *priv)
{
	unsigned int q;

	for (q = 0; q < priv->netdev->num_tx_queues; q++)
		bcm_sysport_tx_reclaim(priv, &priv->tx_rings[q]);
}

static int bcm_sysport_poll(struct napi_struct *napi, int budget)
{
	struct bcm_sysport_priv *priv =
		container_of(napi, struct bcm_sysport_priv, napi);
	struct net_dim_sample dim_sample;
	unsigned int work_done = 0;

	work_done = bcm_sysport_desc_rx(priv, budget);

	priv->rx_c_index += work_done;
	priv->rx_c_index &= RDMA_CONS_INDEX_MASK;

	/* SYSTEMPORT Lite groups the producer/consumer index; the producer is
	 * maintained by HW, but writes to it will be ignored while RDMA
	 * is still active.
	 */
	if (!priv->is_lite)
		rdma_writel(priv, priv->rx_c_index, RDMA_CONS_INDEX);
	else
		rdma_writel(priv, priv->rx_c_index << 16, RDMA_CONS_INDEX);

	if (work_done < budget) {
		napi_complete_done(napi, work_done);
		/* re-enable RX interrupts */
		intrl2_0_mask_clear(priv, INTRL2_0_RDMA_MBDONE);
	}

	if (priv->dim.use_dim) {
		net_dim_sample(priv->dim.event_ctr, priv->dim.packets,
			       priv->dim.bytes, &dim_sample);
		net_dim(&priv->dim.dim, dim_sample);
	}

	return work_done;
}

static void mpd_enable_set(struct bcm_sysport_priv *priv, bool enable)
{
	u32 reg, bit;

	reg = umac_readl(priv, UMAC_MPD_CTRL);
	if (enable)
		reg |= MPD_EN;
	else
		reg &= ~MPD_EN;
	umac_writel(priv, reg, UMAC_MPD_CTRL);

	if (priv->is_lite)
		bit = RBUF_ACPI_EN_LITE;
	else
		bit = RBUF_ACPI_EN;

	reg = rbuf_readl(priv, RBUF_CONTROL);
	if (enable)
		reg |= bit;
	else
		reg &= ~bit;
	rbuf_writel(priv, reg, RBUF_CONTROL);
}

static void bcm_sysport_resume_from_wol(struct bcm_sysport_priv *priv)
{
	u32 reg;

	/* Disable RXCHK, active filters and Broadcom tag matching */
	reg = rxchk_readl(priv, RXCHK_CONTROL);
	reg &= ~(RXCHK_BRCM_TAG_MATCH_MASK <<
		 RXCHK_BRCM_TAG_MATCH_SHIFT | RXCHK_EN | RXCHK_BRCM_TAG_EN);
	rxchk_writel(priv, reg, RXCHK_CONTROL);

	/* Clear the MagicPacket detection logic */
	mpd_enable_set(priv, false);

	reg = intrl2_0_readl(priv, INTRL2_CPU_STATUS);
	if (reg & INTRL2_0_MPD)
		netdev_info(priv->netdev, "Wake-on-LAN (MPD) interrupt!\n");

	if (reg & INTRL2_0_BRCM_MATCH_TAG) {
		reg = rxchk_readl(priv, RXCHK_BRCM_TAG_MATCH_STATUS) &
				  RXCHK_BRCM_TAG_MATCH_MASK;
		netdev_info(priv->netdev,
			    "Wake-on-LAN (filters 0x%02x) interrupt!\n", reg);
	}

	netif_dbg(priv, wol, priv->netdev, "resumed from WOL\n");
}

static void bcm_sysport_dim_work(struct work_struct *work)
{
	struct net_dim *dim = container_of(work, struct net_dim, work);
	struct bcm_sysport_net_dim *ndim =
			container_of(dim, struct bcm_sysport_net_dim, dim);
	struct bcm_sysport_priv *priv =
			container_of(ndim, struct bcm_sysport_priv, dim);
	struct net_dim_cq_moder cur_profile =
			net_dim_get_rx_moderation(dim->mode, dim->profile_ix);

	bcm_sysport_set_rx_coalesce(priv, cur_profile.usec, cur_profile.pkts);
	dim->state = NET_DIM_START_MEASURE;
}

/* RX and misc interrupt routine */
static irqreturn_t bcm_sysport_rx_isr(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	struct bcm_sysport_tx_ring *txr;
	unsigned int ring, ring_bit;

	priv->irq0_stat = intrl2_0_readl(priv, INTRL2_CPU_STATUS) &
			  ~intrl2_0_readl(priv, INTRL2_CPU_MASK_STATUS);
	intrl2_0_writel(priv, priv->irq0_stat, INTRL2_CPU_CLEAR);

	if (unlikely(priv->irq0_stat == 0)) {
		netdev_warn(priv->netdev, "spurious RX interrupt\n");
		return IRQ_NONE;
	}

	if (priv->irq0_stat & INTRL2_0_RDMA_MBDONE) {
		priv->dim.event_ctr++;
		if (likely(napi_schedule_prep(&priv->napi))) {
			/* disable RX interrupts */
			intrl2_0_mask_set(priv, INTRL2_0_RDMA_MBDONE);
			__napi_schedule_irqoff(&priv->napi);
		}
	}

	/* TX ring is full, perform a full reclaim since we do not know
	 * which one would trigger this interrupt
	 */
	if (priv->irq0_stat & INTRL2_0_TX_RING_FULL)
		bcm_sysport_tx_reclaim_all(priv);

	if (!priv->is_lite)
		goto out;

	for (ring = 0; ring < dev->num_tx_queues; ring++) {
		ring_bit = BIT(ring + INTRL2_0_TDMA_MBDONE_SHIFT);
		if (!(priv->irq0_stat & ring_bit))
			continue;

		txr = &priv->tx_rings[ring];

		if (likely(napi_schedule_prep(&txr->napi))) {
			intrl2_0_mask_set(priv, ring_bit);
			__napi_schedule(&txr->napi);
		}
	}
out:
	return IRQ_HANDLED;
}

/* TX interrupt service routine */
static irqreturn_t bcm_sysport_tx_isr(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	struct bcm_sysport_tx_ring *txr;
	unsigned int ring;

	priv->irq1_stat = intrl2_1_readl(priv, INTRL2_CPU_STATUS) &
			  ~intrl2_1_readl(priv, INTRL2_CPU_MASK_STATUS);
	intrl2_1_writel(priv, 0xffffffff, INTRL2_CPU_CLEAR);

	if (unlikely(priv->irq1_stat == 0)) {
		netdev_warn(priv->netdev, "spurious TX interrupt\n");
		return IRQ_NONE;
	}

	for (ring = 0; ring < dev->num_tx_queues; ring++) {
		if (!(priv->irq1_stat & BIT(ring)))
			continue;

		txr = &priv->tx_rings[ring];

		if (likely(napi_schedule_prep(&txr->napi))) {
			intrl2_1_mask_set(priv, BIT(ring));
			__napi_schedule_irqoff(&txr->napi);
		}
	}

	return IRQ_HANDLED;
}

static irqreturn_t bcm_sysport_wol_isr(int irq, void *dev_id)
{
	struct bcm_sysport_priv *priv = dev_id;

	pm_wakeup_event(&priv->pdev->dev, 0);

	return IRQ_HANDLED;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void bcm_sysport_poll_controller(struct net_device *dev)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);

	disable_irq(priv->irq0);
	bcm_sysport_rx_isr(priv->irq0, priv);
	enable_irq(priv->irq0);

	if (!priv->is_lite) {
		disable_irq(priv->irq1);
		bcm_sysport_tx_isr(priv->irq1, priv);
		enable_irq(priv->irq1);
	}
}
#endif

static struct sk_buff *bcm_sysport_insert_tsb(struct sk_buff *skb,
					      struct net_device *dev)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	struct sk_buff *nskb;
	struct bcm_tsb *tsb;
	u32 csum_info;
	u8 ip_proto;
	u16 csum_start;
	__be16 ip_ver;

	/* Re-allocate SKB if needed */
	if (unlikely(skb_headroom(skb) < sizeof(*tsb))) {
		nskb = skb_realloc_headroom(skb, sizeof(*tsb));
		if (!nskb) {
			dev_kfree_skb_any(skb);
			priv->mib.tx_realloc_tsb_failed++;
			dev->stats.tx_errors++;
			dev->stats.tx_dropped++;
			return NULL;
		}
		dev_consume_skb_any(skb);
		skb = nskb;
		priv->mib.tx_realloc_tsb++;
	}

	tsb = skb_push(skb, sizeof(*tsb));
	/* Zero-out TSB by default */
	memset(tsb, 0, sizeof(*tsb));

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		ip_ver = skb->protocol;
		switch (ip_ver) {
		case htons(ETH_P_IP):
			ip_proto = ip_hdr(skb)->protocol;
			break;
		case htons(ETH_P_IPV6):
			ip_proto = ipv6_hdr(skb)->nexthdr;
			break;
		default:
			return skb;
		}

		/* Get the checksum offset and the L4 (transport) offset */
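		/* The TSB word built below packs two fields: the low bits
		 * hold the L4 checksum pointer (the offset of the checksum
		 * field itself, csum_start + skb->csum_offset, masked with
		 * L4_CSUM_PTR_MASK), and the bits at L4_PTR_SHIFT hold the
		 * start of the transport header; both are relative to the
		 * start of the TSB since csum_start is adjusted by
		 * sizeof(*tsb).
		 */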
		csum_start = skb_checksum_start_offset(skb) - sizeof(*tsb);
		csum_info = (csum_start + skb->csum_offset) & L4_CSUM_PTR_MASK;
		csum_info |= (csum_start << L4_PTR_SHIFT);

		if (ip_proto == IPPROTO_TCP || ip_proto == IPPROTO_UDP) {
			csum_info |= L4_LENGTH_VALID;
			if (ip_proto == IPPROTO_UDP &&
			    ip_ver == htons(ETH_P_IP))
				csum_info |= L4_UDP;
		} else {
			csum_info = 0;
		}

		tsb->l4_ptr_dest_map = csum_info;
	}

	return skb;
}

static netdev_tx_t bcm_sysport_xmit(struct sk_buff *skb,
				    struct net_device *dev)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	struct device *kdev = &priv->pdev->dev;
	struct bcm_sysport_tx_ring *ring;
	struct bcm_sysport_cb *cb;
	struct netdev_queue *txq;
	struct dma_desc *desc;
	unsigned int skb_len;
	unsigned long flags;
	dma_addr_t mapping;
	u32 len_status;
	u16 queue;
	int ret;

	queue = skb_get_queue_mapping(skb);
	txq = netdev_get_tx_queue(dev, queue);
	ring = &priv->tx_rings[queue];

	/* lock against tx reclaim in BH context and TX ring full interrupt */
	spin_lock_irqsave(&ring->lock, flags);
	if (unlikely(ring->desc_count == 0)) {
		netif_tx_stop_queue(txq);
		netdev_err(dev, "queue %d awake and ring full!\n", queue);
		ret = NETDEV_TX_BUSY;
		goto out;
	}

	/* Insert TSB and checksum infos */
	if (priv->tsb_en) {
		skb = bcm_sysport_insert_tsb(skb, dev);
		if (!skb) {
			ret = NETDEV_TX_OK;
			goto out;
		}
	}

	skb_len = skb->len;

	mapping = dma_map_single(kdev, skb->data, skb_len, DMA_TO_DEVICE);
	if (dma_mapping_error(kdev, mapping)) {
		priv->mib.tx_dma_failed++;
		netif_err(priv, tx_err, dev, "DMA map failed at %p (len=%d)\n",
			  skb->data, skb_len);
		ret = NETDEV_TX_OK;
		goto out;
	}

	/* Remember the SKB for future freeing */
	cb = &ring->cbs[ring->curr_desc];
	cb->skb = skb;
	dma_unmap_addr_set(cb, dma_addr, mapping);
	dma_unmap_len_set(cb, dma_len, skb_len);

	/* Fetch a descriptor entry from our pool */
	desc = ring->desc_cpu;

	desc->addr_lo = lower_32_bits(mapping);
	len_status = upper_32_bits(mapping) & DESC_ADDR_HI_MASK;
	len_status |= (skb_len << DESC_LEN_SHIFT);
	len_status |= (DESC_SOP | DESC_EOP | TX_STATUS_APP_CRC) <<
		       DESC_STATUS_SHIFT;
	if (skb->ip_summed == CHECKSUM_PARTIAL)
		len_status |= (DESC_L4_CSUM << DESC_STATUS_SHIFT);

	ring->curr_desc++;
	if (ring->curr_desc == ring->size)
		ring->curr_desc = 0;
	ring->desc_count--;

	/* Ensure write completion of the descriptor status/length
	 * in DRAM before the System Port WRITE_PORT register latches
	 * the value
	 */
	wmb();
	desc->addr_status_len = len_status;
	wmb();

	/* Write this descriptor address to the RING write port */
	tdma_port_write_desc_addr(priv, desc, ring->index);

	/* Check ring space and update SW control flow */
	if (ring->desc_count == 0)
		netif_tx_stop_queue(txq);

	netif_dbg(priv, tx_queued, dev, "ring=%d desc_count=%d, curr_desc=%d\n",
		  ring->index, ring->desc_count, ring->curr_desc);

	ret = NETDEV_TX_OK;
out:
	spin_unlock_irqrestore(&ring->lock, flags);
	return ret;
}

static void bcm_sysport_tx_timeout(struct net_device *dev)
{
	netdev_warn(dev, "transmit timeout!\n");

	netif_trans_update(dev);
	dev->stats.tx_errors++;

	netif_tx_wake_all_queues(dev);
}

/* phylib adjust link callback */
static void bcm_sysport_adj_link(struct net_device *dev)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	struct phy_device *phydev = dev->phydev;
	unsigned int changed = 0;
	u32 cmd_bits = 0, reg;

	if (priv->old_link != phydev->link) {
		changed = 1;
		priv->old_link = phydev->link;
	}

	if (priv->old_duplex != phydev->duplex) {
		changed = 1;
		priv->old_duplex = phydev->duplex;
	}

	if (priv->is_lite)
		goto out;

	switch (phydev->speed) {
	case SPEED_2500:
		cmd_bits = CMD_SPEED_2500;
		break;
	case SPEED_1000:
		cmd_bits = CMD_SPEED_1000;
		break;
	case SPEED_100:
		cmd_bits = CMD_SPEED_100;
		break;
	case SPEED_10:
		cmd_bits = CMD_SPEED_10;
		break;
	default:
		break;
	}
	cmd_bits <<= CMD_SPEED_SHIFT;

	if (phydev->duplex == DUPLEX_HALF)
		cmd_bits |= CMD_HD_EN;

	if (priv->old_pause != phydev->pause) {
		changed = 1;
		priv->old_pause = phydev->pause;
	}

	if (!phydev->pause)
		cmd_bits |= CMD_RX_PAUSE_IGNORE | CMD_TX_PAUSE_IGNORE;

	if (!changed)
		return;

	if (phydev->link) {
		reg = umac_readl(priv, UMAC_CMD);
		reg &= ~((CMD_SPEED_MASK << CMD_SPEED_SHIFT) |
			CMD_HD_EN | CMD_RX_PAUSE_IGNORE |
			CMD_TX_PAUSE_IGNORE);
		reg |= cmd_bits;
		umac_writel(priv, reg, UMAC_CMD);
	}
out:
	phy_print_status(phydev);
}

static void bcm_sysport_init_dim(struct bcm_sysport_priv *priv,
				 void (*cb)(struct work_struct *work))
{
	struct bcm_sysport_net_dim *dim = &priv->dim;

	INIT_WORK(&dim->dim.work, cb);
	dim->dim.mode = NET_DIM_CQ_PERIOD_MODE_START_FROM_EQE;
	dim->event_ctr = 0;
	dim->packets = 0;
	dim->bytes = 0;
}

static void bcm_sysport_init_rx_coalesce(struct bcm_sysport_priv *priv)
{
	struct bcm_sysport_net_dim *dim = &priv->dim;
	struct net_dim_cq_moder moder;
	u32 usecs, pkts;

	usecs = priv->rx_coalesce_usecs;
	pkts = priv->rx_max_coalesced_frames;

	/* If DIM was enabled, re-apply default parameters */
	if (dim->use_dim) {
		moder = net_dim_get_def_rx_moderation(dim->dim.mode);
		usecs = moder.usec;
		pkts = moder.pkts;
	}

	bcm_sysport_set_rx_coalesce(priv, usecs, pkts);
}

static int bcm_sysport_init_tx_ring(struct bcm_sysport_priv *priv,
				    unsigned int index)
{
	struct bcm_sysport_tx_ring *ring = &priv->tx_rings[index];
	struct device *kdev = &priv->pdev->dev;
	size_t size;
	void *p;
	u32 reg;

	/* Simple descriptors partitioning for now */
	size = 256;

	/* We just need one DMA descriptor which is DMA-able, since writing to
	 * the port will allocate a new descriptor in its internal linked-list
	 */
	p = dma_zalloc_coherent(kdev, sizeof(struct dma_desc), &ring->desc_dma,
				GFP_KERNEL);
	if (!p) {
		netif_err(priv, hw, priv->netdev, "DMA alloc failed\n");
		return -ENOMEM;
	}

	ring->cbs = kcalloc(size, sizeof(struct bcm_sysport_cb), GFP_KERNEL);
	if (!ring->cbs) {
		dma_free_coherent(kdev, sizeof(struct dma_desc),
				  ring->desc_cpu, ring->desc_dma);
		netif_err(priv, hw, priv->netdev, "CB allocation failed\n");
		return -ENOMEM;
	}

	/* Initialize SW view of the ring */
	spin_lock_init(&ring->lock);
	ring->priv = priv;
	netif_tx_napi_add(priv->netdev, &ring->napi, bcm_sysport_tx_poll, 64);
	ring->index = index;
	ring->size = size;
	ring->clean_index = 0;
	ring->alloc_size = ring->size;
	ring->desc_cpu = p;
	ring->desc_count = ring->size;
	ring->curr_desc = 0;

	/* Initialize HW ring */
	tdma_writel(priv, RING_EN, TDMA_DESC_RING_HEAD_TAIL_PTR(index));
	tdma_writel(priv, 0, TDMA_DESC_RING_COUNT(index));
	tdma_writel(priv, 1, TDMA_DESC_RING_INTR_CONTROL(index));
	tdma_writel(priv, 0, TDMA_DESC_RING_PROD_CONS_INDEX(index));

	/* Configure QID and port mapping */
	reg = tdma_readl(priv, TDMA_DESC_RING_MAPPING(index));
	reg &= ~(RING_QID_MASK | RING_PORT_ID_MASK << RING_PORT_ID_SHIFT);
	if (ring->inspect) {
		reg |= ring->switch_queue & RING_QID_MASK;
		reg |= ring->switch_port << RING_PORT_ID_SHIFT;
	} else {
		reg |= RING_IGNORE_STATUS;
	}
	tdma_writel(priv, reg, TDMA_DESC_RING_MAPPING(index));
	tdma_writel(priv, 0, TDMA_DESC_RING_PCP_DEI_VID(index));

	/* Enable ACB algorithm 2 */
	reg = tdma_readl(priv, TDMA_CONTROL);
	reg |= tdma_control_bit(priv, ACB_ALGO);
	tdma_writel(priv, reg, TDMA_CONTROL);

	/* Do not use tdma_control_bit() here because TSB_SWAP1 collides
	 * with the original definition of ACB_ALGO
	 */
	reg = tdma_readl(priv, TDMA_CONTROL);
	if (priv->is_lite)
		reg &= ~BIT(TSB_SWAP1);
	/* Set a correct TSB format based on host endian */
	if (!IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
		reg |= tdma_control_bit(priv, TSB_SWAP0);
	else
		reg &= ~tdma_control_bit(priv, TSB_SWAP0);
	tdma_writel(priv, reg, TDMA_CONTROL);

	/* Program the number of descriptors as MAX_THRESHOLD and half of
	 * its size for the hysteresis trigger
	 */
	tdma_writel(priv, ring->size |
			1 << RING_HYST_THRESH_SHIFT,
			TDMA_DESC_RING_MAX_HYST(index));

	/* Enable the ring queue in the arbiter */
	reg = tdma_readl(priv, TDMA_TIER1_ARB_0_QUEUE_EN);
	reg |= (1 << index);
	tdma_writel(priv, reg, TDMA_TIER1_ARB_0_QUEUE_EN);

	napi_enable(&ring->napi);

	netif_dbg(priv, hw, priv->netdev,
		  "TDMA cfg, size=%d, desc_cpu=%p switch q=%d,port=%d\n",
		  ring->size, ring->desc_cpu, ring->switch_queue,
		  ring->switch_port);

	return 0;
}

static void bcm_sysport_fini_tx_ring(struct bcm_sysport_priv *priv,
				     unsigned int index)
{
	struct bcm_sysport_tx_ring *ring = &priv->tx_rings[index];
	struct device *kdev = &priv->pdev->dev;
	u32 reg;

	/* Caller should stop the TDMA engine */
	reg = tdma_readl(priv, TDMA_STATUS);
	if (!(reg & TDMA_DISABLED))
		netdev_warn(priv->netdev, "TDMA not stopped!\n");

	/* ring->cbs is the last part in bcm_sysport_init_tx_ring which could
	 * fail, so by checking this pointer we know whether the TX ring was
	 * fully initialized or not.
	 */
	if (!ring->cbs)
		return;

	napi_disable(&ring->napi);
	netif_napi_del(&ring->napi);

	bcm_sysport_tx_clean(priv, ring);

	kfree(ring->cbs);
	ring->cbs = NULL;

	if (ring->desc_dma) {
		dma_free_coherent(kdev, sizeof(struct dma_desc),
				  ring->desc_cpu, ring->desc_dma);
		ring->desc_dma = 0;
	}
	ring->size = 0;
	ring->alloc_size = 0;

	netif_dbg(priv, hw, priv->netdev, "TDMA fini done\n");
}

static inline int rdma_enable_set(struct bcm_sysport_priv *priv,
				  unsigned int enable)
{
	unsigned int timeout = 1000;
	u32 reg;

	reg = rdma_readl(priv, RDMA_CONTROL);
	if (enable)
		reg |= RDMA_EN;
	else
		reg &= ~RDMA_EN;
	rdma_writel(priv, reg, RDMA_CONTROL);

	/* Poll for RDMA disabling completion */
	do {
		reg = rdma_readl(priv, RDMA_STATUS);
		if (!!(reg & RDMA_DISABLED) == !enable)
			return 0;
		usleep_range(1000, 2000);
	} while (timeout-- > 0);

	netdev_err(priv->netdev, "timeout waiting for RDMA to finish\n");

	return -ETIMEDOUT;
}

static inline int tdma_enable_set(struct bcm_sysport_priv *priv,
				  unsigned int enable)
{
	unsigned int timeout = 1000;
	u32 reg;

	reg = tdma_readl(priv, TDMA_CONTROL);
	if (enable)
		reg |= tdma_control_bit(priv, TDMA_EN);
	else
		reg &= ~tdma_control_bit(priv, TDMA_EN);
	tdma_writel(priv, reg, TDMA_CONTROL);

	/* Poll for TDMA disabling completion */
	do {
		reg = tdma_readl(priv, TDMA_STATUS);
		if (!!(reg & TDMA_DISABLED) == !enable)
			return 0;

		usleep_range(1000, 2000);
	} while (timeout-- > 0);

	netdev_err(priv->netdev, "timeout waiting for TDMA to finish\n");

	return -ETIMEDOUT;
}

static int bcm_sysport_init_rx_ring(struct bcm_sysport_priv *priv)
{
	struct bcm_sysport_cb *cb;
	u32 reg;
	int ret;
	int i;

	/* Initialize SW view of the RX ring */
	priv->num_rx_bds = priv->num_rx_desc_words / WORDS_PER_DESC;
	priv->rx_bds = priv->base + SYS_PORT_RDMA_OFFSET;
	priv->rx_c_index = 0;
	priv->rx_read_ptr = 0;
	priv->rx_cbs = kcalloc(priv->num_rx_bds, sizeof(struct bcm_sysport_cb),
			       GFP_KERNEL);
	if (!priv->rx_cbs) {
		netif_err(priv, hw, priv->netdev, "CB allocation failed\n");
		return -ENOMEM;
	}

	for (i = 0; i < priv->num_rx_bds; i++) {
		cb = priv->rx_cbs + i;
		cb->bd_addr = priv->rx_bds + i * DESC_SIZE;
	}

	ret = bcm_sysport_alloc_rx_bufs(priv);
	if (ret) {
		netif_err(priv, hw, priv->netdev, "SKB allocation failed\n");
		return ret;
	}

	/* Initialize HW, ensure RDMA is disabled */
	reg = rdma_readl(priv, RDMA_STATUS);
	if (!(reg & RDMA_DISABLED))
		rdma_enable_set(priv, 0);

	rdma_writel(priv, 0, RDMA_WRITE_PTR_LO);
	rdma_writel(priv, 0, RDMA_WRITE_PTR_HI);
	rdma_writel(priv, 0, RDMA_PROD_INDEX);
	rdma_writel(priv, 0, RDMA_CONS_INDEX);
	rdma_writel(priv, priv->num_rx_bds << RDMA_RING_SIZE_SHIFT |
			  RX_BUF_LENGTH, RDMA_RING_BUF_SIZE);
	/* Operate the queue in ring mode */
	rdma_writel(priv, 0, RDMA_START_ADDR_HI);
	rdma_writel(priv, 0, RDMA_START_ADDR_LO);
	rdma_writel(priv, 0, RDMA_END_ADDR_HI);
	rdma_writel(priv, priv->num_rx_desc_words - 1, RDMA_END_ADDR_LO);

	netif_dbg(priv, hw, priv->netdev,
		  "RDMA cfg, num_rx_bds=%d, rx_bds=%p\n",
		  priv->num_rx_bds, priv->rx_bds);

	return 0;
}

static void bcm_sysport_fini_rx_ring(struct bcm_sysport_priv *priv)
{
	struct bcm_sysport_cb *cb;
	unsigned int i;
	u32 reg;

	/* Caller should ensure RDMA is disabled */
	reg = rdma_readl(priv, RDMA_STATUS);
	if (!(reg & RDMA_DISABLED))
		netdev_warn(priv->netdev, "RDMA not stopped!\n");

	for (i = 0; i < priv->num_rx_bds; i++) {
		cb = &priv->rx_cbs[i];
		if (dma_unmap_addr(cb, dma_addr))
			dma_unmap_single(&priv->pdev->dev,
					 dma_unmap_addr(cb, dma_addr),
					 RX_BUF_LENGTH, DMA_FROM_DEVICE);
		bcm_sysport_free_cb(cb);
	}

	kfree(priv->rx_cbs);
	priv->rx_cbs = NULL;

	netif_dbg(priv, hw, priv->netdev, "RDMA fini done\n");
}

static void bcm_sysport_set_rx_mode(struct net_device *dev)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	u32 reg;

	if (priv->is_lite)
		return;

	reg = umac_readl(priv, UMAC_CMD);
	if (dev->flags & IFF_PROMISC)
		reg |= CMD_PROMISC;
	else
		reg &= ~CMD_PROMISC;
	umac_writel(priv, reg, UMAC_CMD);

	/* No support for ALLMULTI */
	if (dev->flags & IFF_ALLMULTI)
		return;
}

static inline void umac_enable_set(struct bcm_sysport_priv *priv,
				   u32 mask, unsigned int enable)
{
	u32 reg;

	if (!priv->is_lite) {
		reg = umac_readl(priv, UMAC_CMD);
		if (enable)
			reg |= mask;
		else
			reg &= ~mask;
		umac_writel(priv, reg, UMAC_CMD);
	} else {
		reg = gib_readl(priv, GIB_CONTROL);
		if (enable)
			reg |= mask;
		else
			reg &= ~mask;
		gib_writel(priv, reg, GIB_CONTROL);
	}

	/* UniMAC stops on a packet boundary, wait for a full-sized packet
	 * to be processed (1 msec).
	 */
	if (enable == 0)
		usleep_range(1000, 2000);
}

static inline void umac_reset(struct bcm_sysport_priv *priv)
{
	u32 reg;

	if (priv->is_lite)
		return;

	reg = umac_readl(priv, UMAC_CMD);
	reg |= CMD_SW_RESET;
	umac_writel(priv, reg, UMAC_CMD);
	udelay(10);
	reg = umac_readl(priv, UMAC_CMD);
	reg &= ~CMD_SW_RESET;
	umac_writel(priv, reg, UMAC_CMD);
}

static void umac_set_hw_addr(struct bcm_sysport_priv *priv,
			     unsigned char *addr)
{
	u32 mac0 = (addr[0] << 24) | (addr[1] << 16) | (addr[2] << 8) |
		    addr[3];
	u32 mac1 = (addr[4] << 8) | addr[5];
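	/* Example of the packing above: 00:0a:f7:01:02:03 yields
	 * mac0 = 0x000af701 (first four octets, in big-endian order) and
	 * mac1 = 0x0203 (last two octets).
	 */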
	if (!priv->is_lite) {
		umac_writel(priv, mac0, UMAC_MAC0);
		umac_writel(priv, mac1, UMAC_MAC1);
	} else {
		gib_writel(priv, mac0, GIB_MAC0);
		gib_writel(priv, mac1, GIB_MAC1);
	}
}

static void topctrl_flush(struct bcm_sysport_priv *priv)
{
	topctrl_writel(priv, RX_FLUSH, RX_FLUSH_CNTL);
	topctrl_writel(priv, TX_FLUSH, TX_FLUSH_CNTL);
	mdelay(1);
	topctrl_writel(priv, 0, RX_FLUSH_CNTL);
	topctrl_writel(priv, 0, TX_FLUSH_CNTL);
}

static int bcm_sysport_change_mac(struct net_device *dev, void *p)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	struct sockaddr *addr = p;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EINVAL;

	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);

	/* interface is disabled, changes to MAC will be reflected on next
	 * open call
	 */
	if (!netif_running(dev))
		return 0;

	umac_set_hw_addr(priv, dev->dev_addr);

	return 0;
}

static void bcm_sysport_get_stats64(struct net_device *dev,
				    struct rtnl_link_stats64 *stats)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	struct bcm_sysport_stats64 *stats64 = &priv->stats64;
	unsigned int start;

	netdev_stats_to_stats64(stats, &dev->stats);

	bcm_sysport_update_tx_stats(priv, &stats->tx_bytes,
				    &stats->tx_packets);

	do {
		start = u64_stats_fetch_begin_irq(&priv->syncp);
		stats->rx_packets = stats64->rx_packets;
		stats->rx_bytes = stats64->rx_bytes;
	} while (u64_stats_fetch_retry_irq(&priv->syncp, start));
}

static void bcm_sysport_netif_start(struct net_device *dev)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);

	/* Enable NAPI */
	bcm_sysport_init_dim(priv, bcm_sysport_dim_work);
	bcm_sysport_init_rx_coalesce(priv);
	napi_enable(&priv->napi);

	/* Enable RX interrupt and TX ring full interrupt */
	intrl2_0_mask_clear(priv, INTRL2_0_RDMA_MBDONE | INTRL2_0_TX_RING_FULL);

	phy_start(dev->phydev);

	/* Enable TX interrupts for the TXQs */
	if (!priv->is_lite)
		intrl2_1_mask_clear(priv, 0xffffffff);
	else
		intrl2_0_mask_clear(priv, INTRL2_0_TDMA_MBDONE_MASK);
}

static void rbuf_init(struct bcm_sysport_priv *priv)
{
	u32 reg;

	reg = rbuf_readl(priv, RBUF_CONTROL);
	reg |= RBUF_4B_ALGN | RBUF_RSB_EN;
	/* Set a correct RSB format on SYSTEMPORT Lite */
	if (priv->is_lite)
		reg &= ~RBUF_RSB_SWAP1;

	/* Set a correct RSB format based on host endian */
	if (!IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
		reg |= RBUF_RSB_SWAP0;
	else
		reg &= ~RBUF_RSB_SWAP0;
	rbuf_writel(priv, reg, RBUF_CONTROL);
}

static inline void bcm_sysport_mask_all_intrs(struct bcm_sysport_priv *priv)
{
	intrl2_0_mask_set(priv, 0xffffffff);
	intrl2_0_writel(priv, 0xffffffff, INTRL2_CPU_CLEAR);
	if (!priv->is_lite) {
		intrl2_1_mask_set(priv, 0xffffffff);
		intrl2_1_writel(priv, 0xffffffff, INTRL2_CPU_CLEAR);
	}
}

static inline void gib_set_pad_extension(struct bcm_sysport_priv *priv)
{
	u32 reg;

	reg = gib_readl(priv, GIB_CONTROL);
	/* Include Broadcom tag in pad extension and fix up IPG_LENGTH */
	if (netdev_uses_dsa(priv->netdev)) {
		reg &= ~(GIB_PAD_EXTENSION_MASK << GIB_PAD_EXTENSION_SHIFT);
		reg |= ENET_BRCM_TAG_LEN << GIB_PAD_EXTENSION_SHIFT;
	}
	reg &= ~(GIB_IPG_LEN_MASK << GIB_IPG_LEN_SHIFT);
	reg |= 12 << GIB_IPG_LEN_SHIFT;
	gib_writel(priv, reg, GIB_CONTROL);
}

static int bcm_sysport_open(struct net_device *dev)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	struct phy_device *phydev;
	unsigned int i;
	int ret;

	/* Reset UniMAC */
	umac_reset(priv);

	/* Flush TX and RX FIFOs at TOPCTRL level */
	topctrl_flush(priv);

	/* Disable the UniMAC RX/TX */
	umac_enable_set(priv, CMD_RX_EN | CMD_TX_EN, 0);

	/* Enable RBUF 2bytes alignment and Receive Status Block */
	rbuf_init(priv);

	/* Set maximum frame length */
	if (!priv->is_lite)
		umac_writel(priv, UMAC_MAX_MTU_SIZE, UMAC_MAX_FRAME_LEN);
	else
		gib_set_pad_extension(priv);

	/* Apply features again in case we changed them while interface was
	 * down
	 */
	bcm_sysport_set_features(dev, dev->features);

	/* Set MAC address */
	umac_set_hw_addr(priv, dev->dev_addr);

	phydev = of_phy_connect(dev, priv->phy_dn, bcm_sysport_adj_link,
				0, priv->phy_interface);
	if (!phydev) {
		netdev_err(dev, "could not attach to PHY\n");
		return -ENODEV;
	}

	/* Reset house keeping link status */
	priv->old_duplex = -1;
	priv->old_link = -1;
	priv->old_pause = -1;

	/* mask all interrupts and request them */
	bcm_sysport_mask_all_intrs(priv);

	ret = request_irq(priv->irq0, bcm_sysport_rx_isr, 0, dev->name, dev);
	if (ret) {
		netdev_err(dev, "failed to request RX interrupt\n");
		goto out_phy_disconnect;
	}

	if (!priv->is_lite) {
		ret = request_irq(priv->irq1, bcm_sysport_tx_isr, 0,
				  dev->name, dev);
		if (ret) {
			netdev_err(dev, "failed to request TX interrupt\n");
			goto out_free_irq0;
		}
	}

	/* Initialize both hardware and software ring */
	for (i = 0; i < dev->num_tx_queues; i++) {
		ret = bcm_sysport_init_tx_ring(priv, i);
		if (ret) {
			netdev_err(dev, "failed to initialize TX ring %d\n",
				   i);
			goto out_free_tx_ring;
		}
	}

	/* Initialize linked-list */
	tdma_writel(priv, TDMA_LL_RAM_INIT_BUSY, TDMA_STATUS);

	/* Initialize RX ring */
	ret = bcm_sysport_init_rx_ring(priv);
	if (ret) {
		netdev_err(dev, "failed to initialize RX ring\n");
		goto out_free_rx_ring;
	}

	/* Turn on RDMA */
	ret = rdma_enable_set(priv, 1);
	if (ret)
		goto out_free_rx_ring;

	/* Turn on TDMA */
	ret = tdma_enable_set(priv, 1);
	if (ret)
		goto out_clear_rx_int;

	/* Turn on UniMAC TX/RX */
	umac_enable_set(priv, CMD_RX_EN | CMD_TX_EN, 1);

	bcm_sysport_netif_start(dev);

	netif_tx_start_all_queues(dev);

	return 0;

out_clear_rx_int:
	intrl2_0_mask_set(priv, INTRL2_0_RDMA_MBDONE | INTRL2_0_TX_RING_FULL);
out_free_rx_ring:
	bcm_sysport_fini_rx_ring(priv);
out_free_tx_ring:
	for (i = 0; i < dev->num_tx_queues; i++)
		bcm_sysport_fini_tx_ring(priv, i);
	if (!priv->is_lite)
		free_irq(priv->irq1, dev);
out_free_irq0:
	free_irq(priv->irq0, dev);
out_phy_disconnect:
	phy_disconnect(phydev);
	return ret;
}

static void bcm_sysport_netif_stop(struct net_device *dev)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);

	/* stop all software from updating hardware */
	netif_tx_disable(dev);
	napi_disable(&priv->napi);
	cancel_work_sync(&priv->dim.dim.work);
	phy_stop(dev->phydev);

	/* mask all interrupts */
	bcm_sysport_mask_all_intrs(priv);
}

static int bcm_sysport_stop(struct net_device *dev)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	unsigned int i;
	int ret;

	bcm_sysport_netif_stop(dev);

	/* Disable UniMAC RX */
	umac_enable_set(priv, CMD_RX_EN, 0);

	ret = tdma_enable_set(priv, 0);
	if (ret) {
		netdev_err(dev, "timeout disabling TDMA\n");
		return ret;
	}

	/* Wait for a maximum packet size to be drained */
	usleep_range(2000, 3000);

	ret = rdma_enable_set(priv, 0);
	if (ret) {
		netdev_err(dev, "timeout disabling RDMA\n");
		return ret;
	}

	/* Disable UniMAC TX */
	umac_enable_set(priv, CMD_TX_EN, 0);

	/* Free RX/TX rings SW structures */
	for (i = 0; i < dev->num_tx_queues; i++)
		bcm_sysport_fini_tx_ring(priv, i);
	bcm_sysport_fini_rx_ring(priv);

	free_irq(priv->irq0, dev);
	if (!priv->is_lite)
		free_irq(priv->irq1, dev);

	/* Disconnect from PHY */
	phy_disconnect(dev->phydev);

	return 0;
}

static int bcm_sysport_rule_find(struct bcm_sysport_priv *priv,
				 u64 location)
{
	unsigned int index;
	u32 reg;

	for_each_set_bit(index, priv->filters, RXCHK_BRCM_TAG_MAX) {
		reg = rxchk_readl(priv, RXCHK_BRCM_TAG(index));
		reg >>= RXCHK_BRCM_TAG_CID_SHIFT;
		reg &= RXCHK_BRCM_TAG_CID_MASK;
		if (reg == location)
			return index;
	}

	return -EINVAL;
}

static int bcm_sysport_rule_get(struct bcm_sysport_priv *priv,
				struct ethtool_rxnfc *nfc)
{
	int index;

	/* This is not a rule that we know about */
	index = bcm_sysport_rule_find(priv, nfc->fs.location);
	if (index < 0)
		return -EOPNOTSUPP;

	nfc->fs.ring_cookie = RX_CLS_FLOW_WAKE;

	return 0;
}

static int bcm_sysport_rule_set(struct bcm_sysport_priv *priv,
				struct ethtool_rxnfc *nfc)
{
	unsigned int index;
	u32 reg;

	/* We cannot match locations greater than what the classification ID
	 * permits (256 entries)
	 */
	if (nfc->fs.location > RXCHK_BRCM_TAG_CID_MASK)
		return -E2BIG;

	/* We cannot support flows that are not destined for a wake-up */
	if (nfc->fs.ring_cookie != RX_CLS_FLOW_WAKE)
		return -EOPNOTSUPP;

	/* All filters are already in use, we cannot match more rules */
	if (bitmap_weight(priv->filters, RXCHK_BRCM_TAG_MAX) ==
	    RXCHK_BRCM_TAG_MAX)
		return -ENOSPC;

	index = find_first_zero_bit(priv->filters, RXCHK_BRCM_TAG_MAX);
	if (index >= RXCHK_BRCM_TAG_MAX)
		return -ENOSPC;

	/* Location is the classification ID, and index is the position
	 * within one of our 8 possible filters to be programmed
	 */
	reg = rxchk_readl(priv, RXCHK_BRCM_TAG(index));
	reg &= ~(RXCHK_BRCM_TAG_CID_MASK << RXCHK_BRCM_TAG_CID_SHIFT);
	reg |= nfc->fs.location << RXCHK_BRCM_TAG_CID_SHIFT;
	rxchk_writel(priv, reg, RXCHK_BRCM_TAG(index));
	rxchk_writel(priv, 0xff00ffff, RXCHK_BRCM_TAG_MASK(index));

	set_bit(index, priv->filters);

	return 0;
}

static int bcm_sysport_rule_del(struct bcm_sysport_priv *priv,
				u64 location)
{
	int index;

	/* This is not a rule that we know about */
	index = bcm_sysport_rule_find(priv, location);
	if (index < 0)
		return -EOPNOTSUPP;

	/* No need to disable this filter if it was enabled, this will
	 * be taken care of during suspend time by bcm_sysport_suspend_to_wol
	 */
	clear_bit(index, priv->filters);

	return 0;
}

static int bcm_sysport_get_rxnfc(struct net_device *dev,
				 struct ethtool_rxnfc *nfc, u32 *rule_locs)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	int ret = -EOPNOTSUPP;

	switch (nfc->cmd) {
	case ETHTOOL_GRXCLSRULE:
		ret = bcm_sysport_rule_get(priv, nfc);
		break;
	default:
		break;
	}

	return ret;
}

static int bcm_sysport_set_rxnfc(struct net_device *dev,
				 struct ethtool_rxnfc *nfc)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	int ret = -EOPNOTSUPP;

	switch (nfc->cmd) {
	case ETHTOOL_SRXCLSRLINS:
		ret = bcm_sysport_rule_set(priv, nfc);
		break;
	case ETHTOOL_SRXCLSRLDEL:
		ret = bcm_sysport_rule_del(priv, nfc->fs.location);
		break;
	default:
		break;
	}

	return ret;
}

static const struct ethtool_ops bcm_sysport_ethtool_ops = {
	.get_drvinfo = bcm_sysport_get_drvinfo,
	.get_msglevel = bcm_sysport_get_msglvl,
	.set_msglevel = bcm_sysport_set_msglvl,
	.get_link = ethtool_op_get_link,
	.get_strings = bcm_sysport_get_strings,
	.get_ethtool_stats = bcm_sysport_get_stats,
	.get_sset_count = bcm_sysport_get_sset_count,
	.get_wol = bcm_sysport_get_wol,
	.set_wol = bcm_sysport_set_wol,
	.get_coalesce = bcm_sysport_get_coalesce,
	.set_coalesce = bcm_sysport_set_coalesce,
	.get_link_ksettings = phy_ethtool_get_link_ksettings,
	.set_link_ksettings = phy_ethtool_set_link_ksettings,
	.get_rxnfc = bcm_sysport_get_rxnfc,
	.set_rxnfc = bcm_sysport_set_rxnfc,
};

static u16 bcm_sysport_select_queue(struct net_device *dev, struct sk_buff *skb,
				    struct net_device *sb_dev,
				    select_queue_fallback_t fallback)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	u16 queue = skb_get_queue_mapping(skb);
	struct bcm_sysport_tx_ring *tx_ring;
	unsigned int q, port;

	if (!netdev_uses_dsa(dev))
		return fallback(dev, skb, NULL);

	/* DSA tagging layer will have configured the correct queue */
	q = BRCM_TAG_GET_QUEUE(queue);
	port = BRCM_TAG_GET_PORT(queue);
2285 tx_ring = priv->ring_map[q + port * priv->per_port_num_tx_queues];
2287 if (unlikely(!tx_ring))
2288 return fallback(dev, skb, NULL);
2290 return tx_ring->index;
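/* Illustrative sketch, not used by the driver: the ring_map lookup above
 * flattens a (switch queue, switch port) pair into a linear index, with each
 * port owning a contiguous window of per_port_num_tx_queues entries. For
 * instance, with 4 queues per port, port 2/queue 1 resolves to
 * ring_map[1 + 2 * 4] = ring_map[9].
 */
static inline unsigned int
bcm_sysport_ring_map_index(unsigned int q, unsigned int port,
			   unsigned int per_port_num_tx_queues)
{
	return q + port * per_port_num_tx_queues;
}
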
static const struct net_device_ops bcm_sysport_netdev_ops = {
	.ndo_start_xmit		= bcm_sysport_xmit,
	.ndo_tx_timeout		= bcm_sysport_tx_timeout,
	.ndo_open		= bcm_sysport_open,
	.ndo_stop		= bcm_sysport_stop,
	.ndo_set_features	= bcm_sysport_set_features,
	.ndo_set_rx_mode	= bcm_sysport_set_rx_mode,
	.ndo_set_mac_address	= bcm_sysport_change_mac,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= bcm_sysport_poll_controller,
#endif
	.ndo_get_stats64	= bcm_sysport_get_stats64,
	.ndo_select_queue	= bcm_sysport_select_queue,
};

static int bcm_sysport_map_queues(struct notifier_block *nb,
				  struct dsa_notifier_register_info *info)
{
	struct bcm_sysport_tx_ring *ring;
	struct bcm_sysport_priv *priv;
	struct net_device *slave_dev;
	unsigned int num_tx_queues;
	unsigned int q, start, port;
	struct net_device *dev;

	priv = container_of(nb, struct bcm_sysport_priv, dsa_notifier);
	if (priv->netdev != info->master)
		return 0;

	dev = info->master;

	/* We can't be setting up queue inspection for non-directly attached
	 * switches
	 */
	if (info->switch_number)
		return 0;

	if (dev->netdev_ops != &bcm_sysport_netdev_ops)
		return 0;

	port = info->port_number;
	slave_dev = info->info.dev;

	/* On SYSTEMPORT Lite we have half as many queues, so we cannot do a
	 * 1:1 mapping, only a 2:1 mapping; we achieve that by halving the
	 * number of TX queues of each per-port (slave_dev) network device.
	 * This needs to happen now, before any slave network device is used,
	 * so that it accurately reflects the number of real TX queues.
	 */
	if (priv->is_lite)
		netif_set_real_num_tx_queues(slave_dev,
					     slave_dev->num_tx_queues / 2);

	num_tx_queues = slave_dev->real_num_tx_queues;

	if (priv->per_port_num_tx_queues &&
	    priv->per_port_num_tx_queues != num_tx_queues)
		netdev_warn(slave_dev, "asymmetric number of per-port queues\n");

	priv->per_port_num_tx_queues = num_tx_queues;

	start = find_first_zero_bit(&priv->queue_bitmap, dev->num_tx_queues);
	for (q = 0; q < num_tx_queues; q++) {
		ring = &priv->tx_rings[q + start];

		/* Just remember the mapping here; the actual programming is
		 * done during bcm_sysport_init_tx_ring()
		 */
		ring->switch_queue = q;
		ring->switch_port = port;
		ring->inspect = true;
		priv->ring_map[q + port * num_tx_queues] = ring;

		/* Set all queues as being used now */
		set_bit(q + start, &priv->queue_bitmap);
	}

	return 0;
}

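/* Worked example (illustrative numbers): with 4 TX queues per slave device,
 * the first registered port claims master rings 0-3, the next one rings 4-7,
 * and so on. queue_bitmap tracks which master rings are already spoken for,
 * while ring_map[q + port * 4] recovers the ring from a Broadcom tag's
 * (queue, port) pair at transmit time.
 */
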
static int bcm_sysport_dsa_notifier(struct notifier_block *nb,
				    unsigned long event, void *ptr)
{
	struct dsa_notifier_register_info *info;

	if (event != DSA_PORT_REGISTER)
		return NOTIFY_DONE;

	info = ptr;

	/* notifier_from_errno() folds the errno into a NOTIFY_* return code */
	return notifier_from_errno(bcm_sysport_map_queues(nb, info));
}

#define REV_FMT	"v%2x.%02x"

static const struct bcm_sysport_hw_params bcm_sysport_params[] = {
	[SYSTEMPORT] = {
		.is_lite = false,
		.num_rx_desc_words = SP_NUM_HW_RX_DESC_WORDS,
	},
	[SYSTEMPORT_LITE] = {
		.is_lite = true,
		.num_rx_desc_words = SP_LT_NUM_HW_RX_DESC_WORDS,
	},
};

static const struct of_device_id bcm_sysport_of_match[] = {
	{ .compatible = "brcm,systemportlite-v1.00",
	  .data = &bcm_sysport_params[SYSTEMPORT_LITE] },
	{ .compatible = "brcm,systemport-v1.00",
	  .data = &bcm_sysport_params[SYSTEMPORT] },
	{ .compatible = "brcm,systemport",
	  .data = &bcm_sysport_params[SYSTEMPORT] },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, bcm_sysport_of_match);

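/* Illustrative device tree fragment (not from the driver sources, node name
 * and unit address are made up): the optional "systemport,num-txq" and
 * "systemport,num-rxq" properties parsed in bcm_sysport_probe() would be
 * wired up along these lines:
 *
 *	ethernet@0 {
 *		compatible = "brcm,systemport-v1.00";
 *		systemport,num-txq = <16>;
 *		systemport,num-rxq = <1>;
 *	};
 */
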
static int bcm_sysport_probe(struct platform_device *pdev)
{
	const struct bcm_sysport_hw_params *params;
	const struct of_device_id *of_id = NULL;
	struct bcm_sysport_priv *priv;
	struct device_node *dn;
	struct net_device *dev;
	const void *macaddr;
	struct resource *r;
	u32 txq, rxq;
	int ret;

	dn = pdev->dev.of_node;
	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	of_id = of_match_node(bcm_sysport_of_match, dn);
	if (!of_id || !of_id->data)
		return -EINVAL;

	/* Fairly quickly we need to know the type of adapter we have */
	params = of_id->data;

	/* Read the Transmit/Receive Queue properties */
	if (of_property_read_u32(dn, "systemport,num-txq", &txq))
		txq = TDMA_NUM_RINGS;
	if (of_property_read_u32(dn, "systemport,num-rxq", &rxq))
		rxq = 1;

	/* Sanity check the number of transmit queues */
	if (!txq || txq > TDMA_NUM_RINGS)
		return -EINVAL;

	dev = alloc_etherdev_mqs(sizeof(*priv), txq, rxq);
	if (!dev)
		return -ENOMEM;

	/* Initialize private members */
	priv = netdev_priv(dev);

	/* Allocate number of TX rings */
	priv->tx_rings = devm_kcalloc(&pdev->dev, txq,
				      sizeof(struct bcm_sysport_tx_ring),
				      GFP_KERNEL);
	if (!priv->tx_rings) {
		ret = -ENOMEM;
		goto err_free_netdev;
	}

	priv->is_lite = params->is_lite;
	priv->num_rx_desc_words = params->num_rx_desc_words;

	/* SYSTEMPORT Lite only provides the first RX/TX interrupt, so the
	 * Wake-on-LAN interrupt moves up one index.
	 */
	priv->irq0 = platform_get_irq(pdev, 0);
	if (!priv->is_lite) {
		priv->irq1 = platform_get_irq(pdev, 1);
		priv->wol_irq = platform_get_irq(pdev, 2);
	} else {
		priv->wol_irq = platform_get_irq(pdev, 1);
	}
	if (priv->irq0 <= 0 || (priv->irq1 <= 0 && !priv->is_lite)) {
		dev_err(&pdev->dev, "invalid interrupts\n");
		ret = -EINVAL;
		goto err_free_netdev;
	}

	priv->base = devm_ioremap_resource(&pdev->dev, r);
	if (IS_ERR(priv->base)) {
		ret = PTR_ERR(priv->base);
		goto err_free_netdev;
	}

	priv->netdev = dev;
	priv->pdev = pdev;

	priv->phy_interface = of_get_phy_mode(dn);
	/* Default to GMII interface mode */
	if (priv->phy_interface < 0)
		priv->phy_interface = PHY_INTERFACE_MODE_GMII;

	/* In the case of a fixed PHY, the DT node associated
	 * to the PHY is the Ethernet MAC DT node.
	 */
	if (of_phy_is_fixed_link(dn)) {
		ret = of_phy_register_fixed_link(dn);
		if (ret) {
			dev_err(&pdev->dev, "failed to register fixed PHY\n");
			goto err_free_netdev;
		}

		priv->phy_dn = dn;
	}

	/* Initialize netdevice members */
	macaddr = of_get_mac_address(dn);
	if (!macaddr || !is_valid_ether_addr(macaddr)) {
		dev_warn(&pdev->dev, "using random Ethernet MAC\n");
		eth_hw_addr_random(dev);
	} else {
		ether_addr_copy(dev->dev_addr, macaddr);
	}

	SET_NETDEV_DEV(dev, &pdev->dev);
	dev_set_drvdata(&pdev->dev, dev);
	dev->ethtool_ops = &bcm_sysport_ethtool_ops;
	dev->netdev_ops = &bcm_sysport_netdev_ops;
	netif_napi_add(dev, &priv->napi, bcm_sysport_poll, 64);

	dev->features |= NETIF_F_RXCSUM | NETIF_F_HIGHDMA |
			 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
	dev->hw_features |= dev->features;
	dev->vlan_features |= dev->features;

	/* Request the WOL interrupt and advertise suspend if available */
	priv->wol_irq_disabled = 1;
	ret = devm_request_irq(&pdev->dev, priv->wol_irq,
			       bcm_sysport_wol_isr, 0, dev->name, priv);
	if (!ret)
		device_set_wakeup_capable(&pdev->dev, 1);

	/* Set the needed headroom once and for all */
	BUILD_BUG_ON(sizeof(struct bcm_tsb) != 8);
	dev->needed_headroom += sizeof(struct bcm_tsb);

	/* libphy will adjust the link state accordingly */
	netif_carrier_off(dev);

	priv->rx_max_coalesced_frames = 1;
	u64_stats_init(&priv->syncp);

	priv->dsa_notifier.notifier_call = bcm_sysport_dsa_notifier;

	ret = register_dsa_notifier(&priv->dsa_notifier);
	if (ret) {
		dev_err(&pdev->dev, "failed to register DSA notifier\n");
		goto err_deregister_fixed_link;
	}

	ret = register_netdev(dev);
	if (ret) {
		dev_err(&pdev->dev, "failed to register net_device\n");
		goto err_deregister_notifier;
	}

	priv->rev = topctrl_readl(priv, REV_CNTL) & REV_MASK;
	dev_info(&pdev->dev,
		 "Broadcom SYSTEMPORT%s" REV_FMT
		 " at 0x%p (irqs: %d, %d, TXQs: %d, RXQs: %d)\n",
		 priv->is_lite ? " Lite" : "",
		 (priv->rev >> 8) & 0xff, priv->rev & 0xff,
		 priv->base, priv->irq0, priv->irq1, txq, rxq);

	return 0;

err_deregister_notifier:
	unregister_dsa_notifier(&priv->dsa_notifier);
err_deregister_fixed_link:
	if (of_phy_is_fixed_link(dn))
		of_phy_deregister_fixed_link(dn);
err_free_netdev:
	free_netdev(dev);
	return ret;
}

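/* The unwind labels above run in reverse order of setup: a failed netdev
 * registration drops the DSA notifier, which in turn drops the fixed link,
 * before the netdev itself is finally freed.
 */
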
static int bcm_sysport_remove(struct platform_device *pdev)
{
	struct net_device *dev = dev_get_drvdata(&pdev->dev);
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	struct device_node *dn = pdev->dev.of_node;

	/* Not much to do here, ndo_close has been called
	 * and we use managed allocations
	 */
	unregister_dsa_notifier(&priv->dsa_notifier);
	unregister_netdev(dev);
	if (of_phy_is_fixed_link(dn))
		of_phy_deregister_fixed_link(dn);
	free_netdev(dev);
	dev_set_drvdata(&pdev->dev, NULL);

	return 0;
}

static int bcm_sysport_suspend_to_wol(struct bcm_sysport_priv *priv)
{
	struct net_device *ndev = priv->netdev;
	unsigned int timeout = 1000;
	unsigned int index, i = 0;
	u32 reg;

	/* Password has already been programmed */
	reg = umac_readl(priv, UMAC_MPD_CTRL);
	if (priv->wolopts & (WAKE_MAGIC | WAKE_MAGICSECURE))
		reg |= MPD_EN;
	reg &= ~PSW_EN;
	if (priv->wolopts & WAKE_MAGICSECURE)
		reg |= PSW_EN;
	umac_writel(priv, reg, UMAC_MPD_CTRL);

	if (priv->wolopts & WAKE_FILTER) {
		/* Turn on ACPI matching to steal packets from RBUF */
		reg = rbuf_readl(priv, RBUF_CONTROL);
		if (priv->is_lite)
			reg |= RBUF_ACPI_EN_LITE;
		else
			reg |= RBUF_ACPI_EN;
		rbuf_writel(priv, reg, RBUF_CONTROL);

		/* Enable RXCHK, active filters and Broadcom tag matching */
		reg = rxchk_readl(priv, RXCHK_CONTROL);
		reg &= ~(RXCHK_BRCM_TAG_MATCH_MASK <<
			 RXCHK_BRCM_TAG_MATCH_SHIFT);
		for_each_set_bit(index, priv->filters, RXCHK_BRCM_TAG_MAX) {
			reg |= BIT(RXCHK_BRCM_TAG_MATCH_SHIFT + i);
			i++;
		}
		reg |= RXCHK_EN | RXCHK_BRCM_TAG_EN;
		rxchk_writel(priv, reg, RXCHK_CONTROL);
	}

	/* Make sure RBUF entered WoL mode as a result */
	do {
		reg = rbuf_readl(priv, RBUF_STATUS);
		if (reg & RBUF_WOL_MODE)
			break;

		udelay(10);
	} while (timeout-- > 0);

	/* Do not leave the UniMAC RBUF matching only MPD packets */
	if (!timeout) {
		mpd_enable_set(priv, false);
		netif_err(priv, wol, ndev, "failed to enter WOL mode\n");
		return -ETIMEDOUT;
	}

	/* UniMAC receive needs to be turned on */
	umac_enable_set(priv, CMD_RX_EN, 1);

	netif_dbg(priv, wol, ndev, "entered WOL mode\n");

	return 0;
}

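/* To summarize the wake sources armed above: WAKE_MAGIC/WAKE_MAGICSECURE rely
 * on UniMAC magic packet detection (MPD, optionally gated by a SecureOn
 * password), while WAKE_FILTER reuses the RXCHK Broadcom tag filters set up
 * through the rxnfc interface, with RBUF ACPI matching stealing the matched
 * packets from the regular receive path.
 */
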
static int __maybe_unused bcm_sysport_suspend(struct device *d)
{
	struct net_device *dev = dev_get_drvdata(d);
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	unsigned int i;
	int ret = 0;
	u32 reg;

	if (!netif_running(dev))
		return 0;

	netif_device_detach(dev);

	bcm_sysport_netif_stop(dev);

	phy_suspend(dev->phydev);

	/* Disable UniMAC RX */
	umac_enable_set(priv, CMD_RX_EN, 0);

	ret = rdma_enable_set(priv, 0);
	if (ret) {
		netdev_err(dev, "RDMA timeout!\n");
		return ret;
	}

	/* Disable RXCHK if enabled */
	if (priv->rx_chk_en) {
		reg = rxchk_readl(priv, RXCHK_CONTROL);
		reg &= ~RXCHK_EN;
		rxchk_writel(priv, reg, RXCHK_CONTROL);
	}

	/* Flush RX pipe, unless Wake-on-LAN still needs it */
	if (!priv->wolopts)
		topctrl_writel(priv, RX_FLUSH, RX_FLUSH_CNTL);

	ret = tdma_enable_set(priv, 0);
	if (ret) {
		netdev_err(dev, "TDMA timeout!\n");
		return ret;
	}

	/* Wait for a packet boundary */
	usleep_range(2000, 3000);

	umac_enable_set(priv, CMD_TX_EN, 0);

	topctrl_writel(priv, TX_FLUSH, TX_FLUSH_CNTL);

	/* Free RX/TX rings SW structures */
	for (i = 0; i < dev->num_tx_queues; i++)
		bcm_sysport_fini_tx_ring(priv, i);
	bcm_sysport_fini_rx_ring(priv);

	/* Get prepared for Wake-on-LAN */
	if (device_may_wakeup(d) && priv->wolopts)
		ret = bcm_sysport_suspend_to_wol(priv);

	return ret;
}

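/* The teardown order above mirrors the datapath: stop receive first (UniMAC
 * RX, then RDMA), let transmit drain (TDMA, a packet-time sleep, UniMAC TX),
 * flush both pipes, and only then release the ring state and optionally arm
 * Wake-on-LAN.
 */
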
static int __maybe_unused bcm_sysport_resume(struct device *d)
{
	struct net_device *dev = dev_get_drvdata(d);
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	unsigned int i;
	int ret;

	if (!netif_running(dev))
		return 0;

	umac_reset(priv);

	/* We may have been suspended and never received a WOL event that
	 * would turn off MPD detection; take care of that now
	 */
	bcm_sysport_resume_from_wol(priv);

	/* Initialize both hardware and software rings */
	for (i = 0; i < dev->num_tx_queues; i++) {
		ret = bcm_sysport_init_tx_ring(priv, i);
		if (ret) {
			netdev_err(dev, "failed to initialize TX ring %d\n",
				   i);
			goto out_free_tx_rings;
		}
	}

	/* Initialize linked-list */
	tdma_writel(priv, TDMA_LL_RAM_INIT_BUSY, TDMA_STATUS);

	/* Initialize RX ring */
	ret = bcm_sysport_init_rx_ring(priv);
	if (ret) {
		netdev_err(dev, "failed to initialize RX ring\n");
		goto out_free_rx_ring;
	}

	/* RX pipe enable */
	topctrl_writel(priv, 0, RX_FLUSH_CNTL);

	ret = rdma_enable_set(priv, 1);
	if (ret) {
		netdev_err(dev, "failed to enable RDMA\n");
		goto out_free_rx_ring;
	}

	/* Restore enabled features */
	bcm_sysport_set_features(dev, dev->features);

	rbuf_init(priv);

	/* Set maximum frame length */
	if (!priv->is_lite)
		umac_writel(priv, UMAC_MAX_MTU_SIZE, UMAC_MAX_FRAME_LEN);
	else
		gib_set_pad_extension(priv);

	/* Set MAC address */
	umac_set_hw_addr(priv, dev->dev_addr);

	umac_enable_set(priv, CMD_RX_EN, 1);

	/* TX pipe enable */
	topctrl_writel(priv, 0, TX_FLUSH_CNTL);

	umac_enable_set(priv, CMD_TX_EN, 1);

	ret = tdma_enable_set(priv, 1);
	if (ret) {
		netdev_err(dev, "TDMA timeout!\n");
		goto out_free_rx_ring;
	}

	phy_resume(dev->phydev);

	bcm_sysport_netif_start(dev);

	netif_device_attach(dev);

	return 0;

out_free_rx_ring:
	bcm_sysport_fini_rx_ring(priv);
out_free_tx_rings:
	for (i = 0; i < dev->num_tx_queues; i++)
		bcm_sysport_fini_tx_ring(priv, i);
	return ret;
}

static SIMPLE_DEV_PM_OPS(bcm_sysport_pm_ops,
		bcm_sysport_suspend, bcm_sysport_resume);

static struct platform_driver bcm_sysport_driver = {
	.probe	= bcm_sysport_probe,
	.remove	= bcm_sysport_remove,
	.driver =  {
		.name = "brcm-systemport",
		.of_match_table = bcm_sysport_of_match,
		.pm = &bcm_sysport_pm_ops,
	},
};
module_platform_driver(bcm_sysport_driver);

MODULE_AUTHOR("Broadcom Corporation");
MODULE_DESCRIPTION("Broadcom System Port Ethernet MAC driver");
MODULE_ALIAS("platform:brcm-systemport");
MODULE_LICENSE("GPL");