net: systemport: Correct IPG length settings
drivers/net/ethernet/broadcom/bcmsysport.c
/*
 * Broadcom BCM7xxx System Port Ethernet MAC driver
 *
 * Copyright (C) 2014 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#define pr_fmt(fmt)     KBUILD_MODNAME ": " fmt

#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/platform_device.h>
#include <linux/of.h>
#include <linux/of_net.h>
#include <linux/of_mdio.h>
#include <linux/phy.h>
#include <linux/phy_fixed.h>
#include <net/dsa.h>
#include <net/ip.h>
#include <net/ipv6.h>

#include "bcmsysport.h"

/* I/O accessors register helpers */
#define BCM_SYSPORT_IO_MACRO(name, offset) \
static inline u32 name##_readl(struct bcm_sysport_priv *priv, u32 off)  \
{                                                                       \
        u32 reg = readl_relaxed(priv->base + offset + off);             \
        return reg;                                                     \
}                                                                       \
static inline void name##_writel(struct bcm_sysport_priv *priv,         \
                                  u32 val, u32 off)                     \
{                                                                       \
        writel_relaxed(val, priv->base + offset + off);                 \
}

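/* For reference, BCM_SYSPORT_IO_MACRO(umac, SYS_PORT_UMAC_OFFSET) below
 * expands to a pair of accessors equivalent to:
 *
 *   static inline u32 umac_readl(struct bcm_sysport_priv *priv, u32 off)
 *   {
 *           return readl_relaxed(priv->base + SYS_PORT_UMAC_OFFSET + off);
 *   }
 *
 * so every register block is addressed relative to its own base offset.
 */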
BCM_SYSPORT_IO_MACRO(intrl2_0, SYS_PORT_INTRL2_0_OFFSET);
BCM_SYSPORT_IO_MACRO(intrl2_1, SYS_PORT_INTRL2_1_OFFSET);
BCM_SYSPORT_IO_MACRO(umac, SYS_PORT_UMAC_OFFSET);
BCM_SYSPORT_IO_MACRO(gib, SYS_PORT_GIB_OFFSET);
BCM_SYSPORT_IO_MACRO(tdma, SYS_PORT_TDMA_OFFSET);
BCM_SYSPORT_IO_MACRO(rxchk, SYS_PORT_RXCHK_OFFSET);
BCM_SYSPORT_IO_MACRO(txchk, SYS_PORT_TXCHK_OFFSET);
BCM_SYSPORT_IO_MACRO(rbuf, SYS_PORT_RBUF_OFFSET);
BCM_SYSPORT_IO_MACRO(tbuf, SYS_PORT_TBUF_OFFSET);
BCM_SYSPORT_IO_MACRO(topctrl, SYS_PORT_TOPCTRL_OFFSET);

/* On SYSTEMPORT Lite, any register after RDMA_STATUS has the exact
 * same layout, except it has been moved by 4 bytes up, *sigh*
 */
static inline u32 rdma_readl(struct bcm_sysport_priv *priv, u32 off)
{
        if (priv->is_lite && off >= RDMA_STATUS)
                off += 4;
        return readl_relaxed(priv->base + SYS_PORT_RDMA_OFFSET + off);
}

static inline void rdma_writel(struct bcm_sysport_priv *priv, u32 val, u32 off)
{
        if (priv->is_lite && off >= RDMA_STATUS)
                off += 4;
        writel_relaxed(val, priv->base + SYS_PORT_RDMA_OFFSET + off);
}
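
/* e.g. on a Lite part, rdma_readl(priv, RDMA_STATUS) actually accesses
 * SYS_PORT_RDMA_OFFSET + RDMA_STATUS + 4, while registers below
 * RDMA_STATUS keep their classic SYSTEMPORT offsets.
 */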

static inline u32 tdma_control_bit(struct bcm_sysport_priv *priv, u32 bit)
{
        if (!priv->is_lite) {
                return BIT(bit);
        } else {
                if (bit >= ACB_ALGO)
                        return BIT(bit + 1);
                else
                        return BIT(bit);
        }
}
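
/* e.g. any TDMA_CONTROL bit at or above ACB_ALGO sits one position higher
 * on Lite parts (TSB_SWAP1 occupies the original ACB_ALGO position there,
 * see the TSB comment in bcm_sysport_init_tx_ring), hence BIT(bit + 1).
 */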

/* L2-interrupt masking/unmasking helpers that automatically save the
 * applied mask in a software copy, to avoid CPU_MASK_STATUS reads in
 * hot-paths.
 */
#define BCM_SYSPORT_INTR_L2(which)      \
static inline void intrl2_##which##_mask_clear(struct bcm_sysport_priv *priv, \
                                                u32 mask)               \
{                                                                       \
        priv->irq##which##_mask &= ~(mask);                             \
        intrl2_##which##_writel(priv, mask, INTRL2_CPU_MASK_CLEAR);     \
}                                                                       \
static inline void intrl2_##which##_mask_set(struct bcm_sysport_priv *priv, \
                                                u32 mask)               \
{                                                                       \
        intrl2_##which##_writel(priv, mask, INTRL2_CPU_MASK_SET);       \
        priv->irq##which##_mask |= (mask);                              \
}

BCM_SYSPORT_INTR_L2(0)
BCM_SYSPORT_INTR_L2(1)
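
/* e.g. intrl2_0_mask_set(priv, INTRL2_0_RDMA_MBDONE) masks the RX
 * interrupt at the INTRL2 controller and records the change in
 * priv->irq0_mask; intrl2_0_mask_clear() is the unmasking counterpart.
 */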

/* Register accesses to GISB/RBUS registers are expensive (a few hundred
 * nanoseconds), so keep the check for 64-bit explicit here to save
 * one register write per packet on 32-bit platforms.
 */
static inline void dma_desc_set_addr(struct bcm_sysport_priv *priv,
                                     void __iomem *d,
                                     dma_addr_t addr)
{
#ifdef CONFIG_PHYS_ADDR_T_64BIT
        writel_relaxed(upper_32_bits(addr) & DESC_ADDR_HI_MASK,
                     d + DESC_ADDR_HI_STATUS_LEN);
#endif
        writel_relaxed(lower_32_bits(addr), d + DESC_ADDR_LO);
}

static inline void tdma_port_write_desc_addr(struct bcm_sysport_priv *priv,
                                             struct dma_desc *desc,
                                             unsigned int port)
{
        /* Ports are latched, so write upper address first */
        tdma_writel(priv, desc->addr_status_len, TDMA_WRITE_PORT_HI(port));
        tdma_writel(priv, desc->addr_lo, TDMA_WRITE_PORT_LO(port));
}

/* Ethtool operations */
static int bcm_sysport_set_rx_csum(struct net_device *dev,
                                   netdev_features_t wanted)
{
        struct bcm_sysport_priv *priv = netdev_priv(dev);
        u32 reg;

        priv->rx_chk_en = !!(wanted & NETIF_F_RXCSUM);
        reg = rxchk_readl(priv, RXCHK_CONTROL);
        if (priv->rx_chk_en)
                reg |= RXCHK_EN;
        else
                reg &= ~RXCHK_EN;

        /* If UniMAC forwards CRC, we need to skip over it to get
         * a valid CHK bit to be set in the per-packet status word
         */
        if (priv->rx_chk_en && priv->crc_fwd)
                reg |= RXCHK_SKIP_FCS;
        else
                reg &= ~RXCHK_SKIP_FCS;

        /* If Broadcom tags are enabled (e.g. using a switch), make
         * sure we tell the RXCHK hardware to expect a 4-byte Broadcom
         * tag after the Ethernet MAC Source Address.
         */
        if (netdev_uses_dsa(dev))
                reg |= RXCHK_BRCM_TAG_EN;
        else
                reg &= ~RXCHK_BRCM_TAG_EN;

        rxchk_writel(priv, reg, RXCHK_CONTROL);

        return 0;
}

static int bcm_sysport_set_tx_csum(struct net_device *dev,
                                   netdev_features_t wanted)
{
        struct bcm_sysport_priv *priv = netdev_priv(dev);
        u32 reg;

        /* Hardware transmit checksum requires us to enable the Transmit status
         * block prepended to the packet contents
         */
        priv->tsb_en = !!(wanted & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM));
        reg = tdma_readl(priv, TDMA_CONTROL);
        if (priv->tsb_en)
                reg |= tdma_control_bit(priv, TSB_EN);
        else
                reg &= ~tdma_control_bit(priv, TSB_EN);
        tdma_writel(priv, reg, TDMA_CONTROL);

        return 0;
}

static int bcm_sysport_set_features(struct net_device *dev,
                                    netdev_features_t features)
{
        netdev_features_t changed = features ^ dev->features;
        netdev_features_t wanted = dev->wanted_features;
        int ret = 0;

        if (changed & NETIF_F_RXCSUM)
                ret = bcm_sysport_set_rx_csum(dev, wanted);
        if (changed & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM))
                ret = bcm_sysport_set_tx_csum(dev, wanted);

        return ret;
}

/* Hardware counters must be kept in sync because the order/offset
 * is important here (order in structure declaration = order in hardware)
 */
static const struct bcm_sysport_stats bcm_sysport_gstrings_stats[] = {
        /* general stats */
        STAT_NETDEV64(rx_packets),
        STAT_NETDEV64(tx_packets),
        STAT_NETDEV64(rx_bytes),
        STAT_NETDEV64(tx_bytes),
        STAT_NETDEV(rx_errors),
        STAT_NETDEV(tx_errors),
        STAT_NETDEV(rx_dropped),
        STAT_NETDEV(tx_dropped),
        STAT_NETDEV(multicast),
        /* UniMAC RSV counters */
        STAT_MIB_RX("rx_64_octets", mib.rx.pkt_cnt.cnt_64),
        STAT_MIB_RX("rx_65_127_oct", mib.rx.pkt_cnt.cnt_127),
        STAT_MIB_RX("rx_128_255_oct", mib.rx.pkt_cnt.cnt_255),
        STAT_MIB_RX("rx_256_511_oct", mib.rx.pkt_cnt.cnt_511),
        STAT_MIB_RX("rx_512_1023_oct", mib.rx.pkt_cnt.cnt_1023),
        STAT_MIB_RX("rx_1024_1518_oct", mib.rx.pkt_cnt.cnt_1518),
        STAT_MIB_RX("rx_vlan_1519_1522_oct", mib.rx.pkt_cnt.cnt_mgv),
        STAT_MIB_RX("rx_1522_2047_oct", mib.rx.pkt_cnt.cnt_2047),
        STAT_MIB_RX("rx_2048_4095_oct", mib.rx.pkt_cnt.cnt_4095),
        STAT_MIB_RX("rx_4096_9216_oct", mib.rx.pkt_cnt.cnt_9216),
        STAT_MIB_RX("rx_pkts", mib.rx.pkt),
        STAT_MIB_RX("rx_bytes", mib.rx.bytes),
        STAT_MIB_RX("rx_multicast", mib.rx.mca),
        STAT_MIB_RX("rx_broadcast", mib.rx.bca),
        STAT_MIB_RX("rx_fcs", mib.rx.fcs),
        STAT_MIB_RX("rx_control", mib.rx.cf),
        STAT_MIB_RX("rx_pause", mib.rx.pf),
        STAT_MIB_RX("rx_unknown", mib.rx.uo),
        STAT_MIB_RX("rx_align", mib.rx.aln),
        STAT_MIB_RX("rx_outrange", mib.rx.flr),
        STAT_MIB_RX("rx_code", mib.rx.cde),
        STAT_MIB_RX("rx_carrier", mib.rx.fcr),
        STAT_MIB_RX("rx_oversize", mib.rx.ovr),
        STAT_MIB_RX("rx_jabber", mib.rx.jbr),
        STAT_MIB_RX("rx_mtu_err", mib.rx.mtue),
        STAT_MIB_RX("rx_good_pkts", mib.rx.pok),
        STAT_MIB_RX("rx_unicast", mib.rx.uc),
        STAT_MIB_RX("rx_ppp", mib.rx.ppp),
        STAT_MIB_RX("rx_crc", mib.rx.rcrc),
        /* UniMAC TSV counters */
        STAT_MIB_TX("tx_64_octets", mib.tx.pkt_cnt.cnt_64),
        STAT_MIB_TX("tx_65_127_oct", mib.tx.pkt_cnt.cnt_127),
        STAT_MIB_TX("tx_128_255_oct", mib.tx.pkt_cnt.cnt_255),
        STAT_MIB_TX("tx_256_511_oct", mib.tx.pkt_cnt.cnt_511),
        STAT_MIB_TX("tx_512_1023_oct", mib.tx.pkt_cnt.cnt_1023),
        STAT_MIB_TX("tx_1024_1518_oct", mib.tx.pkt_cnt.cnt_1518),
        STAT_MIB_TX("tx_vlan_1519_1522_oct", mib.tx.pkt_cnt.cnt_mgv),
        STAT_MIB_TX("tx_1522_2047_oct", mib.tx.pkt_cnt.cnt_2047),
        STAT_MIB_TX("tx_2048_4095_oct", mib.tx.pkt_cnt.cnt_4095),
        STAT_MIB_TX("tx_4096_9216_oct", mib.tx.pkt_cnt.cnt_9216),
        STAT_MIB_TX("tx_pkts", mib.tx.pkts),
        STAT_MIB_TX("tx_multicast", mib.tx.mca),
        STAT_MIB_TX("tx_broadcast", mib.tx.bca),
        STAT_MIB_TX("tx_pause", mib.tx.pf),
        STAT_MIB_TX("tx_control", mib.tx.cf),
        STAT_MIB_TX("tx_fcs_err", mib.tx.fcs),
        STAT_MIB_TX("tx_oversize", mib.tx.ovr),
        STAT_MIB_TX("tx_defer", mib.tx.drf),
        STAT_MIB_TX("tx_excess_defer", mib.tx.edf),
        STAT_MIB_TX("tx_single_col", mib.tx.scl),
        STAT_MIB_TX("tx_multi_col", mib.tx.mcl),
        STAT_MIB_TX("tx_late_col", mib.tx.lcl),
        STAT_MIB_TX("tx_excess_col", mib.tx.ecl),
        STAT_MIB_TX("tx_frags", mib.tx.frg),
        STAT_MIB_TX("tx_total_col", mib.tx.ncl),
        STAT_MIB_TX("tx_jabber", mib.tx.jbr),
        STAT_MIB_TX("tx_bytes", mib.tx.bytes),
        STAT_MIB_TX("tx_good_pkts", mib.tx.pok),
        STAT_MIB_TX("tx_unicast", mib.tx.uc),
        /* UniMAC RUNT counters */
        STAT_RUNT("rx_runt_pkts", mib.rx_runt_cnt),
        STAT_RUNT("rx_runt_valid_fcs", mib.rx_runt_fcs),
        STAT_RUNT("rx_runt_inval_fcs_align", mib.rx_runt_fcs_align),
        STAT_RUNT("rx_runt_bytes", mib.rx_runt_bytes),
        /* RXCHK misc statistics */
        STAT_RXCHK("rxchk_bad_csum", mib.rxchk_bad_csum, RXCHK_BAD_CSUM_CNTR),
        STAT_RXCHK("rxchk_other_pkt_disc", mib.rxchk_other_pkt_disc,
                   RXCHK_OTHER_DISC_CNTR),
        /* RBUF misc statistics */
        STAT_RBUF("rbuf_ovflow_cnt", mib.rbuf_ovflow_cnt, RBUF_OVFL_DISC_CNTR),
        STAT_RBUF("rbuf_err_cnt", mib.rbuf_err_cnt, RBUF_ERR_PKT_CNTR),
        STAT_MIB_SOFT("alloc_rx_buff_failed", mib.alloc_rx_buff_failed),
        STAT_MIB_SOFT("rx_dma_failed", mib.rx_dma_failed),
        STAT_MIB_SOFT("tx_dma_failed", mib.tx_dma_failed),
        /* Per TX-queue statistics are dynamically appended */
};

#define BCM_SYSPORT_STATS_LEN   ARRAY_SIZE(bcm_sysport_gstrings_stats)

static void bcm_sysport_get_drvinfo(struct net_device *dev,
                                    struct ethtool_drvinfo *info)
{
        strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
        strlcpy(info->version, "0.1", sizeof(info->version));
        strlcpy(info->bus_info, "platform", sizeof(info->bus_info));
}

static u32 bcm_sysport_get_msglvl(struct net_device *dev)
{
        struct bcm_sysport_priv *priv = netdev_priv(dev);

        return priv->msg_enable;
}

static void bcm_sysport_set_msglvl(struct net_device *dev, u32 enable)
{
        struct bcm_sysport_priv *priv = netdev_priv(dev);

        priv->msg_enable = enable;
}

static inline bool bcm_sysport_lite_stat_valid(enum bcm_sysport_stat_type type)
{
        switch (type) {
        case BCM_SYSPORT_STAT_NETDEV:
        case BCM_SYSPORT_STAT_NETDEV64:
        case BCM_SYSPORT_STAT_RXCHK:
        case BCM_SYSPORT_STAT_RBUF:
        case BCM_SYSPORT_STAT_SOFT:
                return true;
        default:
                return false;
        }
}

static int bcm_sysport_get_sset_count(struct net_device *dev, int string_set)
{
        struct bcm_sysport_priv *priv = netdev_priv(dev);
        const struct bcm_sysport_stats *s;
        unsigned int i, j;

        switch (string_set) {
        case ETH_SS_STATS:
                for (i = 0, j = 0; i < BCM_SYSPORT_STATS_LEN; i++) {
                        s = &bcm_sysport_gstrings_stats[i];
                        if (priv->is_lite &&
                            !bcm_sysport_lite_stat_valid(s->type))
                                continue;
                        j++;
                }
                /* Include per-queue statistics */
                return j + dev->num_tx_queues * NUM_SYSPORT_TXQ_STAT;
        default:
                return -EOPNOTSUPP;
        }
}

static void bcm_sysport_get_strings(struct net_device *dev,
                                    u32 stringset, u8 *data)
{
        struct bcm_sysport_priv *priv = netdev_priv(dev);
        const struct bcm_sysport_stats *s;
        char buf[128];
        int i, j;

        switch (stringset) {
        case ETH_SS_STATS:
                for (i = 0, j = 0; i < BCM_SYSPORT_STATS_LEN; i++) {
                        s = &bcm_sysport_gstrings_stats[i];
                        if (priv->is_lite &&
                            !bcm_sysport_lite_stat_valid(s->type))
                                continue;

                        memcpy(data + j * ETH_GSTRING_LEN, s->stat_string,
                               ETH_GSTRING_LEN);
                        j++;
                }

                for (i = 0; i < dev->num_tx_queues; i++) {
                        snprintf(buf, sizeof(buf), "txq%d_packets", i);
                        memcpy(data + j * ETH_GSTRING_LEN, buf,
                               ETH_GSTRING_LEN);
                        j++;

                        snprintf(buf, sizeof(buf), "txq%d_bytes", i);
                        memcpy(data + j * ETH_GSTRING_LEN, buf,
                               ETH_GSTRING_LEN);
                        j++;
                }
                break;
        default:
                break;
        }
}

static void bcm_sysport_update_mib_counters(struct bcm_sysport_priv *priv)
{
        int i, j = 0;

        for (i = 0; i < BCM_SYSPORT_STATS_LEN; i++) {
                const struct bcm_sysport_stats *s;
                u8 offset = 0;
                u32 val = 0;
                char *p;

                s = &bcm_sysport_gstrings_stats[i];
                switch (s->type) {
                case BCM_SYSPORT_STAT_NETDEV:
                case BCM_SYSPORT_STAT_NETDEV64:
                case BCM_SYSPORT_STAT_SOFT:
                        continue;
                case BCM_SYSPORT_STAT_MIB_RX:
                case BCM_SYSPORT_STAT_MIB_TX:
                case BCM_SYSPORT_STAT_RUNT:
                        if (priv->is_lite)
                                continue;

                        if (s->type != BCM_SYSPORT_STAT_MIB_RX)
                                offset = UMAC_MIB_STAT_OFFSET;
                        val = umac_readl(priv, UMAC_MIB_START + j + offset);
                        break;
                case BCM_SYSPORT_STAT_RXCHK:
                        val = rxchk_readl(priv, s->reg_offset);
                        if (val == ~0)
                                rxchk_writel(priv, 0, s->reg_offset);
                        break;
                case BCM_SYSPORT_STAT_RBUF:
                        val = rbuf_readl(priv, s->reg_offset);
                        if (val == ~0)
                                rbuf_writel(priv, 0, s->reg_offset);
                        break;
                }

                j += s->stat_sizeof;
                p = (char *)priv + s->stat_offset;
                *(u32 *)p = val;
        }

        netif_dbg(priv, hw, priv->netdev, "updated MIB counters\n");
}

static void bcm_sysport_update_tx_stats(struct bcm_sysport_priv *priv,
                                        u64 *tx_bytes, u64 *tx_packets)
{
        struct bcm_sysport_tx_ring *ring;
        u64 bytes = 0, packets = 0;
        unsigned int start;
        unsigned int q;

        for (q = 0; q < priv->netdev->num_tx_queues; q++) {
                ring = &priv->tx_rings[q];
                do {
                        start = u64_stats_fetch_begin_irq(&priv->syncp);
                        bytes = ring->bytes;
                        packets = ring->packets;
                } while (u64_stats_fetch_retry_irq(&priv->syncp, start));

                *tx_bytes += bytes;
                *tx_packets += packets;
        }
}

static void bcm_sysport_get_stats(struct net_device *dev,
                                  struct ethtool_stats *stats, u64 *data)
{
        struct bcm_sysport_priv *priv = netdev_priv(dev);
        struct bcm_sysport_stats64 *stats64 = &priv->stats64;
        struct u64_stats_sync *syncp = &priv->syncp;
        struct bcm_sysport_tx_ring *ring;
        u64 tx_bytes = 0, tx_packets = 0;
        unsigned int start;
        int i, j;

        if (netif_running(dev)) {
                bcm_sysport_update_mib_counters(priv);
                bcm_sysport_update_tx_stats(priv, &tx_bytes, &tx_packets);
                stats64->tx_bytes = tx_bytes;
                stats64->tx_packets = tx_packets;
        }

        for (i = 0, j = 0; i < BCM_SYSPORT_STATS_LEN; i++) {
                const struct bcm_sysport_stats *s;
                char *p;

                s = &bcm_sysport_gstrings_stats[i];
                if (s->type == BCM_SYSPORT_STAT_NETDEV)
                        p = (char *)&dev->stats;
                else if (s->type == BCM_SYSPORT_STAT_NETDEV64)
                        p = (char *)stats64;
                else
                        p = (char *)priv;

                if (priv->is_lite && !bcm_sysport_lite_stat_valid(s->type))
                        continue;
                p += s->stat_offset;

                /* Index with j, not i, so values stay aligned with the
                 * strings emitted by bcm_sysport_get_strings() when
                 * SYSTEMPORT Lite skips entries.
                 */
                if (s->stat_sizeof == sizeof(u64) &&
                    s->type == BCM_SYSPORT_STAT_NETDEV64) {
                        do {
                                start = u64_stats_fetch_begin_irq(syncp);
                                data[j] = *(u64 *)p;
                        } while (u64_stats_fetch_retry_irq(syncp, start));
                } else
                        data[j] = *(u32 *)p;
                j++;
        }

        /* The per TX queue statistics are appended right after the device
         * statistics. On SYSTEMPORT Lite there are holes in the statistics,
         * so recompute j as the total statistics count minus the number of
         * per TX queue statistics.
         */
        j = bcm_sysport_get_sset_count(dev, ETH_SS_STATS) -
            dev->num_tx_queues * NUM_SYSPORT_TXQ_STAT;

        for (i = 0; i < dev->num_tx_queues; i++) {
                ring = &priv->tx_rings[i];
                data[j] = ring->packets;
                j++;
                data[j] = ring->bytes;
                j++;
        }
}

static void bcm_sysport_get_wol(struct net_device *dev,
                                struct ethtool_wolinfo *wol)
{
        struct bcm_sysport_priv *priv = netdev_priv(dev);
        u32 reg;

        wol->supported = WAKE_MAGIC | WAKE_MAGICSECURE;
        wol->wolopts = priv->wolopts;

        if (!(priv->wolopts & WAKE_MAGICSECURE))
                return;

        /* Return the programmed SecureOn password */
        reg = umac_readl(priv, UMAC_PSW_MS);
        put_unaligned_be16(reg, &wol->sopass[0]);
        reg = umac_readl(priv, UMAC_PSW_LS);
        put_unaligned_be32(reg, &wol->sopass[2]);
}

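/* The 6-byte SecureOn password is stored big-endian across two UMAC
 * registers: bytes 0-1 live in UMAC_PSW_MS and bytes 2-5 in UMAC_PSW_LS,
 * hence the unaligned 16-bit and 32-bit big-endian accessors used above
 * and below.
 */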
static int bcm_sysport_set_wol(struct net_device *dev,
                               struct ethtool_wolinfo *wol)
{
        struct bcm_sysport_priv *priv = netdev_priv(dev);
        struct device *kdev = &priv->pdev->dev;
        u32 supported = WAKE_MAGIC | WAKE_MAGICSECURE;

        if (!device_can_wakeup(kdev))
                return -ENOTSUPP;

        if (wol->wolopts & ~supported)
                return -EINVAL;

        /* Program the SecureOn password */
        if (wol->wolopts & WAKE_MAGICSECURE) {
                umac_writel(priv, get_unaligned_be16(&wol->sopass[0]),
                            UMAC_PSW_MS);
                umac_writel(priv, get_unaligned_be32(&wol->sopass[2]),
                            UMAC_PSW_LS);
        }

        /* Flag the device and relevant IRQ as wakeup capable */
        if (wol->wolopts) {
                device_set_wakeup_enable(kdev, 1);
                if (priv->wol_irq_disabled)
                        enable_irq_wake(priv->wol_irq);
                priv->wol_irq_disabled = 0;
        } else {
                device_set_wakeup_enable(kdev, 0);
                /* Avoid unbalanced disable_irq_wake calls */
                if (!priv->wol_irq_disabled)
                        disable_irq_wake(priv->wol_irq);
                priv->wol_irq_disabled = 1;
        }

        priv->wolopts = wol->wolopts;

        return 0;
}

static int bcm_sysport_get_coalesce(struct net_device *dev,
                                    struct ethtool_coalesce *ec)
{
        struct bcm_sysport_priv *priv = netdev_priv(dev);
        u32 reg;

        reg = tdma_readl(priv, TDMA_DESC_RING_INTR_CONTROL(0));

        ec->tx_coalesce_usecs = (reg >> RING_TIMEOUT_SHIFT) * 8192 / 1000;
        ec->tx_max_coalesced_frames = reg & RING_INTR_THRESH_MASK;

        reg = rdma_readl(priv, RDMA_MBDONE_INTR);

        ec->rx_coalesce_usecs = (reg >> RDMA_TIMEOUT_SHIFT) * 8192 / 1000;
        ec->rx_max_coalesced_frames = reg & RDMA_INTR_THRESH_MASK;

        return 0;
}

static int bcm_sysport_set_coalesce(struct net_device *dev,
                                    struct ethtool_coalesce *ec)
{
        struct bcm_sysport_priv *priv = netdev_priv(dev);
        unsigned int i;
        u32 reg;

        /* The base system clock is 125 MHz and the DMA timeout tick is that
         * reference clock divided by 1024, which yields roughly 8.192 us. Our
         * maximum value has to fit in the RING_TIMEOUT_MASK (16 bits).
         */
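        /* e.g. ec->tx_coalesce_usecs = 100 is converted below to
         * DIV_ROUND_UP(100 * 1000, 8192) = 13 timeout ticks, i.e. about
         * 106.5 us of coalescing delay actually programmed into the ring.
         */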
        if (ec->tx_max_coalesced_frames > RING_INTR_THRESH_MASK ||
            ec->tx_coalesce_usecs > (RING_TIMEOUT_MASK * 8) + 1 ||
            ec->rx_max_coalesced_frames > RDMA_INTR_THRESH_MASK ||
            ec->rx_coalesce_usecs > (RDMA_TIMEOUT_MASK * 8) + 1)
                return -EINVAL;

        if ((ec->tx_coalesce_usecs == 0 && ec->tx_max_coalesced_frames == 0) ||
            (ec->rx_coalesce_usecs == 0 && ec->rx_max_coalesced_frames == 0))
                return -EINVAL;

        for (i = 0; i < dev->num_tx_queues; i++) {
                reg = tdma_readl(priv, TDMA_DESC_RING_INTR_CONTROL(i));
                reg &= ~(RING_INTR_THRESH_MASK |
                         RING_TIMEOUT_MASK << RING_TIMEOUT_SHIFT);
                reg |= ec->tx_max_coalesced_frames;
                reg |= DIV_ROUND_UP(ec->tx_coalesce_usecs * 1000, 8192) <<
                         RING_TIMEOUT_SHIFT;
                tdma_writel(priv, reg, TDMA_DESC_RING_INTR_CONTROL(i));
        }

        reg = rdma_readl(priv, RDMA_MBDONE_INTR);
        reg &= ~(RDMA_INTR_THRESH_MASK |
                 RDMA_TIMEOUT_MASK << RDMA_TIMEOUT_SHIFT);
        reg |= ec->rx_max_coalesced_frames;
        reg |= DIV_ROUND_UP(ec->rx_coalesce_usecs * 1000, 8192) <<
                            RDMA_TIMEOUT_SHIFT;
        rdma_writel(priv, reg, RDMA_MBDONE_INTR);

        return 0;
}

static void bcm_sysport_free_cb(struct bcm_sysport_cb *cb)
{
        dev_consume_skb_any(cb->skb);
        cb->skb = NULL;
        dma_unmap_addr_set(cb, dma_addr, 0);
}

static struct sk_buff *bcm_sysport_rx_refill(struct bcm_sysport_priv *priv,
                                             struct bcm_sysport_cb *cb)
{
        struct device *kdev = &priv->pdev->dev;
        struct net_device *ndev = priv->netdev;
        struct sk_buff *skb, *rx_skb;
        dma_addr_t mapping;

        /* Allocate a new SKB for a new packet */
        skb = netdev_alloc_skb(priv->netdev, RX_BUF_LENGTH);
        if (!skb) {
                priv->mib.alloc_rx_buff_failed++;
                netif_err(priv, rx_err, ndev, "SKB alloc failed\n");
                return NULL;
        }

        mapping = dma_map_single(kdev, skb->data,
                                 RX_BUF_LENGTH, DMA_FROM_DEVICE);
        if (dma_mapping_error(kdev, mapping)) {
                priv->mib.rx_dma_failed++;
                dev_kfree_skb_any(skb);
                netif_err(priv, rx_err, ndev, "DMA mapping failure\n");
                return NULL;
        }

        /* Grab the current SKB on the ring */
        rx_skb = cb->skb;
        if (likely(rx_skb))
                dma_unmap_single(kdev, dma_unmap_addr(cb, dma_addr),
                                 RX_BUF_LENGTH, DMA_FROM_DEVICE);

        /* Put the new SKB on the ring */
        cb->skb = skb;
        dma_unmap_addr_set(cb, dma_addr, mapping);
        dma_desc_set_addr(priv, cb->bd_addr, mapping);

        netif_dbg(priv, rx_status, ndev, "RX refill\n");

        /* Return the current SKB to the caller */
        return rx_skb;
}

static int bcm_sysport_alloc_rx_bufs(struct bcm_sysport_priv *priv)
{
        struct bcm_sysport_cb *cb;
        struct sk_buff *skb;
        unsigned int i;

        for (i = 0; i < priv->num_rx_bds; i++) {
                cb = &priv->rx_cbs[i];
                skb = bcm_sysport_rx_refill(priv, cb);
                if (skb)
                        dev_kfree_skb(skb);
                if (!cb->skb)
                        return -ENOMEM;
        }

        return 0;
}

/* Poll the hardware for up to budget packets to process */
static unsigned int bcm_sysport_desc_rx(struct bcm_sysport_priv *priv,
                                        unsigned int budget)
{
        struct bcm_sysport_stats64 *stats64 = &priv->stats64;
        struct net_device *ndev = priv->netdev;
        unsigned int processed = 0, to_process;
        struct bcm_sysport_cb *cb;
        struct sk_buff *skb;
        unsigned int p_index;
        u16 len, status;
        struct bcm_rsb *rsb;

        /* Clear status before servicing to reduce spurious interrupts */
        intrl2_0_writel(priv, INTRL2_0_RDMA_MBDONE, INTRL2_CPU_CLEAR);

        /* Determine how much we should process since the last call; SYSTEMPORT
         * Lite groups the producer and consumer indexes into the same 32-bit
         * register, which we access using RDMA_CONS_INDEX
         */
        if (!priv->is_lite)
                p_index = rdma_readl(priv, RDMA_PROD_INDEX);
        else
                p_index = rdma_readl(priv, RDMA_CONS_INDEX);
        p_index &= RDMA_PROD_INDEX_MASK;

        to_process = (p_index - priv->rx_c_index) & RDMA_CONS_INDEX_MASK;

        netif_dbg(priv, rx_status, ndev,
                  "p_index=%d rx_c_index=%d to_process=%d\n",
                  p_index, priv->rx_c_index, to_process);

        while ((processed < to_process) && (processed < budget)) {
                cb = &priv->rx_cbs[priv->rx_read_ptr];
                skb = bcm_sysport_rx_refill(priv, cb);

                /* We do not have a backing SKB, so we do not have a
                 * corresponding DMA mapping for this incoming packet since
                 * bcm_sysport_rx_refill always either has both skb and mapping
                 * or none.
                 */
                if (unlikely(!skb)) {
                        netif_err(priv, rx_err, ndev, "out of memory!\n");
                        ndev->stats.rx_dropped++;
                        ndev->stats.rx_errors++;
                        goto next;
                }

                /* Extract the Receive Status Block prepended */
                rsb = (struct bcm_rsb *)skb->data;
                len = (rsb->rx_status_len >> DESC_LEN_SHIFT) & DESC_LEN_MASK;
                status = (rsb->rx_status_len >> DESC_STATUS_SHIFT) &
                          DESC_STATUS_MASK;

                netif_dbg(priv, rx_status, ndev,
                          "p=%d, c=%d, rd_ptr=%d, len=%d, flag=0x%04x\n",
                          p_index, priv->rx_c_index, priv->rx_read_ptr,
                          len, status);

                if (unlikely(len > RX_BUF_LENGTH)) {
                        netif_err(priv, rx_status, ndev, "oversized packet\n");
                        ndev->stats.rx_length_errors++;
                        ndev->stats.rx_errors++;
                        dev_kfree_skb_any(skb);
                        goto next;
                }

                if (unlikely(!(status & DESC_EOP) || !(status & DESC_SOP))) {
                        netif_err(priv, rx_status, ndev, "fragmented packet!\n");
                        ndev->stats.rx_dropped++;
                        ndev->stats.rx_errors++;
                        dev_kfree_skb_any(skb);
                        goto next;
                }

                if (unlikely(status & (RX_STATUS_ERR | RX_STATUS_OVFLOW))) {
                        netif_err(priv, rx_err, ndev, "error packet\n");
                        if (status & RX_STATUS_OVFLOW)
                                ndev->stats.rx_over_errors++;
                        ndev->stats.rx_dropped++;
                        ndev->stats.rx_errors++;
                        dev_kfree_skb_any(skb);
                        goto next;
                }

                skb_put(skb, len);

                /* Hardware validated our checksum */
                if (likely(status & DESC_L4_CSUM))
                        skb->ip_summed = CHECKSUM_UNNECESSARY;

                /* Hardware prepends packets with 2 bytes before the Ethernet
                 * header, plus we have the Receive Status Block; strip all of
                 * this from the SKB.
                 */
                skb_pull(skb, sizeof(*rsb) + 2);
                len -= (sizeof(*rsb) + 2);

                /* UniMAC may forward CRC */
                if (priv->crc_fwd) {
                        skb_trim(skb, len - ETH_FCS_LEN);
                        len -= ETH_FCS_LEN;
                }

                skb->protocol = eth_type_trans(skb, ndev);
                ndev->stats.rx_packets++;
                ndev->stats.rx_bytes += len;
                u64_stats_update_begin(&priv->syncp);
                stats64->rx_packets++;
                stats64->rx_bytes += len;
                u64_stats_update_end(&priv->syncp);

                napi_gro_receive(&priv->napi, skb);
next:
                processed++;
                priv->rx_read_ptr++;

                if (priv->rx_read_ptr == priv->num_rx_bds)
                        priv->rx_read_ptr = 0;
        }

        return processed;
}

static void bcm_sysport_tx_reclaim_one(struct bcm_sysport_tx_ring *ring,
                                       struct bcm_sysport_cb *cb,
                                       unsigned int *bytes_compl,
                                       unsigned int *pkts_compl)
{
        struct bcm_sysport_priv *priv = ring->priv;
        struct device *kdev = &priv->pdev->dev;

        if (cb->skb) {
                *bytes_compl += cb->skb->len;
                dma_unmap_single(kdev, dma_unmap_addr(cb, dma_addr),
                                 dma_unmap_len(cb, dma_len),
                                 DMA_TO_DEVICE);
                (*pkts_compl)++;
                bcm_sysport_free_cb(cb);
        /* SKB fragment */
        } else if (dma_unmap_addr(cb, dma_addr)) {
                *bytes_compl += dma_unmap_len(cb, dma_len);
                dma_unmap_page(kdev, dma_unmap_addr(cb, dma_addr),
                               dma_unmap_len(cb, dma_len), DMA_TO_DEVICE);
                dma_unmap_addr_set(cb, dma_addr, 0);
        }
}

/* Reclaim queued SKBs for transmission completion, lockless version */
static unsigned int __bcm_sysport_tx_reclaim(struct bcm_sysport_priv *priv,
                                             struct bcm_sysport_tx_ring *ring)
{
        unsigned int c_index, last_c_index, last_tx_cn, num_tx_cbs;
        unsigned int pkts_compl = 0, bytes_compl = 0;
        struct net_device *ndev = priv->netdev;
        struct bcm_sysport_cb *cb;
        u32 hw_ind;

        /* Clear status before servicing to reduce spurious interrupts */
        if (!ring->priv->is_lite)
                intrl2_1_writel(ring->priv, BIT(ring->index), INTRL2_CPU_CLEAR);
        else
                intrl2_0_writel(ring->priv, BIT(ring->index +
                                INTRL2_0_TDMA_MBDONE_SHIFT), INTRL2_CPU_CLEAR);

        /* Compute how many descriptors have been processed since last call */
        hw_ind = tdma_readl(priv, TDMA_DESC_RING_PROD_CONS_INDEX(ring->index));
        c_index = (hw_ind >> RING_CONS_INDEX_SHIFT) & RING_CONS_INDEX_MASK;
        ring->p_index = (hw_ind & RING_PROD_INDEX_MASK);

        last_c_index = ring->c_index;
        num_tx_cbs = ring->size;

        c_index &= (num_tx_cbs - 1);

        if (c_index >= last_c_index)
                last_tx_cn = c_index - last_c_index;
        else
                last_tx_cn = num_tx_cbs - last_c_index + c_index;
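
        /* e.g. with num_tx_cbs = 256, last_c_index = 250 and a new c_index
         * of 5, the index wrapped around, so last_tx_cn = 256 - 250 + 5 = 11
         * descriptors completed since the previous reclaim.
         */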

        netif_dbg(priv, tx_done, ndev,
                  "ring=%d c_index=%d last_tx_cn=%d last_c_index=%d\n",
                  ring->index, c_index, last_tx_cn, last_c_index);

        while (last_tx_cn-- > 0) {
                cb = ring->cbs + last_c_index;
                bcm_sysport_tx_reclaim_one(ring, cb, &bytes_compl, &pkts_compl);

                ring->desc_count++;
                last_c_index++;
                last_c_index &= (num_tx_cbs - 1);
        }

        u64_stats_update_begin(&priv->syncp);
        ring->packets += pkts_compl;
        ring->bytes += bytes_compl;
        u64_stats_update_end(&priv->syncp);

        ring->c_index = c_index;

        netif_dbg(priv, tx_done, ndev,
                  "ring=%d c_index=%d pkts_compl=%d, bytes_compl=%d\n",
                  ring->index, ring->c_index, pkts_compl, bytes_compl);

        return pkts_compl;
}

/* Locked version of the per-ring TX reclaim routine */
static unsigned int bcm_sysport_tx_reclaim(struct bcm_sysport_priv *priv,
                                           struct bcm_sysport_tx_ring *ring)
{
        struct netdev_queue *txq;
        unsigned int released;
        unsigned long flags;

        txq = netdev_get_tx_queue(priv->netdev, ring->index);

        spin_lock_irqsave(&ring->lock, flags);
        released = __bcm_sysport_tx_reclaim(priv, ring);
        if (released)
                netif_tx_wake_queue(txq);

        spin_unlock_irqrestore(&ring->lock, flags);

        return released;
}

/* Locked version of the per-ring TX reclaim, but does not wake the queue */
static void bcm_sysport_tx_clean(struct bcm_sysport_priv *priv,
                                 struct bcm_sysport_tx_ring *ring)
{
        unsigned long flags;

        spin_lock_irqsave(&ring->lock, flags);
        __bcm_sysport_tx_reclaim(priv, ring);
        spin_unlock_irqrestore(&ring->lock, flags);
}

static int bcm_sysport_tx_poll(struct napi_struct *napi, int budget)
{
        struct bcm_sysport_tx_ring *ring =
                container_of(napi, struct bcm_sysport_tx_ring, napi);
        unsigned int work_done = 0;

        work_done = bcm_sysport_tx_reclaim(ring->priv, ring);

        if (work_done == 0) {
                napi_complete(napi);
                /* re-enable TX interrupt */
                if (!ring->priv->is_lite)
                        intrl2_1_mask_clear(ring->priv, BIT(ring->index));
                else
                        intrl2_0_mask_clear(ring->priv, BIT(ring->index +
                                            INTRL2_0_TDMA_MBDONE_SHIFT));

                return 0;
        }

        return budget;
}

static void bcm_sysport_tx_reclaim_all(struct bcm_sysport_priv *priv)
{
        unsigned int q;

        for (q = 0; q < priv->netdev->num_tx_queues; q++)
                bcm_sysport_tx_reclaim(priv, &priv->tx_rings[q]);
}

static int bcm_sysport_poll(struct napi_struct *napi, int budget)
{
        struct bcm_sysport_priv *priv =
                container_of(napi, struct bcm_sysport_priv, napi);
        unsigned int work_done = 0;

        work_done = bcm_sysport_desc_rx(priv, budget);

        priv->rx_c_index += work_done;
        priv->rx_c_index &= RDMA_CONS_INDEX_MASK;

        /* SYSTEMPORT Lite groups the producer/consumer index; the producer is
         * maintained by HW, and writes to it will be ignored while RDMA
         * is active
         */
        if (!priv->is_lite)
                rdma_writel(priv, priv->rx_c_index, RDMA_CONS_INDEX);
        else
                rdma_writel(priv, priv->rx_c_index << 16, RDMA_CONS_INDEX);
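        /* e.g. rx_c_index = 0x123 writes 0x01230000: the consumer half
         * lands in bits 31:16 of RDMA_CONS_INDEX, and the zeroes written
         * to the producer half (bits 15:0) are ignored while RDMA runs.
         */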

        if (work_done < budget) {
                napi_complete_done(napi, work_done);
                /* re-enable RX interrupts */
                intrl2_0_mask_clear(priv, INTRL2_0_RDMA_MBDONE);
        }

        return work_done;
}

static void bcm_sysport_resume_from_wol(struct bcm_sysport_priv *priv)
{
        u32 reg;

        /* Stop monitoring MPD interrupt */
        intrl2_0_mask_set(priv, INTRL2_0_MPD);

        /* Clear the MagicPacket detection logic */
        reg = umac_readl(priv, UMAC_MPD_CTRL);
        reg &= ~MPD_EN;
        umac_writel(priv, reg, UMAC_MPD_CTRL);

        netif_dbg(priv, wol, priv->netdev, "resumed from WOL\n");
}

/* RX and misc interrupt routine */
static irqreturn_t bcm_sysport_rx_isr(int irq, void *dev_id)
{
        struct net_device *dev = dev_id;
        struct bcm_sysport_priv *priv = netdev_priv(dev);
        struct bcm_sysport_tx_ring *txr;
        unsigned int ring, ring_bit;

        priv->irq0_stat = intrl2_0_readl(priv, INTRL2_CPU_STATUS) &
                          ~intrl2_0_readl(priv, INTRL2_CPU_MASK_STATUS);
        intrl2_0_writel(priv, priv->irq0_stat, INTRL2_CPU_CLEAR);

        if (unlikely(priv->irq0_stat == 0)) {
                netdev_warn(priv->netdev, "spurious RX interrupt\n");
                return IRQ_NONE;
        }

        if (priv->irq0_stat & INTRL2_0_RDMA_MBDONE) {
                if (likely(napi_schedule_prep(&priv->napi))) {
                        /* disable RX interrupts */
                        intrl2_0_mask_set(priv, INTRL2_0_RDMA_MBDONE);
                        __napi_schedule_irqoff(&priv->napi);
                }
        }

        /* TX ring is full, perform a full reclaim since we do not know
         * which one would trigger this interrupt
         */
        if (priv->irq0_stat & INTRL2_0_TX_RING_FULL)
                bcm_sysport_tx_reclaim_all(priv);

        if (priv->irq0_stat & INTRL2_0_MPD) {
                netdev_info(priv->netdev, "Wake-on-LAN interrupt!\n");
                bcm_sysport_resume_from_wol(priv);
        }

        if (!priv->is_lite)
                goto out;

        for (ring = 0; ring < dev->num_tx_queues; ring++) {
                ring_bit = BIT(ring + INTRL2_0_TDMA_MBDONE_SHIFT);
                if (!(priv->irq0_stat & ring_bit))
                        continue;

                txr = &priv->tx_rings[ring];

                if (likely(napi_schedule_prep(&txr->napi))) {
                        intrl2_0_mask_set(priv, ring_bit);
                        __napi_schedule(&txr->napi);
                }
        }
out:
        return IRQ_HANDLED;
}

/* TX interrupt service routine */
static irqreturn_t bcm_sysport_tx_isr(int irq, void *dev_id)
{
        struct net_device *dev = dev_id;
        struct bcm_sysport_priv *priv = netdev_priv(dev);
        struct bcm_sysport_tx_ring *txr;
        unsigned int ring;

        priv->irq1_stat = intrl2_1_readl(priv, INTRL2_CPU_STATUS) &
                                ~intrl2_1_readl(priv, INTRL2_CPU_MASK_STATUS);
        intrl2_1_writel(priv, 0xffffffff, INTRL2_CPU_CLEAR);

        if (unlikely(priv->irq1_stat == 0)) {
                netdev_warn(priv->netdev, "spurious TX interrupt\n");
                return IRQ_NONE;
        }

        for (ring = 0; ring < dev->num_tx_queues; ring++) {
                if (!(priv->irq1_stat & BIT(ring)))
                        continue;

                txr = &priv->tx_rings[ring];

                if (likely(napi_schedule_prep(&txr->napi))) {
                        intrl2_1_mask_set(priv, BIT(ring));
                        __napi_schedule_irqoff(&txr->napi);
                }
        }

        return IRQ_HANDLED;
}

static irqreturn_t bcm_sysport_wol_isr(int irq, void *dev_id)
{
        struct bcm_sysport_priv *priv = dev_id;

        pm_wakeup_event(&priv->pdev->dev, 0);

        return IRQ_HANDLED;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void bcm_sysport_poll_controller(struct net_device *dev)
{
        struct bcm_sysport_priv *priv = netdev_priv(dev);

        /* The ISRs expect the net_device as their dev_id cookie */
        disable_irq(priv->irq0);
        bcm_sysport_rx_isr(priv->irq0, dev);
        enable_irq(priv->irq0);

        if (!priv->is_lite) {
                disable_irq(priv->irq1);
                bcm_sysport_tx_isr(priv->irq1, dev);
                enable_irq(priv->irq1);
        }
}
#endif

static struct sk_buff *bcm_sysport_insert_tsb(struct sk_buff *skb,
                                              struct net_device *dev)
{
        struct sk_buff *nskb;
        struct bcm_tsb *tsb;
        u32 csum_info;
        u8 ip_proto;
        u16 csum_start;
        u16 ip_ver;

        /* Re-allocate SKB if needed */
        if (unlikely(skb_headroom(skb) < sizeof(*tsb))) {
                nskb = skb_realloc_headroom(skb, sizeof(*tsb));
                dev_kfree_skb(skb);
                if (!nskb) {
                        dev->stats.tx_errors++;
                        dev->stats.tx_dropped++;
                        return NULL;
                }
                skb = nskb;
        }

        tsb = skb_push(skb, sizeof(*tsb));
        /* Zero-out TSB by default */
        memset(tsb, 0, sizeof(*tsb));

        if (skb->ip_summed == CHECKSUM_PARTIAL) {
                ip_ver = ntohs(skb->protocol);
                switch (ip_ver) {
                case ETH_P_IP:
                        ip_proto = ip_hdr(skb)->protocol;
                        break;
                case ETH_P_IPV6:
                        ip_proto = ipv6_hdr(skb)->nexthdr;
                        break;
                default:
                        return skb;
                }

                /* Get the checksum offset and the L4 (transport) offset */
                csum_start = skb_checksum_start_offset(skb) - sizeof(*tsb);
                csum_info = (csum_start + skb->csum_offset) & L4_CSUM_PTR_MASK;
                csum_info |= (csum_start << L4_PTR_SHIFT);
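
                /* e.g. for a plain IPv4/TCP frame: csum_start = 14 (Ethernet
                 * header) + 20 (IPv4 header) = 34 and skb->csum_offset = 16
                 * (offset of the TCP checksum field), so csum_info tells the
                 * hardware where L4 starts and where to write the computed
                 * checksum (34 + 16 = 50 bytes into the frame).
                 */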

                if (ip_proto == IPPROTO_TCP || ip_proto == IPPROTO_UDP) {
                        csum_info |= L4_LENGTH_VALID;
                        if (ip_proto == IPPROTO_UDP && ip_ver == ETH_P_IP)
                                csum_info |= L4_UDP;
                } else {
                        csum_info = 0;
                }

                tsb->l4_ptr_dest_map = csum_info;
        }

        return skb;
}

static netdev_tx_t bcm_sysport_xmit(struct sk_buff *skb,
                                    struct net_device *dev)
{
        struct bcm_sysport_priv *priv = netdev_priv(dev);
        struct device *kdev = &priv->pdev->dev;
        struct bcm_sysport_tx_ring *ring;
        struct bcm_sysport_cb *cb;
        struct netdev_queue *txq;
        struct dma_desc *desc;
        unsigned int skb_len;
        unsigned long flags;
        dma_addr_t mapping;
        u32 len_status;
        u16 queue;
        int ret;

        queue = skb_get_queue_mapping(skb);
        txq = netdev_get_tx_queue(dev, queue);
        ring = &priv->tx_rings[queue];

        /* lock against tx reclaim in BH context and TX ring full interrupt */
        spin_lock_irqsave(&ring->lock, flags);
        if (unlikely(ring->desc_count == 0)) {
                netif_tx_stop_queue(txq);
                netdev_err(dev, "queue %d awake and ring full!\n", queue);
                ret = NETDEV_TX_BUSY;
                goto out;
        }

        /* The Ethernet switch we are interfaced with needs packets to be at
         * least 64 bytes (including FCS) otherwise they will be discarded when
         * they enter the switch port logic. When Broadcom tags are enabled, we
         * need to make sure that packets are at least 68 bytes
         * (including FCS and tag) because the length verification is done after
         * the Broadcom tag is stripped off the ingress packet.
         */
        if (skb_put_padto(skb, ETH_ZLEN + ENET_BRCM_TAG_LEN)) {
                ret = NETDEV_TX_OK;
                goto out;
        }

        /* Insert TSB and checksum infos */
        if (priv->tsb_en) {
                skb = bcm_sysport_insert_tsb(skb, dev);
                if (!skb) {
                        ret = NETDEV_TX_OK;
                        goto out;
                }
        }

        skb_len = skb->len;

        mapping = dma_map_single(kdev, skb->data, skb_len, DMA_TO_DEVICE);
        if (dma_mapping_error(kdev, mapping)) {
                priv->mib.tx_dma_failed++;
                netif_err(priv, tx_err, dev, "DMA map failed at %p (len=%d)\n",
                          skb->data, skb_len);
                ret = NETDEV_TX_OK;
                goto out;
        }

        /* Remember the SKB for future freeing */
        cb = &ring->cbs[ring->curr_desc];
        cb->skb = skb;
        dma_unmap_addr_set(cb, dma_addr, mapping);
        dma_unmap_len_set(cb, dma_len, skb_len);

        /* Fetch a descriptor entry from our pool */
        desc = ring->desc_cpu;

        desc->addr_lo = lower_32_bits(mapping);
        len_status = upper_32_bits(mapping) & DESC_ADDR_HI_MASK;
        len_status |= (skb_len << DESC_LEN_SHIFT);
        len_status |= (DESC_SOP | DESC_EOP | TX_STATUS_APP_CRC) <<
                       DESC_STATUS_SHIFT;
        if (skb->ip_summed == CHECKSUM_PARTIAL)
                len_status |= (DESC_L4_CSUM << DESC_STATUS_SHIFT);
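
        /* len_status now packs three fields into one 32-bit word: the upper
         * physical address bits (DESC_ADDR_HI_MASK), the frame length at
         * DESC_LEN_SHIFT, and the SOP/EOP/APP_CRC (plus the optional
         * L4_CSUM) flags at DESC_STATUS_SHIFT.
         */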

        ring->curr_desc++;
        if (ring->curr_desc == ring->size)
                ring->curr_desc = 0;
        ring->desc_count--;

        /* Ensure write completion of the descriptor status/length
         * in DRAM before the System Port WRITE_PORT register latches
         * the value
         */
        wmb();
        desc->addr_status_len = len_status;
        wmb();

        /* Write this descriptor address to the RING write port */
        tdma_port_write_desc_addr(priv, desc, ring->index);

        /* Check ring space and update SW control flow */
        if (ring->desc_count == 0)
                netif_tx_stop_queue(txq);

        netif_dbg(priv, tx_queued, dev, "ring=%d desc_count=%d, curr_desc=%d\n",
                  ring->index, ring->desc_count, ring->curr_desc);

        ret = NETDEV_TX_OK;
out:
        spin_unlock_irqrestore(&ring->lock, flags);
        return ret;
}

static void bcm_sysport_tx_timeout(struct net_device *dev)
{
        netdev_warn(dev, "transmit timeout!\n");

        netif_trans_update(dev);
        dev->stats.tx_errors++;

        netif_tx_wake_all_queues(dev);
}

/* phylib adjust link callback */
static void bcm_sysport_adj_link(struct net_device *dev)
{
        struct bcm_sysport_priv *priv = netdev_priv(dev);
        struct phy_device *phydev = dev->phydev;
        unsigned int changed = 0;
        u32 cmd_bits = 0, reg;

        if (priv->old_link != phydev->link) {
                changed = 1;
                priv->old_link = phydev->link;
        }

        if (priv->old_duplex != phydev->duplex) {
                changed = 1;
                priv->old_duplex = phydev->duplex;
        }

        if (priv->is_lite)
                goto out;

        switch (phydev->speed) {
        case SPEED_2500:
                cmd_bits = CMD_SPEED_2500;
                break;
        case SPEED_1000:
                cmd_bits = CMD_SPEED_1000;
                break;
        case SPEED_100:
                cmd_bits = CMD_SPEED_100;
                break;
        case SPEED_10:
                cmd_bits = CMD_SPEED_10;
                break;
        default:
                break;
        }
        cmd_bits <<= CMD_SPEED_SHIFT;

        if (phydev->duplex == DUPLEX_HALF)
                cmd_bits |= CMD_HD_EN;

        if (priv->old_pause != phydev->pause) {
                changed = 1;
                priv->old_pause = phydev->pause;
        }

        if (!phydev->pause)
                cmd_bits |= CMD_RX_PAUSE_IGNORE | CMD_TX_PAUSE_IGNORE;

        if (!changed)
                return;

        if (phydev->link) {
                reg = umac_readl(priv, UMAC_CMD);
                reg &= ~((CMD_SPEED_MASK << CMD_SPEED_SHIFT) |
                        CMD_HD_EN | CMD_RX_PAUSE_IGNORE |
                        CMD_TX_PAUSE_IGNORE);
                reg |= cmd_bits;
                umac_writel(priv, reg, UMAC_CMD);
        }
out:
        if (changed)
                phy_print_status(phydev);
}

1373 static int bcm_sysport_init_tx_ring(struct bcm_sysport_priv *priv,
1374                                     unsigned int index)
1375 {
1376         struct bcm_sysport_tx_ring *ring = &priv->tx_rings[index];
1377         struct device *kdev = &priv->pdev->dev;
1378         size_t size;
1379         void *p;
1380         u32 reg;
1381
1382         /* Simple descriptors partitioning for now */
	size = 256;

	/* We just need one DMA descriptor which is DMA-able, since writing to
	 * the port will allocate a new descriptor in its internal linked-list
	 */
	p = dma_zalloc_coherent(kdev, sizeof(struct dma_desc), &ring->desc_dma,
				GFP_KERNEL);
	if (!p) {
		netif_err(priv, hw, priv->netdev, "DMA alloc failed\n");
		return -ENOMEM;
	}

	ring->cbs = kcalloc(size, sizeof(struct bcm_sysport_cb), GFP_KERNEL);
	if (!ring->cbs) {
		dma_free_coherent(kdev, sizeof(struct dma_desc),
				  ring->desc_cpu, ring->desc_dma);
		netif_err(priv, hw, priv->netdev, "CB allocation failed\n");
		return -ENOMEM;
	}

	/* Initialize SW view of the ring */
	spin_lock_init(&ring->lock);
	ring->priv = priv;
	netif_tx_napi_add(priv->netdev, &ring->napi, bcm_sysport_tx_poll, 64);
	ring->index = index;
	ring->size = size;
	ring->alloc_size = ring->size;
	ring->desc_cpu = p;
	ring->desc_count = ring->size;
	ring->curr_desc = 0;

	/* Initialize HW ring */
	tdma_writel(priv, RING_EN, TDMA_DESC_RING_HEAD_TAIL_PTR(index));
	tdma_writel(priv, 0, TDMA_DESC_RING_COUNT(index));
	tdma_writel(priv, 1, TDMA_DESC_RING_INTR_CONTROL(index));
	tdma_writel(priv, 0, TDMA_DESC_RING_PROD_CONS_INDEX(index));
	tdma_writel(priv, RING_IGNORE_STATUS, TDMA_DESC_RING_MAPPING(index));
	tdma_writel(priv, 0, TDMA_DESC_RING_PCP_DEI_VID(index));

	/* Do not use tdma_control_bit() here because TSB_SWAP1 collides
	 * with the original definition of ACB_ALGO
	 */
	reg = tdma_readl(priv, TDMA_CONTROL);
	if (priv->is_lite)
		reg &= ~BIT(TSB_SWAP1);
	/* Set a correct TSB format based on host endian */
	if (!IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
		reg |= tdma_control_bit(priv, TSB_SWAP0);
	else
		reg &= ~tdma_control_bit(priv, TSB_SWAP0);
	tdma_writel(priv, reg, TDMA_CONTROL);

	/* Program the number of descriptors as MAX_THRESHOLD and a
	 * hysteresis trigger threshold of one descriptor
	 */
	tdma_writel(priv, ring->size |
			1 << RING_HYST_THRESH_SHIFT,
			TDMA_DESC_RING_MAX_HYST(index));

	/* Enable the ring queue in the arbiter */
	reg = tdma_readl(priv, TDMA_TIER1_ARB_0_QUEUE_EN);
	reg |= (1 << index);
	tdma_writel(priv, reg, TDMA_TIER1_ARB_0_QUEUE_EN);

	napi_enable(&ring->napi);

	netif_dbg(priv, hw, priv->netdev,
		  "TDMA cfg, size=%d, desc_cpu=%p\n",
		  ring->size, ring->desc_cpu);

	return 0;
}

static void bcm_sysport_fini_tx_ring(struct bcm_sysport_priv *priv,
				     unsigned int index)
{
	struct bcm_sysport_tx_ring *ring = &priv->tx_rings[index];
	struct device *kdev = &priv->pdev->dev;
	u32 reg;

	/* Caller should stop the TDMA engine */
	reg = tdma_readl(priv, TDMA_STATUS);
	if (!(reg & TDMA_DISABLED))
		netdev_warn(priv->netdev, "TDMA not stopped!\n");

	/* ring->cbs is the last part in bcm_sysport_init_tx_ring which could
	 * fail, so by checking this pointer we know whether the TX ring was
	 * fully initialized or not.
	 */
	if (!ring->cbs)
		return;

	napi_disable(&ring->napi);
	netif_napi_del(&ring->napi);

	bcm_sysport_tx_clean(priv, ring);

	kfree(ring->cbs);
	ring->cbs = NULL;

	if (ring->desc_dma) {
		dma_free_coherent(kdev, sizeof(struct dma_desc),
				  ring->desc_cpu, ring->desc_dma);
		ring->desc_dma = 0;
	}
	ring->size = 0;
	ring->alloc_size = 0;

	netif_dbg(priv, hw, priv->netdev, "TDMA fini done\n");
}

/* RDMA helper */
static inline int rdma_enable_set(struct bcm_sysport_priv *priv,
				  unsigned int enable)
{
	unsigned int timeout = 1000;
	u32 reg;

	reg = rdma_readl(priv, RDMA_CONTROL);
	if (enable)
		reg |= RDMA_EN;
	else
		reg &= ~RDMA_EN;
	rdma_writel(priv, reg, RDMA_CONTROL);

	/* Poll for RDMA enabling/disabling completion */
	do {
		reg = rdma_readl(priv, RDMA_STATUS);
		if (!!(reg & RDMA_DISABLED) == !enable)
			return 0;
		usleep_range(1000, 2000);
	} while (timeout-- > 0);

	netdev_err(priv->netdev, "timeout waiting for RDMA to finish\n");

	return -ETIMEDOUT;
}

/* TDMA helper */
static inline int tdma_enable_set(struct bcm_sysport_priv *priv,
				  unsigned int enable)
{
	unsigned int timeout = 1000;
	u32 reg;

	reg = tdma_readl(priv, TDMA_CONTROL);
	if (enable)
		reg |= tdma_control_bit(priv, TDMA_EN);
	else
		reg &= ~tdma_control_bit(priv, TDMA_EN);
	tdma_writel(priv, reg, TDMA_CONTROL);

	/* Poll for TDMA enabling/disabling completion */
	do {
		reg = tdma_readl(priv, TDMA_STATUS);
		if (!!(reg & TDMA_DISABLED) == !enable)
			return 0;

		usleep_range(1000, 2000);
	} while (timeout-- > 0);

	netdev_err(priv->netdev, "timeout waiting for TDMA to finish\n");

	return -ETIMEDOUT;
}

static int bcm_sysport_init_rx_ring(struct bcm_sysport_priv *priv)
{
	struct bcm_sysport_cb *cb;
	u32 reg;
	int ret;
	int i;

	/* Initialize SW view of the RX ring */
	priv->num_rx_bds = priv->num_rx_desc_words / WORDS_PER_DESC;
	priv->rx_bds = priv->base + SYS_PORT_RDMA_OFFSET;
	priv->rx_c_index = 0;
	priv->rx_read_ptr = 0;
	priv->rx_cbs = kcalloc(priv->num_rx_bds, sizeof(struct bcm_sysport_cb),
				GFP_KERNEL);
	if (!priv->rx_cbs) {
		netif_err(priv, hw, priv->netdev, "CB allocation failed\n");
		return -ENOMEM;
	}

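	/* RX descriptors live in on-chip RDMA memory (rx_bds points into the
	 * register space rather than DRAM), so each control block only needs
	 * to record the address of its descriptor slot.
	 */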
	for (i = 0; i < priv->num_rx_bds; i++) {
		cb = priv->rx_cbs + i;
		cb->bd_addr = priv->rx_bds + i * DESC_SIZE;
	}

	ret = bcm_sysport_alloc_rx_bufs(priv);
	if (ret) {
		netif_err(priv, hw, priv->netdev, "SKB allocation failed\n");
		return ret;
	}

	/* Initialize HW, ensure RDMA is disabled */
	reg = rdma_readl(priv, RDMA_STATUS);
	if (!(reg & RDMA_DISABLED))
		rdma_enable_set(priv, 0);

	rdma_writel(priv, 0, RDMA_WRITE_PTR_LO);
	rdma_writel(priv, 0, RDMA_WRITE_PTR_HI);
	rdma_writel(priv, 0, RDMA_PROD_INDEX);
	rdma_writel(priv, 0, RDMA_CONS_INDEX);
	rdma_writel(priv, priv->num_rx_bds << RDMA_RING_SIZE_SHIFT |
			  RX_BUF_LENGTH, RDMA_RING_BUF_SIZE);
	/* Operate the queue in ring mode */
	rdma_writel(priv, 0, RDMA_START_ADDR_HI);
	rdma_writel(priv, 0, RDMA_START_ADDR_LO);
	rdma_writel(priv, 0, RDMA_END_ADDR_HI);
	rdma_writel(priv, priv->num_rx_desc_words - 1, RDMA_END_ADDR_LO);

	rdma_writel(priv, 1, RDMA_MBDONE_INTR);

	netif_dbg(priv, hw, priv->netdev,
		  "RDMA cfg, num_rx_bds=%d, rx_bds=%p\n",
		  priv->num_rx_bds, priv->rx_bds);

	return 0;
}

static void bcm_sysport_fini_rx_ring(struct bcm_sysport_priv *priv)
{
	struct bcm_sysport_cb *cb;
	unsigned int i;
	u32 reg;

	/* Caller should ensure RDMA is disabled */
	reg = rdma_readl(priv, RDMA_STATUS);
	if (!(reg & RDMA_DISABLED))
		netdev_warn(priv->netdev, "RDMA not stopped!\n");

	for (i = 0; i < priv->num_rx_bds; i++) {
		cb = &priv->rx_cbs[i];
		if (dma_unmap_addr(cb, dma_addr))
			dma_unmap_single(&priv->pdev->dev,
					 dma_unmap_addr(cb, dma_addr),
					 RX_BUF_LENGTH, DMA_FROM_DEVICE);
		bcm_sysport_free_cb(cb);
	}

	kfree(priv->rx_cbs);
	priv->rx_cbs = NULL;

	netif_dbg(priv, hw, priv->netdev, "RDMA fini done\n");
}

static void bcm_sysport_set_rx_mode(struct net_device *dev)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	u32 reg;

	if (priv->is_lite)
		return;

	reg = umac_readl(priv, UMAC_CMD);
	if (dev->flags & IFF_PROMISC)
		reg |= CMD_PROMISC;
	else
		reg &= ~CMD_PROMISC;
	umac_writel(priv, reg, UMAC_CMD);

	/* No support for ALLMULTI */
	if (dev->flags & IFF_ALLMULTI)
		return;
}

static inline void umac_enable_set(struct bcm_sysport_priv *priv,
				   u32 mask, unsigned int enable)
{
	u32 reg;

	if (!priv->is_lite) {
		reg = umac_readl(priv, UMAC_CMD);
		if (enable)
			reg |= mask;
		else
			reg &= ~mask;
		umac_writel(priv, reg, UMAC_CMD);
	} else {
		reg = gib_readl(priv, GIB_CONTROL);
		if (enable)
			reg |= mask;
		else
			reg &= ~mask;
		gib_writel(priv, reg, GIB_CONTROL);
	}

	/* UniMAC stops on a packet boundary, wait for a full-sized packet
	 * to be processed (1 msec).
	 */
	if (enable == 0)
		usleep_range(1000, 2000);
}

static inline void umac_reset(struct bcm_sysport_priv *priv)
{
	u32 reg;

	if (priv->is_lite)
		return;

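	/* Pulse CMD_SW_RESET with a short delay so the UniMAC block can
	 * settle before it is released from reset.
	 */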
	reg = umac_readl(priv, UMAC_CMD);
	reg |= CMD_SW_RESET;
	umac_writel(priv, reg, UMAC_CMD);
	udelay(10);
	reg = umac_readl(priv, UMAC_CMD);
	reg &= ~CMD_SW_RESET;
	umac_writel(priv, reg, UMAC_CMD);
}

static void umac_set_hw_addr(struct bcm_sysport_priv *priv,
			     unsigned char *addr)
{
	u32 mac0 = (addr[0] << 24) | (addr[1] << 16) | (addr[2] << 8) |
		    addr[3];
	u32 mac1 = (addr[4] << 8) | addr[5];

	if (!priv->is_lite) {
		umac_writel(priv, mac0, UMAC_MAC0);
		umac_writel(priv, mac1, UMAC_MAC1);
	} else {
		gib_writel(priv, mac0, GIB_MAC0);
		gib_writel(priv, mac1, GIB_MAC1);
	}
}

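/* Flush the RX and TX FIFOs at the TOPCTRL level; the 1 msec delay gives
 * any in-flight data time to drain before the flush bits are cleared.
 */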
static void topctrl_flush(struct bcm_sysport_priv *priv)
{
	topctrl_writel(priv, RX_FLUSH, RX_FLUSH_CNTL);
	topctrl_writel(priv, TX_FLUSH, TX_FLUSH_CNTL);
	mdelay(1);
	topctrl_writel(priv, 0, RX_FLUSH_CNTL);
	topctrl_writel(priv, 0, TX_FLUSH_CNTL);
}

static int bcm_sysport_change_mac(struct net_device *dev, void *p)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	struct sockaddr *addr = p;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EINVAL;

	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);

	/* interface is disabled, changes to MAC will be reflected on next
	 * open call
	 */
	if (!netif_running(dev))
		return 0;

	umac_set_hw_addr(priv, dev->dev_addr);

	return 0;
}

static void bcm_sysport_get_stats64(struct net_device *dev,
				    struct rtnl_link_stats64 *stats)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	struct bcm_sysport_stats64 *stats64 = &priv->stats64;
	unsigned int start;

	netdev_stats_to_stats64(stats, &dev->stats);

	bcm_sysport_update_tx_stats(priv, &stats->tx_bytes,
				    &stats->tx_packets);

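	/* Snapshot the 64-bit RX counters under the u64_stats seqcount so
	 * that 32-bit hosts read a consistent packets/bytes pair.
	 */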
	do {
		start = u64_stats_fetch_begin_irq(&priv->syncp);
		stats->rx_packets = stats64->rx_packets;
		stats->rx_bytes = stats64->rx_bytes;
	} while (u64_stats_fetch_retry_irq(&priv->syncp, start));
}

static void bcm_sysport_netif_start(struct net_device *dev)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);

	/* Enable NAPI */
	napi_enable(&priv->napi);

	/* Enable RX interrupt and TX ring full interrupt */
	intrl2_0_mask_clear(priv, INTRL2_0_RDMA_MBDONE | INTRL2_0_TX_RING_FULL);

	phy_start(dev->phydev);

	/* Enable TX interrupts for the TXQs */
	if (!priv->is_lite)
		intrl2_1_mask_clear(priv, 0xffffffff);
	else
		intrl2_0_mask_clear(priv, INTRL2_0_TDMA_MBDONE_MASK);

	/* Last call before we start the real business */
	netif_tx_start_all_queues(dev);
}

static void rbuf_init(struct bcm_sysport_priv *priv)
{
	u32 reg;

	reg = rbuf_readl(priv, RBUF_CONTROL);
	reg |= RBUF_4B_ALGN | RBUF_RSB_EN;
	/* Set a correct RSB format on SYSTEMPORT Lite */
	if (priv->is_lite)
		reg &= ~RBUF_RSB_SWAP1;

	/* Set a correct RSB format based on host endian */
	if (!IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
		reg |= RBUF_RSB_SWAP0;
	else
		reg &= ~RBUF_RSB_SWAP0;
	rbuf_writel(priv, reg, RBUF_CONTROL);
}

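/* Mask and acknowledge every interrupt source on both INTRL2 instances
 * (SYSTEMPORT Lite only has the first one).
 */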
static inline void bcm_sysport_mask_all_intrs(struct bcm_sysport_priv *priv)
{
	intrl2_0_mask_set(priv, 0xffffffff);
	intrl2_0_writel(priv, 0xffffffff, INTRL2_CPU_CLEAR);
	if (!priv->is_lite) {
		intrl2_1_mask_set(priv, 0xffffffff);
		intrl2_1_writel(priv, 0xffffffff, INTRL2_CPU_CLEAR);
	}
}

static inline void gib_set_pad_extension(struct bcm_sysport_priv *priv)
{
	u32 reg;

	reg = gib_readl(priv, GIB_CONTROL);
	/* Include Broadcom tag in pad extension and fix up IPG_LENGTH */
	if (netdev_uses_dsa(priv->netdev)) {
		reg &= ~(GIB_PAD_EXTENSION_MASK << GIB_PAD_EXTENSION_SHIFT);
		reg |= ENET_BRCM_TAG_LEN << GIB_PAD_EXTENSION_SHIFT;
	}
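	/* Program the standard Ethernet inter-packet gap of 12 (decimal)
	 * bytes (96 bit times); a larger value would only inflate the gap
	 * between packets.
	 */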
	reg &= ~(GIB_IPG_LEN_MASK << GIB_IPG_LEN_SHIFT);
	reg |= 12 << GIB_IPG_LEN_SHIFT;
	gib_writel(priv, reg, GIB_CONTROL);
}

static int bcm_sysport_open(struct net_device *dev)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	struct phy_device *phydev;
	unsigned int i;
	int ret;

	/* Reset UniMAC */
	umac_reset(priv);

	/* Flush TX and RX FIFOs at TOPCTRL level */
	topctrl_flush(priv);

	/* Disable the UniMAC RX/TX */
	umac_enable_set(priv, CMD_RX_EN | CMD_TX_EN, 0);

	/* Enable RBUF 4-byte alignment and Receive Status Block */
	rbuf_init(priv);

	/* Set maximum frame length */
	if (!priv->is_lite)
		umac_writel(priv, UMAC_MAX_MTU_SIZE, UMAC_MAX_FRAME_LEN);
	else
		gib_set_pad_extension(priv);

	/* Set MAC address */
	umac_set_hw_addr(priv, dev->dev_addr);

	/* Read CRC forward */
	if (!priv->is_lite)
		priv->crc_fwd = !!(umac_readl(priv, UMAC_CMD) & CMD_CRC_FWD);
	else
		priv->crc_fwd = !!(gib_readl(priv, GIB_CONTROL) &
				   GIB_FCS_STRIP);

	phydev = of_phy_connect(dev, priv->phy_dn, bcm_sysport_adj_link,
				0, priv->phy_interface);
	if (!phydev) {
		netdev_err(dev, "could not attach to PHY\n");
		return -ENODEV;
	}

	/* Reset housekeeping link status */
	priv->old_duplex = -1;
	priv->old_link = -1;
	priv->old_pause = -1;

	/* mask all interrupts and request them */
	bcm_sysport_mask_all_intrs(priv);

	ret = request_irq(priv->irq0, bcm_sysport_rx_isr, 0, dev->name, dev);
	if (ret) {
		netdev_err(dev, "failed to request RX interrupt\n");
		goto out_phy_disconnect;
	}

	if (!priv->is_lite) {
		ret = request_irq(priv->irq1, bcm_sysport_tx_isr, 0,
				  dev->name, dev);
		if (ret) {
			netdev_err(dev, "failed to request TX interrupt\n");
			goto out_free_irq0;
		}
	}

	/* Initialize both hardware and software ring */
	for (i = 0; i < dev->num_tx_queues; i++) {
		ret = bcm_sysport_init_tx_ring(priv, i);
		if (ret) {
			netdev_err(dev, "failed to initialize TX ring %d\n",
				   i);
			goto out_free_tx_ring;
		}
	}

	/* Initialize linked-list */
	tdma_writel(priv, TDMA_LL_RAM_INIT_BUSY, TDMA_STATUS);

	/* Initialize RX ring */
	ret = bcm_sysport_init_rx_ring(priv);
	if (ret) {
		netdev_err(dev, "failed to initialize RX ring\n");
		goto out_free_rx_ring;
	}

	/* Turn on RDMA */
	ret = rdma_enable_set(priv, 1);
	if (ret)
		goto out_free_rx_ring;

	/* Turn on TDMA */
	ret = tdma_enable_set(priv, 1);
	if (ret)
		goto out_clear_rx_int;

	/* Turn on UniMAC TX/RX */
	umac_enable_set(priv, CMD_RX_EN | CMD_TX_EN, 1);

	bcm_sysport_netif_start(dev);

	return 0;

out_clear_rx_int:
	intrl2_0_mask_set(priv, INTRL2_0_RDMA_MBDONE | INTRL2_0_TX_RING_FULL);
out_free_rx_ring:
	bcm_sysport_fini_rx_ring(priv);
out_free_tx_ring:
	for (i = 0; i < dev->num_tx_queues; i++)
		bcm_sysport_fini_tx_ring(priv, i);
	if (!priv->is_lite)
		free_irq(priv->irq1, dev);
out_free_irq0:
	free_irq(priv->irq0, dev);
out_phy_disconnect:
	phy_disconnect(phydev);
	return ret;
}

static void bcm_sysport_netif_stop(struct net_device *dev)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);

	/* stop all software from updating hardware */
	netif_tx_stop_all_queues(dev);
	napi_disable(&priv->napi);
	phy_stop(dev->phydev);

	/* mask all interrupts */
	bcm_sysport_mask_all_intrs(priv);
}

static int bcm_sysport_stop(struct net_device *dev)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	unsigned int i;
	int ret;

	bcm_sysport_netif_stop(dev);

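	/* Quiesce the datapath in order: stop MAC RX so no new frames come
	 * in, disable TDMA, give in-flight packets time to drain, then
	 * disable RDMA and finally MAC TX.
	 */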
	/* Disable UniMAC RX */
	umac_enable_set(priv, CMD_RX_EN, 0);

	ret = tdma_enable_set(priv, 0);
	if (ret) {
		netdev_err(dev, "timeout disabling TDMA\n");
		return ret;
	}

	/* Wait for a maximum-size packet to be drained */
	usleep_range(2000, 3000);

	ret = rdma_enable_set(priv, 0);
	if (ret) {
		netdev_err(dev, "timeout disabling RDMA\n");
		return ret;
	}

	/* Disable UniMAC TX */
	umac_enable_set(priv, CMD_TX_EN, 0);

	/* Free RX/TX rings SW structures */
	for (i = 0; i < dev->num_tx_queues; i++)
		bcm_sysport_fini_tx_ring(priv, i);
	bcm_sysport_fini_rx_ring(priv);

	free_irq(priv->irq0, dev);
	if (!priv->is_lite)
		free_irq(priv->irq1, dev);

	/* Disconnect from PHY */
	phy_disconnect(dev->phydev);

	return 0;
}

static const struct ethtool_ops bcm_sysport_ethtool_ops = {
	.get_drvinfo		= bcm_sysport_get_drvinfo,
	.get_msglevel		= bcm_sysport_get_msglvl,
	.set_msglevel		= bcm_sysport_set_msglvl,
	.get_link		= ethtool_op_get_link,
	.get_strings		= bcm_sysport_get_strings,
	.get_ethtool_stats	= bcm_sysport_get_stats,
	.get_sset_count		= bcm_sysport_get_sset_count,
	.get_wol		= bcm_sysport_get_wol,
	.set_wol		= bcm_sysport_set_wol,
	.get_coalesce		= bcm_sysport_get_coalesce,
	.set_coalesce		= bcm_sysport_set_coalesce,
	.get_link_ksettings	= phy_ethtool_get_link_ksettings,
	.set_link_ksettings	= phy_ethtool_set_link_ksettings,
};

static const struct net_device_ops bcm_sysport_netdev_ops = {
	.ndo_start_xmit		= bcm_sysport_xmit,
	.ndo_tx_timeout		= bcm_sysport_tx_timeout,
	.ndo_open		= bcm_sysport_open,
	.ndo_stop		= bcm_sysport_stop,
	.ndo_set_features	= bcm_sysport_set_features,
	.ndo_set_rx_mode	= bcm_sysport_set_rx_mode,
	.ndo_set_mac_address	= bcm_sysport_change_mac,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= bcm_sysport_poll_controller,
#endif
	.ndo_get_stats64	= bcm_sysport_get_stats64,
};

#define REV_FMT	"v%2x.%02x"

static const struct bcm_sysport_hw_params bcm_sysport_params[] = {
	[SYSTEMPORT] = {
		.is_lite = false,
		.num_rx_desc_words = SP_NUM_HW_RX_DESC_WORDS,
	},
	[SYSTEMPORT_LITE] = {
		.is_lite = true,
		.num_rx_desc_words = SP_LT_NUM_HW_RX_DESC_WORDS,
	},
};

static const struct of_device_id bcm_sysport_of_match[] = {
	{ .compatible = "brcm,systemportlite-v1.00",
	  .data = &bcm_sysport_params[SYSTEMPORT_LITE] },
	{ .compatible = "brcm,systemport-v1.00",
	  .data = &bcm_sysport_params[SYSTEMPORT] },
	{ .compatible = "brcm,systemport",
	  .data = &bcm_sysport_params[SYSTEMPORT] },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, bcm_sysport_of_match);

static int bcm_sysport_probe(struct platform_device *pdev)
{
	const struct bcm_sysport_hw_params *params;
	const struct of_device_id *of_id = NULL;
	struct bcm_sysport_priv *priv;
	struct device_node *dn;
	struct net_device *dev;
	const void *macaddr;
	struct resource *r;
	u32 txq, rxq;
	int ret;

	dn = pdev->dev.of_node;
	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	of_id = of_match_node(bcm_sysport_of_match, dn);
	if (!of_id || !of_id->data)
		return -EINVAL;

	/* We need to know the type of adapter we have early on */
	params = of_id->data;

	/* Read the Transmit/Receive Queue properties */
	if (of_property_read_u32(dn, "systemport,num-txq", &txq))
		txq = TDMA_NUM_RINGS;
	if (of_property_read_u32(dn, "systemport,num-rxq", &rxq))
		rxq = 1;

	/* Sanity check the number of transmit queues */
	if (!txq || txq > TDMA_NUM_RINGS)
		return -EINVAL;

	dev = alloc_etherdev_mqs(sizeof(*priv), txq, rxq);
	if (!dev)
		return -ENOMEM;

	/* Initialize private members */
	priv = netdev_priv(dev);

	/* Allocate number of TX rings */
	priv->tx_rings = devm_kcalloc(&pdev->dev, txq,
				      sizeof(struct bcm_sysport_tx_ring),
				      GFP_KERNEL);
	if (!priv->tx_rings)
		return -ENOMEM;

	priv->is_lite = params->is_lite;
	priv->num_rx_desc_words = params->num_rx_desc_words;

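	/* SYSTEMPORT Lite folds the TX completion interrupts into the first
	 * interrupt line (see bcm_sysport_netif_start()), so it only has
	 * irq0 and the Wake-on-LAN interrupt.
	 */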
	priv->irq0 = platform_get_irq(pdev, 0);
	if (!priv->is_lite) {
		priv->irq1 = platform_get_irq(pdev, 1);
		priv->wol_irq = platform_get_irq(pdev, 2);
	} else {
		priv->wol_irq = platform_get_irq(pdev, 1);
	}
	if (priv->irq0 <= 0 || (priv->irq1 <= 0 && !priv->is_lite)) {
		dev_err(&pdev->dev, "invalid interrupts\n");
		ret = -EINVAL;
		goto err_free_netdev;
	}

	priv->base = devm_ioremap_resource(&pdev->dev, r);
	if (IS_ERR(priv->base)) {
		ret = PTR_ERR(priv->base);
		goto err_free_netdev;
	}

	priv->netdev = dev;
	priv->pdev = pdev;

	priv->phy_interface = of_get_phy_mode(dn);
	/* Default to GMII interface mode */
	if (priv->phy_interface < 0)
		priv->phy_interface = PHY_INTERFACE_MODE_GMII;

	/* In the case of a fixed PHY, the DT node associated
	 * with the PHY is the Ethernet MAC DT node.
	 */
	if (of_phy_is_fixed_link(dn)) {
		ret = of_phy_register_fixed_link(dn);
		if (ret) {
			dev_err(&pdev->dev, "failed to register fixed PHY\n");
			goto err_free_netdev;
		}

		priv->phy_dn = dn;
	}

	/* Initialize netdevice members */
	macaddr = of_get_mac_address(dn);
	if (!macaddr || !is_valid_ether_addr(macaddr)) {
		dev_warn(&pdev->dev, "using random Ethernet MAC\n");
		eth_hw_addr_random(dev);
	} else {
		ether_addr_copy(dev->dev_addr, macaddr);
	}

	SET_NETDEV_DEV(dev, &pdev->dev);
	dev_set_drvdata(&pdev->dev, dev);
	dev->ethtool_ops = &bcm_sysport_ethtool_ops;
	dev->netdev_ops = &bcm_sysport_netdev_ops;
	netif_napi_add(dev, &priv->napi, bcm_sysport_poll, 64);

	/* HW supported features, none enabled by default */
	dev->hw_features |= NETIF_F_RXCSUM | NETIF_F_HIGHDMA |
				NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;

	/* Request the WOL interrupt and advertise suspend if available */
	priv->wol_irq_disabled = 1;
	ret = devm_request_irq(&pdev->dev, priv->wol_irq,
			       bcm_sysport_wol_isr, 0, dev->name, priv);
	if (!ret)
		device_set_wakeup_capable(&pdev->dev, 1);

	/* Set the needed headroom once and for all */
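	/* An 8-byte transmit status block (TSB) is prepended to each frame,
	 * hence the extra headroom.
	 */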
	BUILD_BUG_ON(sizeof(struct bcm_tsb) != 8);
	dev->needed_headroom += sizeof(struct bcm_tsb);

	/* libphy will adjust the link state accordingly */
	netif_carrier_off(dev);

	u64_stats_init(&priv->syncp);

	ret = register_netdev(dev);
	if (ret) {
		dev_err(&pdev->dev, "failed to register net_device\n");
		goto err_deregister_fixed_link;
	}

	priv->rev = topctrl_readl(priv, REV_CNTL) & REV_MASK;
	dev_info(&pdev->dev,
		 "Broadcom SYSTEMPORT%s" REV_FMT
		 " at 0x%p (irqs: %d, %d, TXQs: %d, RXQs: %d)\n",
		 priv->is_lite ? " Lite" : "",
		 (priv->rev >> 8) & 0xff, priv->rev & 0xff,
		 priv->base, priv->irq0, priv->irq1, txq, rxq);

	return 0;

err_deregister_fixed_link:
	if (of_phy_is_fixed_link(dn))
		of_phy_deregister_fixed_link(dn);
err_free_netdev:
	free_netdev(dev);
	return ret;
}

static int bcm_sysport_remove(struct platform_device *pdev)
{
	struct net_device *dev = dev_get_drvdata(&pdev->dev);
	struct device_node *dn = pdev->dev.of_node;

	/* Not much to do, ndo_close has been called
	 * and we use managed allocations
	 */
	unregister_netdev(dev);
	if (of_phy_is_fixed_link(dn))
		of_phy_deregister_fixed_link(dn);
	free_netdev(dev);
	dev_set_drvdata(&pdev->dev, NULL);

	return 0;
}

#ifdef CONFIG_PM_SLEEP
static int bcm_sysport_suspend_to_wol(struct bcm_sysport_priv *priv)
{
	struct net_device *ndev = priv->netdev;
	unsigned int timeout = 1000;
	u32 reg;

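	/* Enable Magic Packet Detection (MPD); the password match (PSW) is
	 * added only when WAKE_MAGICSECURE was requested.
	 */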
	/* Password has already been programmed */
	reg = umac_readl(priv, UMAC_MPD_CTRL);
	reg |= MPD_EN;
	reg &= ~PSW_EN;
	if (priv->wolopts & WAKE_MAGICSECURE)
		reg |= PSW_EN;
	umac_writel(priv, reg, UMAC_MPD_CTRL);

	/* Make sure RBUF entered WoL mode as a result */
	do {
		reg = rbuf_readl(priv, RBUF_STATUS);
		if (reg & RBUF_WOL_MODE)
			break;

		udelay(10);
	} while (timeout-- > 0);

	/* Do not leave the UniMAC RBUF matching only MPD packets */
	if (!timeout) {
		reg = umac_readl(priv, UMAC_MPD_CTRL);
		reg &= ~MPD_EN;
		umac_writel(priv, reg, UMAC_MPD_CTRL);
		netif_err(priv, wol, ndev, "failed to enter WOL mode\n");
		return -ETIMEDOUT;
	}

	/* UniMAC receive needs to be turned on */
	umac_enable_set(priv, CMD_RX_EN, 1);

	/* Enable the interrupt wake-up source */
	intrl2_0_mask_clear(priv, INTRL2_0_MPD);

	netif_dbg(priv, wol, ndev, "entered WOL mode\n");

	return 0;
}

static int bcm_sysport_suspend(struct device *d)
{
	struct net_device *dev = dev_get_drvdata(d);
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	unsigned int i;
	int ret = 0;
	u32 reg;

	if (!netif_running(dev))
		return 0;

	bcm_sysport_netif_stop(dev);

	phy_suspend(dev->phydev);

	netif_device_detach(dev);

	/* Disable UniMAC RX */
	umac_enable_set(priv, CMD_RX_EN, 0);

	ret = rdma_enable_set(priv, 0);
	if (ret) {
		netdev_err(dev, "RDMA timeout!\n");
		return ret;
	}

	/* Disable RXCHK if enabled */
	if (priv->rx_chk_en) {
		reg = rxchk_readl(priv, RXCHK_CONTROL);
		reg &= ~RXCHK_EN;
		rxchk_writel(priv, reg, RXCHK_CONTROL);
	}

	/* Flush RX pipe */
	if (!priv->wolopts)
		topctrl_writel(priv, RX_FLUSH, RX_FLUSH_CNTL);

	ret = tdma_enable_set(priv, 0);
	if (ret) {
		netdev_err(dev, "TDMA timeout!\n");
		return ret;
	}

	/* Wait for a packet boundary */
	usleep_range(2000, 3000);

	umac_enable_set(priv, CMD_TX_EN, 0);

	topctrl_writel(priv, TX_FLUSH, TX_FLUSH_CNTL);

	/* Free RX/TX rings SW structures */
	for (i = 0; i < dev->num_tx_queues; i++)
		bcm_sysport_fini_tx_ring(priv, i);
	bcm_sysport_fini_rx_ring(priv);

	/* Get prepared for Wake-on-LAN */
	if (device_may_wakeup(d) && priv->wolopts)
		ret = bcm_sysport_suspend_to_wol(priv);

	return ret;
}

static int bcm_sysport_resume(struct device *d)
{
	struct net_device *dev = dev_get_drvdata(d);
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	unsigned int i;
	u32 reg;
	int ret;

	if (!netif_running(dev))
		return 0;

	umac_reset(priv);

	/* We may have been suspended and never received a WOL event that
	 * would turn off MPD detection; take care of that now.
	 */
	bcm_sysport_resume_from_wol(priv);

	/* Initialize both hardware and software ring */
	for (i = 0; i < dev->num_tx_queues; i++) {
		ret = bcm_sysport_init_tx_ring(priv, i);
		if (ret) {
			netdev_err(dev, "failed to initialize TX ring %d\n",
				   i);
			goto out_free_tx_rings;
		}
	}

	/* Initialize linked-list */
	tdma_writel(priv, TDMA_LL_RAM_INIT_BUSY, TDMA_STATUS);

	/* Initialize RX ring */
	ret = bcm_sysport_init_rx_ring(priv);
	if (ret) {
		netdev_err(dev, "failed to initialize RX ring\n");
		goto out_free_rx_ring;
	}

	netif_device_attach(dev);

	/* RX pipe enable */
	topctrl_writel(priv, 0, RX_FLUSH_CNTL);

	ret = rdma_enable_set(priv, 1);
	if (ret) {
		netdev_err(dev, "failed to enable RDMA\n");
		goto out_free_rx_ring;
	}

	/* Re-enable RXCHK if it was enabled before suspend */
	if (priv->rx_chk_en) {
		reg = rxchk_readl(priv, RXCHK_CONTROL);
		reg |= RXCHK_EN;
		rxchk_writel(priv, reg, RXCHK_CONTROL);
	}

	rbuf_init(priv);

	/* Set maximum frame length */
	if (!priv->is_lite)
		umac_writel(priv, UMAC_MAX_MTU_SIZE, UMAC_MAX_FRAME_LEN);
	else
		gib_set_pad_extension(priv);

	/* Set MAC address */
	umac_set_hw_addr(priv, dev->dev_addr);

	umac_enable_set(priv, CMD_RX_EN, 1);

	/* TX pipe enable */
	topctrl_writel(priv, 0, TX_FLUSH_CNTL);

	umac_enable_set(priv, CMD_TX_EN, 1);

	ret = tdma_enable_set(priv, 1);
	if (ret) {
		netdev_err(dev, "TDMA timeout!\n");
		goto out_free_rx_ring;
	}

	phy_resume(dev->phydev);

	bcm_sysport_netif_start(dev);

	return 0;

out_free_rx_ring:
	bcm_sysport_fini_rx_ring(priv);
out_free_tx_rings:
	for (i = 0; i < dev->num_tx_queues; i++)
		bcm_sysport_fini_tx_ring(priv, i);
	return ret;
}
#endif

static SIMPLE_DEV_PM_OPS(bcm_sysport_pm_ops,
		bcm_sysport_suspend, bcm_sysport_resume);

static struct platform_driver bcm_sysport_driver = {
	.probe	= bcm_sysport_probe,
	.remove	= bcm_sysport_remove,
	.driver = {
		.name = "brcm-systemport",
		.of_match_table = bcm_sysport_of_match,
		.pm = &bcm_sysport_pm_ops,
	},
};
module_platform_driver(bcm_sysport_driver);

MODULE_AUTHOR("Broadcom Corporation");
MODULE_DESCRIPTION("Broadcom System Port Ethernet MAC driver");
MODULE_ALIAS("platform:brcm-systemport");
MODULE_LICENSE("GPL");