Merge tag 'media/v4.11-3' of git://git.kernel.org/pub/scm/linux/kernel/git/mchehab...
[sfrench/cifs-2.6.git] / drivers / net / ethernet / ti / cpsw.c
1 /*
2  * Texas Instruments Ethernet Switch Driver
3  *
4  * Copyright (C) 2012 Texas Instruments
5  *
6  * This program is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU General Public License as
8  * published by the Free Software Foundation version 2.
9  *
10  * This program is distributed "as is" WITHOUT ANY WARRANTY of any
11  * kind, whether express or implied; without even the implied warranty
12  * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
13  * GNU General Public License for more details.
14  */
15
16 #include <linux/kernel.h>
17 #include <linux/io.h>
18 #include <linux/clk.h>
19 #include <linux/timer.h>
20 #include <linux/module.h>
21 #include <linux/platform_device.h>
22 #include <linux/irqreturn.h>
23 #include <linux/interrupt.h>
24 #include <linux/if_ether.h>
25 #include <linux/etherdevice.h>
26 #include <linux/netdevice.h>
27 #include <linux/net_tstamp.h>
28 #include <linux/phy.h>
29 #include <linux/workqueue.h>
30 #include <linux/delay.h>
31 #include <linux/pm_runtime.h>
32 #include <linux/gpio.h>
33 #include <linux/of.h>
34 #include <linux/of_mdio.h>
35 #include <linux/of_net.h>
36 #include <linux/of_device.h>
37 #include <linux/if_vlan.h>
38
39 #include <linux/pinctrl/consumer.h>
40
41 #include "cpsw.h"
42 #include "cpsw_ale.h"
43 #include "cpts.h"
44 #include "davinci_cpdma.h"
45
/* Default message-class mask: every NETIF_MSG_* category the driver can
 * emit. Presumably combined with the debug_level module parameter when
 * msg_enable is initialized -- confirm at the probe/init site.
 */
#define CPSW_DEBUG      (NETIF_MSG_HW           | NETIF_MSG_WOL         | \
                         NETIF_MSG_DRV          | NETIF_MSG_LINK        | \
                         NETIF_MSG_IFUP         | NETIF_MSG_INTR        | \
                         NETIF_MSG_PROBE        | NETIF_MSG_TIMER       | \
                         NETIF_MSG_IFDOWN       | NETIF_MSG_RX_ERR      | \
                         NETIF_MSG_TX_ERR       | NETIF_MSG_TX_DONE     | \
                         NETIF_MSG_PKTDATA      | NETIF_MSG_TX_QUEUED   | \
                         NETIF_MSG_RX_STATUS)

/* Rate-limited logging wrappers: print only when the NETIF_MSG_* bit
 * selected by 'type' is set in priv->msg_enable AND net_ratelimit()
 * permits, so a flood of errors cannot spam the kernel log.
 */
#define cpsw_info(priv, type, format, ...)              \
do {                                                            \
        if (netif_msg_##type(priv) && net_ratelimit())          \
                dev_info(priv->dev, format, ## __VA_ARGS__);    \
} while (0)

#define cpsw_err(priv, type, format, ...)               \
do {                                                            \
        if (netif_msg_##type(priv) && net_ratelimit())          \
                dev_err(priv->dev, format, ## __VA_ARGS__);     \
} while (0)

#define cpsw_dbg(priv, type, format, ...)               \
do {                                                            \
        if (netif_msg_##type(priv) && net_ratelimit())          \
                dev_dbg(priv->dev, format, ## __VA_ARGS__);     \
} while (0)

#define cpsw_notice(priv, type, format, ...)            \
do {                                                            \
        if (netif_msg_##type(priv) && net_ratelimit())          \
                dev_notice(priv->dev, format, ## __VA_ARGS__);  \
} while (0)
#define ALE_ALL_PORTS           0x7 /* bitmask: host port 0 + slave ports 1-2 */

/* Field extractors for the hardware ID/version register value */
#define CPSW_MAJOR_VERSION(reg)         (reg >> 8 & 0x7)
#define CPSW_MINOR_VERSION(reg)         (reg & 0xff)
#define CPSW_RTL_VERSION(reg)           ((reg >> 11) & 0x1f)

/* Raw ID/version values of the supported CPSW IP revisions */
#define CPSW_VERSION_1          0x19010a
#define CPSW_VERSION_2          0x19010c
#define CPSW_VERSION_3          0x19010f
#define CPSW_VERSION_4          0x190112

#define HOST_PORT_NUM           0    /* the CPU-facing switch port */
#define SLIVER_SIZE             0x40 /* per-slave sliver register window size */

/* Register-block offsets within the CPSW region, version 1 layout */
#define CPSW1_HOST_PORT_OFFSET  0x028
#define CPSW1_SLAVE_OFFSET      0x050
#define CPSW1_SLAVE_SIZE        0x040
#define CPSW1_CPDMA_OFFSET      0x100
#define CPSW1_STATERAM_OFFSET   0x200
#define CPSW1_HW_STATS          0x400
#define CPSW1_CPTS_OFFSET       0x500
#define CPSW1_ALE_OFFSET        0x600
#define CPSW1_SLIVER_OFFSET     0x700

/* Register-block offsets within the CPSW region, version 2+ layout */
#define CPSW2_HOST_PORT_OFFSET  0x108
#define CPSW2_SLAVE_OFFSET      0x200
#define CPSW2_SLAVE_SIZE        0x100
#define CPSW2_CPDMA_OFFSET      0x800
#define CPSW2_HW_STATS          0x900
#define CPSW2_STATERAM_OFFSET   0xa00
#define CPSW2_CPTS_OFFSET       0xc00
#define CPSW2_ALE_OFFSET        0xd00
#define CPSW2_SLIVER_OFFSET     0xd80
#define CPSW2_BD_OFFSET         0x2000

/* Offsets within the CPDMA register block */
#define CPDMA_RXTHRESH          0x0c0
#define CPDMA_RXFREE            0x0e0
#define CPDMA_TXHDP             0x00
#define CPDMA_RXHDP             0x20
#define CPDMA_TXCP              0x40
#define CPDMA_RXCP              0x60

#define CPSW_POLL_WEIGHT        64  /* NAPI poll budget */
#define CPSW_MIN_PACKET_SIZE    60  /* minimum Ethernet frame (64) minus FCS */
/* 1500-byte MTU + 14-byte Ethernet header + VLAN tag + FCS */
#define CPSW_MAX_PACKET_SIZE    (1500 + 14 + 4 + 4)

#define RX_PRIORITY_MAPPING     0x76543210
#define TX_PRIORITY_MAPPING     0x33221100
#define CPDMA_TX_PRIORITY_MAP   0x01234567

#define CPSW_VLAN_AWARE         BIT(1)
#define CPSW_ALE_VLAN_AWARE     1

/* FIFO operating modes (bits 17:16 of the port control register) */
#define CPSW_FIFO_NORMAL_MODE           (0 << 16)
#define CPSW_FIFO_DUAL_MAC_MODE         (1 << 16)
#define CPSW_FIFO_RATE_LIMIT_MODE       (2 << 16)

/* Interrupt pacing: enable bits, prescaler mask, and the valid range of
 * pacing counts / intervals (NOTE(review): counts appear to be per
 * millisecond given the 1000-based interval math -- confirm against TRM).
 */
#define CPSW_INTPACEEN          (0x3f << 16)
#define CPSW_INTPRESCALE_MASK   (0x7FF << 0)
#define CPSW_CMINTMAX_CNT       63
#define CPSW_CMINTMIN_CNT       2
#define CPSW_CMINTMAX_INTVL     (1000 / CPSW_CMINTMIN_CNT)
#define CPSW_CMINTMIN_INTVL     ((1000 / CPSW_CMINTMAX_CNT) + 1)

/* In dual-EMAC mode each net_device owns one slave port; otherwise all
 * traffic goes through the configured active slave.
 */
#define cpsw_slave_index(cpsw, priv)                            \
                ((cpsw->data.dual_emac) ? priv->emac_port :     \
                cpsw->data.active_slave)
#define IRQ_NUM                 2 /* one rx and one tx interrupt line */
#define CPSW_MAX_QUEUES         8
#define CPSW_CPDMA_DESCS_POOL_SIZE_DEFAULT 256
/* Module parameters -- set at load time only (perms 0 / 0444: not
 * writable through sysfs).
 */
static int debug_level;
module_param(debug_level, int, 0);
MODULE_PARM_DESC(debug_level, "cpsw debug level (NETIF_MSG bits)");

static int ale_ageout = 10;
module_param(ale_ageout, int, 0);
MODULE_PARM_DESC(ale_ageout, "cpsw ale ageout interval (seconds)");

static int rx_packet_max = CPSW_MAX_PACKET_SIZE;
module_param(rx_packet_max, int, 0);
MODULE_PARM_DESC(rx_packet_max, "maximum receive packet size (bytes)");

static int descs_pool_size = CPSW_CPDMA_DESCS_POOL_SIZE_DEFAULT;
module_param(descs_pool_size, int, 0444);
MODULE_PARM_DESC(descs_pool_size, "Number of CPDMA CPPI descriptors in pool");
165
/* Wrapper (WR) register block: interrupt enable/status and pacing.
 * The layout mirrors the hardware register map, hence the padding
 * arrays. "mem_allign" is a pre-existing misspelling of "align" kept
 * as-is (renaming would churn users of this struct).
 */
struct cpsw_wr_regs {
        u32     id_ver;
        u32     soft_reset;
        u32     control;
        u32     int_control;
        u32     rx_thresh_en;
        u32     rx_en;          /* per-channel rx interrupt enable bits */
        u32     tx_en;          /* per-channel tx interrupt enable bits */
        u32     misc_en;
        u32     mem_allign1[8]; /* reserved / padding */
        u32     rx_thresh_stat;
        u32     rx_stat;
        u32     tx_stat;
        u32     misc_stat;
        u32     mem_allign2[8]; /* reserved / padding */
        u32     rx_imax;        /* rx interrupt pacing limit */
        u32     tx_imax;        /* tx interrupt pacing limit */

};
185
/* Switch subsystem (SS) control registers; layout mirrors the hardware
 * register map.
 */
struct cpsw_ss_regs {
        u32     id_ver;
        u32     control;
        u32     soft_reset;
        u32     stat_port_en;   /* enable statistics collection per port */
        u32     ptype;
        u32     soft_idle;
        u32     thru_rate;
        u32     gap_thresh;
        u32     tx_start_wds;
        u32     flow_control;
        u32     vlan_ltype;
        u32     ts_ltype;
        u32     dlr_ltype;
};
201
/* Per-port register offsets, CPSW_PORT_V1 layout */
#define CPSW1_MAX_BLKS      0x00 /* Maximum FIFO Blocks */
#define CPSW1_BLK_CNT       0x04 /* FIFO Block Usage Count (Read Only) */
#define CPSW1_TX_IN_CTL     0x08 /* Transmit FIFO Control */
#define CPSW1_PORT_VLAN     0x0c /* VLAN Register */
#define CPSW1_TX_PRI_MAP    0x10 /* Tx Header Priority to Switch Pri Mapping */
#define CPSW1_TS_CTL        0x14 /* Time Sync Control */
#define CPSW1_TS_SEQ_LTYPE  0x18 /* Time Sync Sequence ID Offset and Msg Type */
#define CPSW1_TS_VLAN       0x1c /* Time Sync VLAN1 and VLAN2 */

/* Per-port register offsets, CPSW_PORT_V2 layout */
#define CPSW2_CONTROL       0x00 /* Control Register */
#define CPSW2_MAX_BLKS      0x08 /* Maximum FIFO Blocks */
#define CPSW2_BLK_CNT       0x0c /* FIFO Block Usage Count (Read Only) */
#define CPSW2_TX_IN_CTL     0x10 /* Transmit FIFO Control */
#define CPSW2_PORT_VLAN     0x14 /* VLAN Register */
#define CPSW2_TX_PRI_MAP    0x18 /* Tx Header Priority to Switch Pri Mapping */
#define CPSW2_TS_SEQ_MTYPE  0x1c /* Time Sync Sequence ID Offset and Msg Type */

/* Offsets common to CPSW_PORT_V1 and V2 */
#define SA_LO               0x20 /* CPGMAC_SL Source Address Low */
#define SA_HI               0x24 /* CPGMAC_SL Source Address High */
#define SEND_PERCENT        0x28 /* Transmit Queue Send Percentages */

/* CPSW_PORT_V2 only */
#define RX_DSCP_PRI_MAP0    0x30 /* Rx DSCP Priority to Rx Packet Mapping */
#define RX_DSCP_PRI_MAP1    0x34 /* Rx DSCP Priority to Rx Packet Mapping */
#define RX_DSCP_PRI_MAP2    0x38 /* Rx DSCP Priority to Rx Packet Mapping */
#define RX_DSCP_PRI_MAP3    0x3c /* Rx DSCP Priority to Rx Packet Mapping */
#define RX_DSCP_PRI_MAP4    0x40 /* Rx DSCP Priority to Rx Packet Mapping */
#define RX_DSCP_PRI_MAP5    0x44 /* Rx DSCP Priority to Rx Packet Mapping */
#define RX_DSCP_PRI_MAP6    0x48 /* Rx DSCP Priority to Rx Packet Mapping */
#define RX_DSCP_PRI_MAP7    0x4c /* Rx DSCP Priority to Rx Packet Mapping */

/* Bit definitions for the CPSW2_CONTROL register */
#define PASS_PRI_TAGGED     (1<<24) /* Pass Priority Tagged */
#define VLAN_LTYPE2_EN      (1<<21) /* VLAN LTYPE 2 enable */
#define VLAN_LTYPE1_EN      (1<<20) /* VLAN LTYPE 1 enable */
#define DSCP_PRI_EN         (1<<16) /* DSCP Priority Enable */
#define TS_320              (1<<14) /* Time Sync Dest Port 320 enable */
#define TS_319              (1<<13) /* Time Sync Dest Port 319 enable */
#define TS_132              (1<<12) /* Time Sync Dest IP Addr 132 enable */
#define TS_131              (1<<11) /* Time Sync Dest IP Addr 131 enable */
#define TS_130              (1<<10) /* Time Sync Dest IP Addr 130 enable */
#define TS_129              (1<<9)  /* Time Sync Dest IP Addr 129 enable */
#define TS_TTL_NONZERO      (1<<8)  /* Time Sync Time To Live Non-zero enable */
#define TS_ANNEX_F_EN       (1<<6)  /* Time Sync Annex F enable */
#define TS_ANNEX_D_EN       (1<<4)  /* Time Sync Annex D enable */
#define TS_LTYPE2_EN        (1<<3)  /* Time Sync LTYPE 2 enable */
#define TS_LTYPE1_EN        (1<<2)  /* Time Sync LTYPE 1 enable */
#define TS_TX_EN            (1<<1)  /* Time Sync Transmit Enable */
#define TS_RX_EN            (1<<0)  /* Time Sync Receive Enable */

/* Timestamping match/filter bits used on v2 of the IP (Annex D only) */
#define CTRL_V2_TS_BITS \
        (TS_320 | TS_319 | TS_132 | TS_131 | TS_130 | TS_129 |\
         TS_TTL_NONZERO  | TS_ANNEX_D_EN | TS_LTYPE1_EN)

#define CTRL_V2_ALL_TS_MASK (CTRL_V2_TS_BITS | TS_TX_EN | TS_RX_EN)
#define CTRL_V2_TX_TS_BITS  (CTRL_V2_TS_BITS | TS_TX_EN)
#define CTRL_V2_RX_TS_BITS  (CTRL_V2_TS_BITS | TS_RX_EN)


/* Timestamping match/filter bits used on v3 of the IP (adds Annex F) */
#define CTRL_V3_TS_BITS \
        (TS_320 | TS_319 | TS_132 | TS_131 | TS_130 | TS_129 |\
         TS_TTL_NONZERO | TS_ANNEX_F_EN | TS_ANNEX_D_EN |\
         TS_LTYPE1_EN)

#define CTRL_V3_ALL_TS_MASK (CTRL_V3_TS_BITS | TS_TX_EN | TS_RX_EN)
#define CTRL_V3_TX_TS_BITS  (CTRL_V3_TS_BITS | TS_TX_EN)
#define CTRL_V3_RX_TS_BITS  (CTRL_V3_TS_BITS | TS_RX_EN)

/* Bit definitions for the CPSW2_TS_SEQ_MTYPE register */
#define TS_SEQ_ID_OFFSET_SHIFT   (16)    /* Time Sync Sequence ID Offset */
#define TS_SEQ_ID_OFFSET_MASK    (0x3f)
#define TS_MSG_TYPE_EN_SHIFT     (0)     /* Time Sync Message Type Enable */
#define TS_MSG_TYPE_EN_MASK      (0xffff)

/* The PTP event messages - Sync, Delay_Req, Pdelay_Req, and Pdelay_Resp. */
#define EVENT_MSG_BITS ((1<<0) | (1<<1) | (1<<2) | (1<<3))

/* Bit definitions for the CPSW1_TS_CTL register */
#define CPSW_V1_TS_RX_EN                BIT(0)
#define CPSW_V1_TS_TX_EN                BIT(4)
#define CPSW_V1_MSG_TYPE_OFS            16

/* Bit definitions for the CPSW1_TS_SEQ_LTYPE register */
#define CPSW_V1_SEQ_ID_OFS_SHIFT        16
289
/* Host (CPU) port register block; layout mirrors the hardware map */
struct cpsw_host_regs {
        u32     max_blks;
        u32     blk_cnt;
        u32     tx_in_ctl;
        u32     port_vlan;
        u32     tx_pri_map;
        u32     cpdma_tx_pri_map;
        u32     cpdma_rx_chan_map;
};
299
/* Per-slave "sliver" (CPGMAC_SL MAC) register block; layout mirrors
 * the hardware map, including the two reserved slots.
 */
struct cpsw_sliver_regs {
        u32     id_ver;
        u32     mac_control;
        u32     mac_status;
        u32     soft_reset;
        u32     rx_maxlen;      /* maximum accepted rx frame length */
        u32     __reserved_0;
        u32     rx_pause;
        u32     tx_pause;
        u32     __reserved_1;
        u32     rx_pri_map;
};
312
/* Hardware statistics counters, in register order; referenced by offset
 * from the ethtool stats table (cpsw_gstrings_stats) below.
 */
struct cpsw_hw_stats {
        u32     rxgoodframes;
        u32     rxbroadcastframes;
        u32     rxmulticastframes;
        u32     rxpauseframes;
        u32     rxcrcerrors;
        u32     rxaligncodeerrors;
        u32     rxoversizedframes;
        u32     rxjabberframes;
        u32     rxundersizedframes;
        u32     rxfragments;
        u32     __pad_0[2];     /* reserved slots in the hardware map */
        u32     rxoctets;
        u32     txgoodframes;
        u32     txbroadcastframes;
        u32     txmulticastframes;
        u32     txpauseframes;
        u32     txdeferredframes;
        u32     txcollisionframes;
        u32     txsinglecollframes;
        u32     txmultcollframes;
        u32     txexcessivecollisions;
        u32     txlatecollisions;
        u32     txunderrun;
        u32     txcarriersenseerrors;
        u32     txoctets;
        u32     octetframes64;
        u32     octetframes65t127;
        u32     octetframes128t255;
        u32     octetframes256t511;
        u32     octetframes512t1023;
        u32     octetframes1024tup;
        u32     netoctets;
        u32     rxsofoverruns;
        u32     rxmofoverruns;
        u32     rxdmaoverruns;
};
350
/* Per-slave-port state: register windows, PHY link and VLAN config */
struct cpsw_slave {
        void __iomem                    *regs;      /* port register window */
        struct cpsw_sliver_regs __iomem *sliver;    /* MAC (sliver) registers */
        int                             slave_num;  /* 0-based index; ALE port is slave_num + 1 */
        u32                             mac_control; /* cached mac_control value */
        struct cpsw_slave_data          *data;      /* platform/DT config */
        struct phy_device               *phy;
        struct net_device               *ndev;      /* owning ndev (dual-EMAC mode) */
        u32                             port_vlan;  /* port VLAN id (dual-EMAC mode) */
};
361
362 static inline u32 slave_read(struct cpsw_slave *slave, u32 offset)
363 {
364         return __raw_readl(slave->regs + offset);
365 }
366
367 static inline void slave_write(struct cpsw_slave *slave, u32 val, u32 offset)
368 {
369         __raw_writel(val, slave->regs + offset);
370 }
371
/* A CPDMA channel together with the NAPI budget share assigned to it */
struct cpsw_vector {
        struct cpdma_chan *ch;
        int budget;
};
376
/* State shared by all net_devices of one CPSW instance (in dual-EMAC
 * mode two ndevs share a single cpsw_common).
 */
struct cpsw_common {
        struct device                   *dev;
        struct cpsw_platform_data       data;
        struct napi_struct              napi_rx;
        struct napi_struct              napi_tx;
        struct cpsw_ss_regs __iomem     *regs;      /* switch subsystem regs */
        struct cpsw_wr_regs __iomem     *wr_regs;   /* wrapper/interrupt regs */
        u8 __iomem                      *hw_stats;  /* hw statistics counters */
        struct cpsw_host_regs __iomem   *host_port_regs;
        u32                             version;    /* one of CPSW_VERSION_* */
        u32                             coal_intvl; /* interrupt coalescing interval */
        u32                             bus_freq_mhz;
        int                             rx_packet_max; /* rx buffer allocation size */
        struct cpsw_slave               *slaves;    /* array of data.slaves entries */
        struct cpdma_ctlr               *dma;
        struct cpsw_vector              txv[CPSW_MAX_QUEUES]; /* tx channels + budgets */
        struct cpsw_vector              rxv[CPSW_MAX_QUEUES]; /* rx channels + budgets */
        struct cpsw_ale                 *ale;       /* address lookup engine */
        bool                            quirk_irq;
        bool                            rx_irq_disabled;
        bool                            tx_irq_disabled;
        u32 irqs_table[IRQ_NUM];
        struct cpts                     *cpts;      /* PTP timestamping unit */
        int                             rx_ch_num, tx_ch_num; /* channels in use */
        int                             speed;
        int                             usage_count; /* presumably the count of open
                                                      * interfaces (rx handler treats
                                                      * nonzero as "a port is still up")
                                                      */
};
404
/* Per-net_device private data (one per slave port in dual-EMAC mode) */
struct cpsw_priv {
        struct net_device               *ndev;
        struct device                   *dev;
        u32                             msg_enable; /* NETIF_MSG_* logging mask */
        u8                              mac_addr[ETH_ALEN];
        bool                            rx_pause;   /* rx pause frames enabled */
        bool                            tx_pause;   /* tx pause frames enabled */
        u32 emac_port;                              /* slave index in dual-EMAC mode */
        struct cpsw_common *cpsw;                   /* shared instance state */
};
415
/* One ethtool statistics table entry: display name plus the location
 * (type tag, size, byte offset) of the underlying counter.
 */
struct cpsw_stats {
        char stat_string[ETH_GSTRING_LEN];
        int type;           /* CPSW_STATS or CPDMA_{RX,TX}_STATS */
        int sizeof_stat;    /* counter width in bytes */
        int stat_offset;    /* byte offset within the stats struct */
};

/* Which struct a stat entry's offset refers to */
enum {
        CPSW_STATS,     /* struct cpsw_hw_stats */
        CPDMA_RX_STATS, /* struct cpdma_chan_stats (rx channel) */
        CPDMA_TX_STATS, /* struct cpdma_chan_stats (tx channel) */
};
428
/* Expand to the {type, sizeof, offset} triple for a stats table entry
 * naming member 'm' of the corresponding stats struct.
 */
#define CPSW_STAT(m)            CPSW_STATS,                             \
                                sizeof(((struct cpsw_hw_stats *)0)->m), \
                                offsetof(struct cpsw_hw_stats, m)
#define CPDMA_RX_STAT(m)        CPDMA_RX_STATS,                            \
                                sizeof(((struct cpdma_chan_stats *)0)->m), \
                                offsetof(struct cpdma_chan_stats, m)
#define CPDMA_TX_STAT(m)        CPDMA_TX_STATS,                            \
                                sizeof(((struct cpdma_chan_stats *)0)->m), \
                                offsetof(struct cpdma_chan_stats, m)
438
/* ethtool -S table: display name and location of every hardware counter
 * (see struct cpsw_hw_stats for the register-order layout).
 */
static const struct cpsw_stats cpsw_gstrings_stats[] = {
        { "Good Rx Frames", CPSW_STAT(rxgoodframes) },
        { "Broadcast Rx Frames", CPSW_STAT(rxbroadcastframes) },
        { "Multicast Rx Frames", CPSW_STAT(rxmulticastframes) },
        { "Pause Rx Frames", CPSW_STAT(rxpauseframes) },
        { "Rx CRC Errors", CPSW_STAT(rxcrcerrors) },
        { "Rx Align/Code Errors", CPSW_STAT(rxaligncodeerrors) },
        { "Oversize Rx Frames", CPSW_STAT(rxoversizedframes) },
        { "Rx Jabbers", CPSW_STAT(rxjabberframes) },
        { "Undersize (Short) Rx Frames", CPSW_STAT(rxundersizedframes) },
        { "Rx Fragments", CPSW_STAT(rxfragments) },
        { "Rx Octets", CPSW_STAT(rxoctets) },
        { "Good Tx Frames", CPSW_STAT(txgoodframes) },
        { "Broadcast Tx Frames", CPSW_STAT(txbroadcastframes) },
        { "Multicast Tx Frames", CPSW_STAT(txmulticastframes) },
        { "Pause Tx Frames", CPSW_STAT(txpauseframes) },
        { "Deferred Tx Frames", CPSW_STAT(txdeferredframes) },
        { "Collisions", CPSW_STAT(txcollisionframes) },
        { "Single Collision Tx Frames", CPSW_STAT(txsinglecollframes) },
        { "Multiple Collision Tx Frames", CPSW_STAT(txmultcollframes) },
        { "Excessive Collisions", CPSW_STAT(txexcessivecollisions) },
        { "Late Collisions", CPSW_STAT(txlatecollisions) },
        { "Tx Underrun", CPSW_STAT(txunderrun) },
        { "Carrier Sense Errors", CPSW_STAT(txcarriersenseerrors) },
        { "Tx Octets", CPSW_STAT(txoctets) },
        { "Rx + Tx 64 Octet Frames", CPSW_STAT(octetframes64) },
        { "Rx + Tx 65-127 Octet Frames", CPSW_STAT(octetframes65t127) },
        { "Rx + Tx 128-255 Octet Frames", CPSW_STAT(octetframes128t255) },
        { "Rx + Tx 256-511 Octet Frames", CPSW_STAT(octetframes256t511) },
        { "Rx + Tx 512-1023 Octet Frames", CPSW_STAT(octetframes512t1023) },
        { "Rx + Tx 1024-Up Octet Frames", CPSW_STAT(octetframes1024tup) },
        { "Net Octets", CPSW_STAT(netoctets) },
        { "Rx Start of Frame Overruns", CPSW_STAT(rxsofoverruns) },
        { "Rx Middle of Frame Overruns", CPSW_STAT(rxmofoverruns) },
        { "Rx DMA Overruns", CPSW_STAT(rxdmaoverruns) },
};
475
/* Per-channel CPDMA statistics table.
 * NOTE(review): every entry uses CPDMA_RX_STAT (even tx-named counters),
 * so only the sizeof/offset halves of the macro appear significant for
 * per-channel readout -- confirm against the ethtool get_strings code.
 */
static const struct cpsw_stats cpsw_gstrings_ch_stats[] = {
        { "head_enqueue", CPDMA_RX_STAT(head_enqueue) },
        { "tail_enqueue", CPDMA_RX_STAT(tail_enqueue) },
        { "pad_enqueue", CPDMA_RX_STAT(pad_enqueue) },
        { "misqueued", CPDMA_RX_STAT(misqueued) },
        { "desc_alloc_fail", CPDMA_RX_STAT(desc_alloc_fail) },
        { "pad_alloc_fail", CPDMA_RX_STAT(pad_alloc_fail) },
        { "runt_receive_buf", CPDMA_RX_STAT(runt_receive_buff) },
        { "runt_transmit_buf", CPDMA_RX_STAT(runt_transmit_buff) },
        { "empty_dequeue", CPDMA_RX_STAT(empty_dequeue) },
        { "busy_dequeue", CPDMA_RX_STAT(busy_dequeue) },
        { "good_dequeue", CPDMA_RX_STAT(good_dequeue) },
        { "requeue", CPDMA_RX_STAT(requeue) },
        { "teardown_dequeue", CPDMA_RX_STAT(teardown_dequeue) },
};

#define CPSW_STATS_COMMON_LEN   ARRAY_SIZE(cpsw_gstrings_stats)
#define CPSW_STATS_CH_LEN       ARRAY_SIZE(cpsw_gstrings_ch_stats)
494
/* Resolve the shared cpsw_common from a net_device's private data */
#define ndev_to_cpsw(ndev) (((struct cpsw_priv *)netdev_priv(ndev))->cpsw)
/* NOTE(review): struct cpsw_common has no member named 'napi' (only
 * napi_rx and napi_tx), so this macro cannot expand successfully; it
 * looks stale and unused -- confirm no callers remain and remove.
 */
#define napi_to_cpsw(napi)      container_of(napi, struct cpsw_common, napi)
/* Apply 'func' to the relevant slave(s): just this ndev's port in
 * dual-EMAC mode, every slave port otherwise.
 */
#define for_each_slave(priv, func, arg...)                              \
        do {                                                            \
                struct cpsw_slave *slave;                               \
                struct cpsw_common *cpsw = (priv)->cpsw;                \
                int n;                                                  \
                if (cpsw->data.dual_emac)                               \
                        (func)((cpsw)->slaves + priv->emac_port, ##arg);\
                else                                                    \
                        for (n = cpsw->data.slaves,                     \
                                        slave = cpsw->slaves;           \
                                        n; n--)                         \
                                (func)(slave++, ##arg);                 \
        } while (0)

/* In dual-EMAC mode, re-point 'ndev'/'skb->dev' at the ndev owning the
 * slave port the hardware reported in the rx descriptor status.
 */
#define cpsw_dual_emac_src_port_detect(cpsw, status, ndev, skb)         \
        do {                                                            \
                if (!cpsw->data.dual_emac)                              \
                        break;                                          \
                if (CPDMA_RX_SOURCE_PORT(status) == 1) {                \
                        ndev = cpsw->slaves[0].ndev;                    \
                        skb->dev = ndev;                                \
                } else if (CPDMA_RX_SOURCE_PORT(status) == 2) {         \
                        ndev = cpsw->slaves[1].ndev;                    \
                        skb->dev = ndev;                                \
                }                                                       \
        } while (0)
/* Add a multicast address to the ALE: restricted to this port's VLAN in
 * dual-EMAC mode, visible on all ports otherwise.
 */
#define cpsw_add_mcast(cpsw, priv, addr)                                \
        do {                                                            \
                if (cpsw->data.dual_emac) {                             \
                        struct cpsw_slave *slave = cpsw->slaves +       \
                                                priv->emac_port;        \
                        int slave_port = cpsw_get_slave_port(           \
                                                slave->slave_num);      \
                        cpsw_ale_add_mcast(cpsw->ale, addr,             \
                                1 << slave_port | ALE_PORT_HOST,        \
                                ALE_VLAN, slave->port_vlan, 0);         \
                } else {                                                \
                        cpsw_ale_add_mcast(cpsw->ale, addr,             \
                                ALE_ALL_PORTS,                          \
                                0, 0, 0);                               \
                }                                                       \
        } while (0)
539
540 static inline int cpsw_get_slave_port(u32 slave_num)
541 {
542         return slave_num + 1;
543 }
544
/* Enable or disable promiscuous mode on the switch hardware.
 *
 * Dual-EMAC mode: ALE bypass is a single shared hardware setting, so it
 * can only be disabled once no interface needs it; otherwise it stays on.
 *
 * Switch mode: promiscuity is emulated by disabling address learning on
 * every port, ageing out the ALE table, flushing multicast entries and
 * flooding all unicast traffic to the host port.
 *
 * (Function name misspells "promiscuous" -- kept to avoid churning
 * callers.)
 */
static void cpsw_set_promiscious(struct net_device *ndev, bool enable)
{
        struct cpsw_common *cpsw = ndev_to_cpsw(ndev);
        struct cpsw_ale *ale = cpsw->ale;
        int i;

        if (cpsw->data.dual_emac) {
                bool flag = false;

                /* Enabling promiscuous mode for one interface will be
                 * common for both the interface as the interface shares
                 * the same hardware resource.
                 */
                for (i = 0; i < cpsw->data.slaves; i++)
                        if (cpsw->slaves[i].ndev->flags & IFF_PROMISC)
                                flag = true;

                /* Keep bypass enabled while any sibling port is still
                 * promiscuous.
                 */
                if (!enable && flag) {
                        enable = true;
                        dev_err(&ndev->dev, "promiscuity not disabled as the other interface is still in promiscuity mode\n");
                }

                if (enable) {
                        /* Enable Bypass */
                        cpsw_ale_control_set(ale, 0, ALE_BYPASS, 1);

                        dev_dbg(&ndev->dev, "promiscuity enabled\n");
                } else {
                        /* Disable Bypass */
                        cpsw_ale_control_set(ale, 0, ALE_BYPASS, 0);
                        dev_dbg(&ndev->dev, "promiscuity disabled\n");
                }
        } else {
                if (enable) {
                        unsigned long timeout = jiffies + HZ;

                        /* Disable Learn for all ports (host is port 0 and slaves are port 1 and up */
                        for (i = 0; i <= cpsw->data.slaves; i++) {
                                cpsw_ale_control_set(ale, i,
                                                     ALE_PORT_NOLEARN, 1);
                                cpsw_ale_control_set(ale, i,
                                                     ALE_PORT_NO_SA_UPDATE, 1);
                        }

                        /* Clear All Untouched entries */
                        cpsw_ale_control_set(ale, 0, ALE_AGEOUT, 1);
                        /* poll up to 1s for the ageout pass to be
                         * reported, then trigger a second pass
                         */
                        do {
                                cpu_relax();
                                if (cpsw_ale_control_get(ale, 0, ALE_AGEOUT))
                                        break;
                        } while (time_after(timeout, jiffies));
                        cpsw_ale_control_set(ale, 0, ALE_AGEOUT, 1);

                        /* Clear all mcast from ALE */
                        cpsw_ale_flush_multicast(ale, ALE_ALL_PORTS, -1);

                        /* Flood All Unicast Packets to Host port */
                        cpsw_ale_control_set(ale, 0, ALE_P0_UNI_FLOOD, 1);
                        dev_dbg(&ndev->dev, "promiscuity enabled\n");
                } else {
                        /* Don't Flood All Unicast Packets to Host port */
                        cpsw_ale_control_set(ale, 0, ALE_P0_UNI_FLOOD, 0);

                        /* Enable Learn for all ports (host is port 0 and slaves are port 1 and up */
                        for (i = 0; i <= cpsw->data.slaves; i++) {
                                cpsw_ale_control_set(ale, i,
                                                     ALE_PORT_NOLEARN, 0);
                                cpsw_ale_control_set(ale, i,
                                                     ALE_PORT_NO_SA_UPDATE, 0);
                        }
                        dev_dbg(&ndev->dev, "promiscuity disabled\n");
                }
        }
}
619
620 static void cpsw_ndo_set_rx_mode(struct net_device *ndev)
621 {
622         struct cpsw_priv *priv = netdev_priv(ndev);
623         struct cpsw_common *cpsw = priv->cpsw;
624         int vid;
625
626         if (cpsw->data.dual_emac)
627                 vid = cpsw->slaves[priv->emac_port].port_vlan;
628         else
629                 vid = cpsw->data.default_vlan;
630
631         if (ndev->flags & IFF_PROMISC) {
632                 /* Enable promiscuous mode */
633                 cpsw_set_promiscious(ndev, true);
634                 cpsw_ale_set_allmulti(cpsw->ale, IFF_ALLMULTI);
635                 return;
636         } else {
637                 /* Disable promiscuous mode */
638                 cpsw_set_promiscious(ndev, false);
639         }
640
641         /* Restore allmulti on vlans if necessary */
642         cpsw_ale_set_allmulti(cpsw->ale, priv->ndev->flags & IFF_ALLMULTI);
643
644         /* Clear all mcast from ALE */
645         cpsw_ale_flush_multicast(cpsw->ale, ALE_ALL_PORTS, vid);
646
647         if (!netdev_mc_empty(ndev)) {
648                 struct netdev_hw_addr *ha;
649
650                 /* program multicast address list into ALE register */
651                 netdev_for_each_mc_addr(ha, ndev) {
652                         cpsw_add_mcast(cpsw, priv, (u8 *)ha->addr);
653                 }
654         }
655 }
656
657 static void cpsw_intr_enable(struct cpsw_common *cpsw)
658 {
659         __raw_writel(0xFF, &cpsw->wr_regs->tx_en);
660         __raw_writel(0xFF, &cpsw->wr_regs->rx_en);
661
662         cpdma_ctlr_int_ctrl(cpsw->dma, true);
663         return;
664 }
665
666 static void cpsw_intr_disable(struct cpsw_common *cpsw)
667 {
668         __raw_writel(0, &cpsw->wr_regs->tx_en);
669         __raw_writel(0, &cpsw->wr_regs->rx_en);
670
671         cpdma_ctlr_int_ctrl(cpsw->dma, false);
672         return;
673 }
674
675 static void cpsw_tx_handler(void *token, int len, int status)
676 {
677         struct netdev_queue     *txq;
678         struct sk_buff          *skb = token;
679         struct net_device       *ndev = skb->dev;
680         struct cpsw_common      *cpsw = ndev_to_cpsw(ndev);
681
682         /* Check whether the queue is stopped due to stalled tx dma, if the
683          * queue is stopped then start the queue as we have free desc for tx
684          */
685         txq = netdev_get_tx_queue(ndev, skb_get_queue_mapping(skb));
686         if (unlikely(netif_tx_queue_stopped(txq)))
687                 netif_tx_wake_queue(txq);
688
689         cpts_tx_timestamp(cpsw->cpts, skb);
690         ndev->stats.tx_packets++;
691         ndev->stats.tx_bytes += len;
692         dev_kfree_skb_any(skb);
693 }
694
/* cpdma completion callback for received packets.
 *
 * On success the received skb is pushed up the stack and a freshly
 * allocated skb is submitted back to cpdma so the rx ring never shrinks.
 * On error, or when the target interface is down, the original skb is
 * either requeued (dual-EMAC with the sibling port still up) or freed.
 */
static void cpsw_rx_handler(void *token, int len, int status)
{
        struct cpdma_chan       *ch;
        struct sk_buff          *skb = token;
        struct sk_buff          *new_skb;
        struct net_device       *ndev = skb->dev;
        int                     ret = 0;
        struct cpsw_common      *cpsw = ndev_to_cpsw(ndev);

        /* dual-EMAC: re-point ndev/skb->dev at the ndev owning the
         * source port encoded in the descriptor status
         */
        cpsw_dual_emac_src_port_detect(cpsw, status, ndev, skb);

        if (unlikely(status < 0) || unlikely(!netif_running(ndev))) {
                /* In dual emac mode check for all interfaces */
                if (cpsw->data.dual_emac && cpsw->usage_count &&
                    (status >= 0)) {
                        /* The packet received is for the interface which
                         * is already down and the other interface is up
                         * and running, instead of freeing which results
                         * in reducing of the number of rx descriptor in
                         * DMA engine, requeue skb back to cpdma.
                         */
                        new_skb = skb;
                        goto requeue;
                }

                /* the interface is going down, skbs are purged */
                dev_kfree_skb_any(skb);
                return;
        }

        new_skb = netdev_alloc_skb_ip_align(ndev, cpsw->rx_packet_max);
        if (new_skb) {
                /* keep the replacement buffer on the same rx queue */
                skb_copy_queue_mapping(new_skb, skb);
                skb_put(skb, len);
                cpts_rx_timestamp(cpsw->cpts, skb);
                skb->protocol = eth_type_trans(skb, ndev);
                netif_receive_skb(skb);
                ndev->stats.rx_bytes += len;
                ndev->stats.rx_packets++;
                /* buffer ownership passes to cpdma below; suppress a
                 * false kmemleak report
                 */
                kmemleak_not_leak(new_skb);
        } else {
                /* allocation failed: drop this packet and recycle its
                 * buffer back into the ring
                 */
                ndev->stats.rx_dropped++;
                new_skb = skb;
        }

requeue:
        if (netif_dormant(ndev)) {
                dev_kfree_skb_any(new_skb);
                return;
        }

        ch = cpsw->rxv[skb_get_queue_mapping(new_skb)].ch;
        ret = cpdma_chan_submit(ch, new_skb, new_skb->data,
                                skb_tailroom(new_skb), 0);
        if (WARN_ON(ret < 0))
                dev_kfree_skb_any(new_skb);
}
752
/* Split the NAPI poll budget (and CPDMA channel weights) among the TX
 * channels according to their configured rates; channels without a
 * rate limit share whatever budget is left.  The RX budget is split
 * evenly across RX channels.
 */
static void cpsw_split_res(struct net_device *ndev)
{
	struct cpsw_priv *priv = netdev_priv(ndev);
	u32 consumed_rate = 0, bigest_rate = 0;
	struct cpsw_common *cpsw = priv->cpsw;
	struct cpsw_vector *txv = cpsw->txv;
	int i, ch_weight, rlim_ch_num = 0;
	int budget, bigest_rate_ch = 0;
	u32 ch_rate, max_rate;
	int ch_budget = 0;

	/* count rate-limited channels and sum their rates */
	for (i = 0; i < cpsw->tx_ch_num; i++) {
		ch_rate = cpdma_chan_get_rate(txv[i].ch);
		if (!ch_rate)
			continue;

		rlim_ch_num++;
		consumed_rate += ch_rate;
	}

	if (cpsw->tx_ch_num == rlim_ch_num) {
		max_rate = consumed_rate;
	} else if (!rlim_ch_num) {
		/* no rate limits at all: plain even split */
		ch_budget = CPSW_POLL_WEIGHT / cpsw->tx_ch_num;
		bigest_rate = 0;
		max_rate = consumed_rate;
	} else {
		max_rate = cpsw->speed * 1000;

		/* if max_rate is less then expected due to reduced link speed,
		 * split proportionally according next potential max speed
		 */
		if (max_rate < consumed_rate)
			max_rate *= 10;

		if (max_rate < consumed_rate)
			max_rate *= 10;

		/* per-channel budget for the non-rate-limited channels */
		ch_budget = (consumed_rate * CPSW_POLL_WEIGHT) / max_rate;
		ch_budget = (CPSW_POLL_WEIGHT - ch_budget) /
			    (cpsw->tx_ch_num - rlim_ch_num);
		bigest_rate = (max_rate - consumed_rate) /
			      (cpsw->tx_ch_num - rlim_ch_num);
	}

	/* split tx weight/budget */
	budget = CPSW_POLL_WEIGHT;
	for (i = 0; i < cpsw->tx_ch_num; i++) {
		ch_rate = cpdma_chan_get_rate(txv[i].ch);
		if (ch_rate) {
			/* budget proportional to rate, at least 1 */
			txv[i].budget = (ch_rate * CPSW_POLL_WEIGHT) / max_rate;
			if (!txv[i].budget)
				txv[i].budget++;
			if (ch_rate > bigest_rate) {
				bigest_rate_ch = i;
				bigest_rate = ch_rate;
			}

			/* weight is a percentage of max_rate, at least 1 */
			ch_weight = (ch_rate * 100) / max_rate;
			if (!ch_weight)
				ch_weight++;
			cpdma_chan_set_weight(cpsw->txv[i].ch, ch_weight);
		} else {
			txv[i].budget = ch_budget;
			if (!bigest_rate_ch)
				bigest_rate_ch = i;
			cpdma_chan_set_weight(cpsw->txv[i].ch, 0);
		}

		budget -= txv[i].budget;
	}

	/* rounding leftovers go to the busiest channel */
	if (budget)
		txv[bigest_rate_ch].budget += budget;

	/* split rx budget */
	budget = CPSW_POLL_WEIGHT;
	ch_budget = budget / cpsw->rx_ch_num;
	for (i = 0; i < cpsw->rx_ch_num; i++) {
		cpsw->rxv[i].budget = ch_budget;
		budget -= ch_budget;
	}

	if (budget)
		cpsw->rxv[0].budget += budget;
}
839
/* TX hard IRQ: mask further TX interrupts in the wrapper, ack the
 * CPDMA interrupt line, and defer the real work to NAPI.
 */
static irqreturn_t cpsw_tx_interrupt(int irq, void *dev_id)
{
	struct cpsw_common *cpsw = dev_id;

	writel(0, &cpsw->wr_regs->tx_en);
	cpdma_ctlr_eoi(cpsw->dma, CPDMA_EOI_TX);

	/* with the irq quirk also mask at the interrupt controller;
	 * cpsw_tx_poll() re-enables it once polling completes
	 */
	if (cpsw->quirk_irq) {
		disable_irq_nosync(cpsw->irqs_table[1]);
		cpsw->tx_irq_disabled = true;
	}

	napi_schedule(&cpsw->napi_tx);
	return IRQ_HANDLED;
}
855
/* RX hard IRQ: ack the CPDMA interrupt line, mask further RX
 * interrupts in the wrapper, and defer processing to NAPI.
 */
static irqreturn_t cpsw_rx_interrupt(int irq, void *dev_id)
{
	struct cpsw_common *cpsw = dev_id;

	cpdma_ctlr_eoi(cpsw->dma, CPDMA_EOI_RX);
	writel(0, &cpsw->wr_regs->rx_en);

	/* with the irq quirk also mask at the interrupt controller;
	 * cpsw_rx_poll() re-enables it once polling completes
	 */
	if (cpsw->quirk_irq) {
		disable_irq_nosync(cpsw->irqs_table[0]);
		cpsw->rx_irq_disabled = true;
	}

	napi_schedule(&cpsw->napi_rx);
	return IRQ_HANDLED;
}
871
/* NAPI TX poll: service every TX channel with pending completions,
 * honouring the per-channel budgets set up by cpsw_split_res().
 * Returns the number of packets processed; when below @budget, exits
 * polling mode and unmasks TX interrupts.
 */
static int cpsw_tx_poll(struct napi_struct *napi_tx, int budget)
{
	u32			ch_map;
	int			num_tx, cur_budget, ch;
	struct cpsw_common	*cpsw = napi_to_cpsw(napi_tx);
	struct cpsw_vector	*txv;

	/* process every unprocessed channel */
	ch_map = cpdma_ctrl_txchs_state(cpsw->dma);
	for (ch = 0, num_tx = 0; ch_map; ch_map >>= 1, ch++) {
		if (!(ch_map & 0x01))
			continue;

		txv = &cpsw->txv[ch];
		/* never exceed what remains of the overall NAPI budget */
		if (unlikely(txv->budget > budget - num_tx))
			cur_budget = budget - num_tx;
		else
			cur_budget = txv->budget;

		num_tx += cpdma_chan_process(txv->ch, cur_budget);
		if (num_tx >= budget)
			break;
	}

	if (num_tx < budget) {
		/* all work done: leave polling mode, unmask TX irqs */
		napi_complete(napi_tx);
		writel(0xff, &cpsw->wr_regs->tx_en);
		if (cpsw->quirk_irq && cpsw->tx_irq_disabled) {
			cpsw->tx_irq_disabled = false;
			enable_irq(cpsw->irqs_table[1]);
		}
	}

	return num_tx;
}
907
/* NAPI RX poll: service every RX channel with pending packets,
 * honouring the per-channel budgets set up by cpsw_split_res().
 * Returns the number of packets processed; when below @budget, exits
 * polling mode and unmasks RX interrupts.
 */
static int cpsw_rx_poll(struct napi_struct *napi_rx, int budget)
{
	u32			ch_map;
	int			num_rx, cur_budget, ch;
	struct cpsw_common	*cpsw = napi_to_cpsw(napi_rx);
	struct cpsw_vector	*rxv;

	/* process every unprocessed channel */
	ch_map = cpdma_ctrl_rxchs_state(cpsw->dma);
	for (ch = 0, num_rx = 0; ch_map; ch_map >>= 1, ch++) {
		if (!(ch_map & 0x01))
			continue;

		rxv = &cpsw->rxv[ch];
		/* never exceed what remains of the overall NAPI budget */
		if (unlikely(rxv->budget > budget - num_rx))
			cur_budget = budget - num_rx;
		else
			cur_budget = rxv->budget;

		num_rx += cpdma_chan_process(rxv->ch, cur_budget);
		if (num_rx >= budget)
			break;
	}

	if (num_rx < budget) {
		/* all work done: leave polling mode, unmask RX irqs */
		napi_complete_done(napi_rx, num_rx);
		writel(0xff, &cpsw->wr_regs->rx_en);
		if (cpsw->quirk_irq && cpsw->rx_irq_disabled) {
			cpsw->rx_irq_disabled = false;
			enable_irq(cpsw->irqs_table[0]);
		}
	}

	return num_rx;
}
943
944 static inline void soft_reset(const char *module, void __iomem *reg)
945 {
946         unsigned long timeout = jiffies + HZ;
947
948         __raw_writel(1, reg);
949         do {
950                 cpu_relax();
951         } while ((__raw_readl(reg) & 1) && time_after(timeout, jiffies));
952
953         WARN(__raw_readl(reg) & 1, "failed to soft-reset %s\n", module);
954 }
955
/* Pack a 6-byte MAC address into the two 32-bit values written to the
 * slave's SA_HI (bytes 0-3) and SA_LO (bytes 4-5) registers.
 */
#define mac_hi(mac)	(((mac)[0] << 0) | ((mac)[1] << 8) |	\
			 ((mac)[2] << 16) | ((mac)[3] << 24))
#define mac_lo(mac)	(((mac)[4] << 0) | ((mac)[5] << 8))
959
/* Program the slave port's unicast MAC address registers. */
static void cpsw_set_slave_mac(struct cpsw_slave *slave,
			       struct cpsw_priv *priv)
{
	slave_write(slave, mac_hi(priv->mac_addr), SA_HI);
	slave_write(slave, mac_lo(priv->mac_addr), SA_LO);
}
966
/* Per-slave link update: mirror the PHY state into the sliver's
 * mac_control register and the ALE port state.  Sets *link to true
 * when this slave's PHY reports link up; leaves it untouched otherwise.
 */
static void _cpsw_adjust_link(struct cpsw_slave *slave,
			      struct cpsw_priv *priv, bool *link)
{
	struct phy_device	*phy = slave->phy;
	u32			mac_control = 0;
	u32			slave_port;
	struct cpsw_common *cpsw = priv->cpsw;

	if (!phy)
		return;

	slave_port = cpsw_get_slave_port(slave->slave_num);

	if (phy->link) {
		mac_control = cpsw->data.mac_control;

		/* enable forwarding */
		cpsw_ale_control_set(cpsw->ale, slave_port,
				     ALE_PORT_STATE, ALE_PORT_STATE_FORWARD);

		if (phy->speed == 1000)
			mac_control |= BIT(7);	/* GIGABITEN    */
		if (phy->duplex)
			mac_control |= BIT(0);	/* FULLDUPLEXEN */

		/* set speed_in input in case RMII mode is used in 100Mbps */
		if (phy->speed == 100)
			mac_control |= BIT(15);
		else if (phy->speed == 10)
			mac_control |= BIT(18); /* In Band mode */

		if (priv->rx_pause)
			mac_control |= BIT(3);	/* RX flow control */

		if (priv->tx_pause)
			mac_control |= BIT(4);	/* TX flow control */

		*link = true;
	} else {
		mac_control = 0;
		/* disable forwarding */
		cpsw_ale_control_set(cpsw->ale, slave_port,
				     ALE_PORT_STATE, ALE_PORT_STATE_DISABLE);
	}

	/* only touch the hardware (and log) when something changed */
	if (mac_control != slave->mac_control) {
		phy_print_status(phy);
		__raw_writel(mac_control, &slave->sliver->mac_control);
	}

	slave->mac_control = mac_control;
}
1019
1020 static int cpsw_get_common_speed(struct cpsw_common *cpsw)
1021 {
1022         int i, speed;
1023
1024         for (i = 0, speed = 0; i < cpsw->data.slaves; i++)
1025                 if (cpsw->slaves[i].phy && cpsw->slaves[i].phy->link)
1026                         speed += cpsw->slaves[i].phy->speed;
1027
1028         return speed;
1029 }
1030
/* Decide whether TX budgets must be recomputed after a link change.
 * Returns 1 when the common speed changed AND there is a mix of
 * rate-limited and unlimited TX channels; 0 otherwise.  Updates
 * cpsw->speed as a side effect when the speed changed.
 */
static int cpsw_need_resplit(struct cpsw_common *cpsw)
{
	int i, rlim_ch_num;
	int speed, ch_rate;

	/* re-split resources only in case speed was changed */
	speed = cpsw_get_common_speed(cpsw);
	if (speed == cpsw->speed || !speed)
		return 0;

	cpsw->speed = speed;

	/* NOTE(review): counting stops at the first unlimited channel,
	 * which appears to assume rate-limited channels come first —
	 * confirm against how rates are assigned
	 */
	for (i = 0, rlim_ch_num = 0; i < cpsw->tx_ch_num; i++) {
		ch_rate = cpdma_chan_get_rate(cpsw->txv[i].ch);
		if (!ch_rate)
			break;

		rlim_ch_num++;
	}

	/* cases not dependent on speed */
	if (!rlim_ch_num || rlim_ch_num == cpsw->tx_ch_num)
		return 0;

	return 1;
}
1057
/* phylib adjust_link callback: re-evaluate link state across all
 * slaves of this net_device and update carrier and TX queues.
 */
static void cpsw_adjust_link(struct net_device *ndev)
{
	struct cpsw_priv	*priv = netdev_priv(ndev);
	struct cpsw_common	*cpsw = priv->cpsw;
	bool			link = false;

	for_each_slave(priv, _cpsw_adjust_link, priv, &link);

	if (link) {
		/* speed may have changed; redistribute NAPI budgets */
		if (cpsw_need_resplit(cpsw))
			cpsw_split_res(ndev);

		netif_carrier_on(ndev);
		if (netif_running(ndev))
			netif_tx_wake_all_queues(ndev);
	} else {
		netif_carrier_off(ndev);
		netif_tx_stop_all_queues(ndev);
	}
}
1078
/* ethtool get_coalesce: report the current interrupt pacing interval
 * (a single value shared by RX and TX).
 */
static int cpsw_get_coalesce(struct net_device *ndev,
				struct ethtool_coalesce *coal)
{
	struct cpsw_common *cpsw = ndev_to_cpsw(ndev);

	coal->rx_coalesce_usecs = cpsw->coal_intvl;
	return 0;
}
1087
/* ethtool set_coalesce: program the wrapper's interrupt pacer.
 * rx_coalesce_usecs == 0 disables pacing; otherwise the interval is
 * clamped to [CPSW_CMINTMIN_INTVL, CPSW_CMINTMAX_INTVL], optionally
 * stretched by dilating the pacer pulse for very large intervals.
 */
static int cpsw_set_coalesce(struct net_device *ndev,
				struct ethtool_coalesce *coal)
{
	struct cpsw_priv *priv = netdev_priv(ndev);
	u32 int_ctrl;
	u32 num_interrupts = 0;
	u32 prescale = 0;
	u32 addnl_dvdr = 1;
	u32 coal_intvl = 0;
	struct cpsw_common *cpsw = priv->cpsw;

	coal_intvl = coal->rx_coalesce_usecs;

	int_ctrl =  readl(&cpsw->wr_regs->int_control);
	prescale = cpsw->bus_freq_mhz * 4;

	if (!coal->rx_coalesce_usecs) {
		/* disable interrupt pacing entirely */
		int_ctrl &= ~(CPSW_INTPRESCALE_MASK | CPSW_INTPACEEN);
		goto update_return;
	}

	if (coal_intvl < CPSW_CMINTMIN_INTVL)
		coal_intvl = CPSW_CMINTMIN_INTVL;

	if (coal_intvl > CPSW_CMINTMAX_INTVL) {
		/* Interrupt pacer works with 4us Pulse, we can
		 * throttle further by dilating the 4us pulse.
		 */
		addnl_dvdr = CPSW_INTPRESCALE_MASK / prescale;

		if (addnl_dvdr > 1) {
			prescale *= addnl_dvdr;
			if (coal_intvl > (CPSW_CMINTMAX_INTVL * addnl_dvdr))
				coal_intvl = (CPSW_CMINTMAX_INTVL
						* addnl_dvdr);
		} else {
			addnl_dvdr = 1;
			coal_intvl = CPSW_CMINTMAX_INTVL;
		}
	}

	/* interrupts per millisecond at the dilated pulse rate */
	num_interrupts = (1000 * addnl_dvdr) / coal_intvl;
	writel(num_interrupts, &cpsw->wr_regs->rx_imax);
	writel(num_interrupts, &cpsw->wr_regs->tx_imax);

	int_ctrl |= CPSW_INTPACEEN;
	int_ctrl &= (~CPSW_INTPRESCALE_MASK);
	int_ctrl |= (prescale & CPSW_INTPRESCALE_MASK);

update_return:
	writel(int_ctrl, &cpsw->wr_regs->int_control);

	cpsw_notice(priv, timer, "Set coalesce to %d usecs.\n", coal_intvl);
	cpsw->coal_intvl = coal_intvl;

	return 0;
}
1145
1146 static int cpsw_get_sset_count(struct net_device *ndev, int sset)
1147 {
1148         struct cpsw_common *cpsw = ndev_to_cpsw(ndev);
1149
1150         switch (sset) {
1151         case ETH_SS_STATS:
1152                 return (CPSW_STATS_COMMON_LEN +
1153                        (cpsw->rx_ch_num + cpsw->tx_ch_num) *
1154                        CPSW_STATS_CH_LEN);
1155         default:
1156                 return -EOPNOTSUPP;
1157         }
1158 }
1159
1160 static void cpsw_add_ch_strings(u8 **p, int ch_num, int rx_dir)
1161 {
1162         int ch_stats_len;
1163         int line;
1164         int i;
1165
1166         ch_stats_len = CPSW_STATS_CH_LEN * ch_num;
1167         for (i = 0; i < ch_stats_len; i++) {
1168                 line = i % CPSW_STATS_CH_LEN;
1169                 snprintf(*p, ETH_GSTRING_LEN,
1170                          "%s DMA chan %d: %s", rx_dir ? "Rx" : "Tx",
1171                          i / CPSW_STATS_CH_LEN,
1172                          cpsw_gstrings_ch_stats[line].stat_string);
1173                 *p += ETH_GSTRING_LEN;
1174         }
1175 }
1176
/* ethtool get_strings: emit stat names in the same order that
 * cpsw_get_ethtool_stats() fills values — common stats first, then
 * per-channel RX and TX DMA stats.
 */
static void cpsw_get_strings(struct net_device *ndev, u32 stringset, u8 *data)
{
	struct cpsw_common *cpsw = ndev_to_cpsw(ndev);
	u8 *p = data;
	int i;

	switch (stringset) {
	case ETH_SS_STATS:
		for (i = 0; i < CPSW_STATS_COMMON_LEN; i++) {
			memcpy(p, cpsw_gstrings_stats[i].stat_string,
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}

		cpsw_add_ch_strings(&p, cpsw->rx_ch_num, 1);
		cpsw_add_ch_strings(&p, cpsw->tx_ch_num, 0);
		break;
	}
}
1196
/* ethtool get_ethtool_stats: fill @data in the order declared by
 * cpsw_get_strings() — common hardware stats, then per-channel RX and
 * TX CPDMA stats.
 */
static void cpsw_get_ethtool_stats(struct net_device *ndev,
				    struct ethtool_stats *stats, u64 *data)
{
	u8 *p;
	struct cpsw_common *cpsw = ndev_to_cpsw(ndev);
	struct cpdma_chan_stats ch_stats;
	int i, l, ch;

	/* common hardware statistics registers */
	for (l = 0; l < CPSW_STATS_COMMON_LEN; l++)
		data[l] = readl(cpsw->hw_stats +
				cpsw_gstrings_stats[l].stat_offset);

	/* Collect Davinci CPDMA stats for Rx and Tx Channel */
	for (ch = 0; ch < cpsw->rx_ch_num; ch++) {
		cpdma_chan_get_stats(cpsw->rxv[ch].ch, &ch_stats);
		for (i = 0; i < CPSW_STATS_CH_LEN; i++, l++) {
			p = (u8 *)&ch_stats +
				cpsw_gstrings_ch_stats[i].stat_offset;
			data[l] = *(u32 *)p;
		}
	}

	for (ch = 0; ch < cpsw->tx_ch_num; ch++) {
		cpdma_chan_get_stats(cpsw->txv[ch].ch, &ch_stats);
		for (i = 0; i < CPSW_STATS_CH_LEN; i++, l++) {
			p = (u8 *)&ch_stats +
				cpsw_gstrings_ch_stats[i].stat_offset;
			data[l] = *(u32 *)p;
		}
	}
}
1228
/* Submit an skb to a TX CPDMA channel.  The last argument selects the
 * directed port: emac_port + 1 in dual EMAC mode (data.dual_emac acts
 * as a 0/1 flag here), plain emac_port otherwise.
 */
static inline int cpsw_tx_packet_submit(struct cpsw_priv *priv,
					struct sk_buff *skb,
					struct cpdma_chan *txch)
{
	struct cpsw_common *cpsw = priv->cpsw;

	return cpdma_chan_submit(txch, skb, skb->data, skb->len,
				 priv->emac_port + cpsw->data.dual_emac);
}
1238
/* Install the default ALE entries for a slave in dual EMAC mode: the
 * port VLAN, broadcast multicast membership for that VLAN, and the
 * slave's own unicast address on the host port.
 */
static inline void cpsw_add_dual_emac_def_ale_entries(
		struct cpsw_priv *priv, struct cpsw_slave *slave,
		u32 slave_port)
{
	struct cpsw_common *cpsw = priv->cpsw;
	u32 port_mask = 1 << slave_port | ALE_PORT_HOST;

	/* the port VLAN register offset differs between IP versions */
	if (cpsw->version == CPSW_VERSION_1)
		slave_write(slave, slave->port_vlan, CPSW1_PORT_VLAN);
	else
		slave_write(slave, slave->port_vlan, CPSW2_PORT_VLAN);
	cpsw_ale_add_vlan(cpsw->ale, slave->port_vlan, port_mask,
			  port_mask, port_mask, 0);
	cpsw_ale_add_mcast(cpsw->ale, priv->ndev->broadcast,
			   port_mask, ALE_VLAN, slave->port_vlan, 0);
	cpsw_ale_add_ucast(cpsw->ale, priv->mac_addr,
			   HOST_PORT_NUM, ALE_VLAN |
			   ALE_SECURE, slave->port_vlan);
}
1258
1259 static void soft_reset_slave(struct cpsw_slave *slave)
1260 {
1261         char name[32];
1262
1263         snprintf(name, sizeof(name), "slave-%d", slave->slave_num);
1264         soft_reset(name, &slave->sliver->soft_reset);
1265 }
1266
/* Bring up one slave port: soft-reset the sliver, program priority
 * maps, max RX length and MAC address, install ALE entries, then
 * connect and start the PHY.
 */
static void cpsw_slave_open(struct cpsw_slave *slave, struct cpsw_priv *priv)
{
	u32 slave_port;
	struct cpsw_common *cpsw = priv->cpsw;

	soft_reset_slave(slave);

	/* setup priority mapping */
	__raw_writel(RX_PRIORITY_MAPPING, &slave->sliver->rx_pri_map);

	/* the TX priority map register moved between IP versions */
	switch (cpsw->version) {
	case CPSW_VERSION_1:
		slave_write(slave, TX_PRIORITY_MAPPING, CPSW1_TX_PRI_MAP);
		break;
	case CPSW_VERSION_2:
	case CPSW_VERSION_3:
	case CPSW_VERSION_4:
		slave_write(slave, TX_PRIORITY_MAPPING, CPSW2_TX_PRI_MAP);
		break;
	}

	/* setup max packet size, and mac address */
	__raw_writel(cpsw->rx_packet_max, &slave->sliver->rx_maxlen);
	cpsw_set_slave_mac(slave, priv);

	slave->mac_control = 0;	/* no link yet */

	slave_port = cpsw_get_slave_port(slave->slave_num);

	if (cpsw->data.dual_emac)
		cpsw_add_dual_emac_def_ale_entries(priv, slave, slave_port);
	else
		cpsw_ale_add_mcast(cpsw->ale, priv->ndev->broadcast,
				   1 << slave_port, 0, 0, ALE_MCAST_FWD_2);

	/* prefer the DT phy handle; fall back to the legacy phy_id string */
	if (slave->data->phy_node) {
		slave->phy = of_phy_connect(priv->ndev, slave->data->phy_node,
				 &cpsw_adjust_link, 0, slave->data->phy_if);
		if (!slave->phy) {
			dev_err(priv->dev, "phy \"%s\" not found on slave %d\n",
				slave->data->phy_node->full_name,
				slave->slave_num);
			return;
		}
	} else {
		slave->phy = phy_connect(priv->ndev, slave->data->phy_id,
				 &cpsw_adjust_link, slave->data->phy_if);
		if (IS_ERR(slave->phy)) {
			dev_err(priv->dev,
				"phy \"%s\" not found on slave %d, err %ld\n",
				slave->data->phy_id, slave->slave_num,
				PTR_ERR(slave->phy));
			slave->phy = NULL;
			return;
		}
	}

	phy_attached_info(slave->phy);

	phy_start(slave->phy);

	/* Configure GMII_SEL register */
	cpsw_phy_sel(cpsw->dev, slave->phy->interface, slave->slave_num);
}
1331
/* Program the default port VLAN into the host port and every slave
 * port, and install the matching ALE VLAN entry spanning all ports.
 */
static inline void cpsw_add_default_vlan(struct cpsw_priv *priv)
{
	struct cpsw_common *cpsw = priv->cpsw;
	const int vlan = cpsw->data.default_vlan;
	u32 reg;
	int i;
	int unreg_mcast_mask;

	/* the port VLAN register offset differs between IP versions */
	reg = (cpsw->version == CPSW_VERSION_1) ? CPSW1_PORT_VLAN :
	       CPSW2_PORT_VLAN;

	writel(vlan, &cpsw->host_port_regs->port_vlan);

	for (i = 0; i < cpsw->data.slaves; i++)
		slave_write(cpsw->slaves + i, vlan, reg);

	/* unregistered multicast floods to all ports only with ALLMULTI */
	if (priv->ndev->flags & IFF_ALLMULTI)
		unreg_mcast_mask = ALE_ALL_PORTS;
	else
		unreg_mcast_mask = ALE_PORT_1 | ALE_PORT_2;

	cpsw_ale_add_vlan(cpsw->ale, vlan, ALE_ALL_PORTS,
			  ALE_ALL_PORTS, ALE_ALL_PORTS,
			  unreg_mcast_mask);
}
1357
/* Reset the switch core and configure the host (CPU-facing) port:
 * VLAN-aware mode, FIFO mode, priority maps and default ALE entries.
 */
static void cpsw_init_host_port(struct cpsw_priv *priv)
{
	u32 fifo_mode;
	u32 control_reg;
	struct cpsw_common *cpsw = priv->cpsw;

	/* soft reset the controller and initialize ale */
	soft_reset("cpsw", &cpsw->regs->soft_reset);
	cpsw_ale_start(cpsw->ale);

	/* switch to vlan aware mode (the old comment said "unaware",
	 * but both the ALE control and CPSW_VLAN_AWARE enable awareness)
	 */
	cpsw_ale_control_set(cpsw->ale, HOST_PORT_NUM, ALE_VLAN_AWARE,
			     CPSW_ALE_VLAN_AWARE);
	control_reg = readl(&cpsw->regs->control);
	control_reg |= CPSW_VLAN_AWARE;
	writel(control_reg, &cpsw->regs->control);
	fifo_mode = (cpsw->data.dual_emac) ? CPSW_FIFO_DUAL_MAC_MODE :
		     CPSW_FIFO_NORMAL_MODE;
	writel(fifo_mode, &cpsw->host_port_regs->tx_in_ctl);

	/* setup host port priority mapping */
	__raw_writel(CPDMA_TX_PRIORITY_MAP,
		     &cpsw->host_port_regs->cpdma_tx_pri_map);
	__raw_writel(0, &cpsw->host_port_regs->cpdma_rx_chan_map);

	cpsw_ale_control_set(cpsw->ale, HOST_PORT_NUM,
			     ALE_PORT_STATE, ALE_PORT_STATE_FORWARD);

	if (!cpsw->data.dual_emac) {
		cpsw_ale_add_ucast(cpsw->ale, priv->mac_addr, HOST_PORT_NUM,
				   0, 0);
		cpsw_ale_add_mcast(cpsw->ale, priv->ndev->broadcast,
				   ALE_PORT_HOST, 0, 0, ALE_MCAST_FWD_2);
	}
}
1393
/* Pre-fill every RX channel with receive buffers, one skb per free
 * CPDMA descriptor.  Returns 0 on success or a negative errno.
 */
static int cpsw_fill_rx_channels(struct cpsw_priv *priv)
{
	struct cpsw_common *cpsw = priv->cpsw;
	struct sk_buff *skb;
	int ch_buf_num;
	int ch, i, ret;

	for (ch = 0; ch < cpsw->rx_ch_num; ch++) {
		ch_buf_num = cpdma_chan_get_rx_buf_num(cpsw->rxv[ch].ch);
		for (i = 0; i < ch_buf_num; i++) {
			skb = __netdev_alloc_skb_ip_align(priv->ndev,
							  cpsw->rx_packet_max,
							  GFP_KERNEL);
			if (!skb) {
				cpsw_err(priv, ifup, "cannot allocate skb\n");
				return -ENOMEM;
			}

			/* remember the channel so completion can requeue */
			skb_set_queue_mapping(skb, ch);
			ret = cpdma_chan_submit(cpsw->rxv[ch].ch, skb,
						skb->data, skb_tailroom(skb),
						0);
			if (ret < 0) {
				cpsw_err(priv, ifup,
					 "cannot submit skb to channel %d rx, error %d\n",
					 ch, ret);
				kfree_skb(skb);
				return ret;
			}
			kmemleak_not_leak(skb);
		}

		cpsw_info(priv, ifup, "ch %d rx, submitted %d descriptors\n",
			  ch, ch_buf_num);
	}

	return 0;
}
1432
1433 static void cpsw_slave_stop(struct cpsw_slave *slave, struct cpsw_common *cpsw)
1434 {
1435         u32 slave_port;
1436
1437         slave_port = cpsw_get_slave_port(slave->slave_num);
1438
1439         if (!slave->phy)
1440                 return;
1441         phy_stop(slave->phy);
1442         phy_disconnect(slave->phy);
1443         slave->phy = NULL;
1444         cpsw_ale_control_set(cpsw->ale, slave_port,
1445                              ALE_PORT_STATE, ALE_PORT_STATE_DISABLE);
1446         soft_reset_slave(slave);
1447 }
1448
/* ndo_open: bring the interface up.  Shared switch resources (host
 * port, NAPI, RX buffers, cpts) are initialized only by the first
 * opener, tracked via cpsw->usage_count.
 */
static int cpsw_ndo_open(struct net_device *ndev)
{
	struct cpsw_priv *priv = netdev_priv(ndev);
	struct cpsw_common *cpsw = priv->cpsw;
	int ret;
	u32 reg;

	ret = pm_runtime_get_sync(cpsw->dev);
	if (ret < 0) {
		pm_runtime_put_noidle(cpsw->dev);
		return ret;
	}

	netif_carrier_off(ndev);

	/* Notify the stack of the actual queue counts. */
	ret = netif_set_real_num_tx_queues(ndev, cpsw->tx_ch_num);
	if (ret) {
		dev_err(priv->dev, "cannot set real number of tx queues\n");
		goto err_cleanup;
	}

	ret = netif_set_real_num_rx_queues(ndev, cpsw->rx_ch_num);
	if (ret) {
		dev_err(priv->dev, "cannot set real number of rx queues\n");
		goto err_cleanup;
	}

	reg = cpsw->version;

	dev_info(priv->dev, "initializing cpsw version %d.%d (%d)\n",
		 CPSW_MAJOR_VERSION(reg), CPSW_MINOR_VERSION(reg),
		 CPSW_RTL_VERSION(reg));

	/* Initialize host and slave ports */
	if (!cpsw->usage_count)
		cpsw_init_host_port(priv);
	for_each_slave(priv, cpsw_slave_open, priv);

	/* Add default VLAN */
	if (!cpsw->data.dual_emac)
		cpsw_add_default_vlan(priv);
	else
		cpsw_ale_add_vlan(cpsw->ale, cpsw->data.default_vlan,
				  ALE_ALL_PORTS, ALE_ALL_PORTS, 0, 0);

	/* initialize shared resources for every ndev */
	if (!cpsw->usage_count) {
		/* disable priority elevation */
		__raw_writel(0, &cpsw->regs->ptype);

		/* enable statistics collection only on all ports */
		__raw_writel(0x7, &cpsw->regs->stat_port_en);

		/* Enable internal fifo flow control */
		writel(0x7, &cpsw->regs->flow_control);

		napi_enable(&cpsw->napi_rx);
		napi_enable(&cpsw->napi_tx);

		/* re-enable irqs left masked by an earlier NAPI run */
		if (cpsw->tx_irq_disabled) {
			cpsw->tx_irq_disabled = false;
			enable_irq(cpsw->irqs_table[1]);
		}

		if (cpsw->rx_irq_disabled) {
			cpsw->rx_irq_disabled = false;
			enable_irq(cpsw->irqs_table[0]);
		}

		ret = cpsw_fill_rx_channels(priv);
		if (ret < 0)
			goto err_cleanup;

		/* best-effort: cpts failure does not abort open */
		if (cpts_register(cpsw->cpts))
			dev_err(priv->dev, "error registering cpts device\n");

	}

	/* Enable Interrupt pacing if configured */
	if (cpsw->coal_intvl != 0) {
		struct ethtool_coalesce coal;

		coal.rx_coalesce_usecs = cpsw->coal_intvl;
		cpsw_set_coalesce(ndev, &coal);
	}

	cpdma_ctlr_start(cpsw->dma);
	cpsw_intr_enable(cpsw);
	cpsw->usage_count++;

	return 0;

err_cleanup:
	cpdma_ctlr_stop(cpsw->dma);
	for_each_slave(priv, cpsw_slave_stop, cpsw);
	pm_runtime_put_sync(cpsw->dev);
	netif_carrier_off(priv->ndev);
	return ret;
}
1549
/* ndo_stop: shut down one CPSW network interface.
 *
 * Several net_devices can share one switch instance; usage_count tracks
 * how many are open.  Shared resources (NAPI, CPTS, interrupts, CPDMA,
 * ALE) are torn down only when the last open interface goes down.
 * Always returns 0.
 */
static int cpsw_ndo_stop(struct net_device *ndev)
{
	struct cpsw_priv *priv = netdev_priv(ndev);
	struct cpsw_common *cpsw = priv->cpsw;

	cpsw_info(priv, ifdown, "shutting down cpsw device\n");
	netif_tx_stop_all_queues(priv->ndev);
	netif_carrier_off(priv->ndev);

	/* Last user: quiesce NAPI first, then CPTS, interrupts, DMA, ALE */
	if (cpsw->usage_count <= 1) {
		napi_disable(&cpsw->napi_rx);
		napi_disable(&cpsw->napi_tx);
		cpts_unregister(cpsw->cpts);
		cpsw_intr_disable(cpsw);
		cpdma_ctlr_stop(cpsw->dma);
		cpsw_ale_stop(cpsw->ale);
	}
	for_each_slave(priv, cpsw_slave_stop, cpsw);

	/* Rebalance channel bandwidth if rate-limited channels changed */
	if (cpsw_need_resplit(cpsw))
		cpsw_split_res(ndev);

	cpsw->usage_count--;
	pm_runtime_put_sync(cpsw->dev);
	return 0;
}
1576
/* ndo_start_xmit: queue one skb for transmission.
 *
 * Pads short frames to the hardware minimum, optionally flags the skb for
 * CPTS hardware tx timestamping, and submits it to the CPDMA channel
 * selected by the skb's queue mapping.  Stops the tx queue when the
 * channel runs out of free descriptors so the stack backs off.
 */
static netdev_tx_t cpsw_ndo_start_xmit(struct sk_buff *skb,
				       struct net_device *ndev)
{
	struct cpsw_priv *priv = netdev_priv(ndev);
	struct cpsw_common *cpsw = priv->cpsw;
	struct netdev_queue *txq;
	struct cpdma_chan *txch;
	int ret, q_idx;

	/* skb_padto() frees the skb on failure, so just count the drop */
	if (skb_padto(skb, CPSW_MIN_PACKET_SIZE)) {
		cpsw_err(priv, tx_err, "packet pad failed\n");
		ndev->stats.tx_dropped++;
		return NET_XMIT_DROP;
	}

	/* Mark for hardware timestamping only when CPTS tx is enabled */
	if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP &&
	    cpts_is_tx_enabled(cpsw->cpts))
		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;

	skb_tx_timestamp(skb);

	/* Clamp the stack's queue mapping to the configured tx channels */
	q_idx = skb_get_queue_mapping(skb);
	if (q_idx >= cpsw->tx_ch_num)
		q_idx = q_idx % cpsw->tx_ch_num;

	txch = cpsw->txv[q_idx].ch;
	ret = cpsw_tx_packet_submit(priv, skb, txch);
	if (unlikely(ret != 0)) {
		cpsw_err(priv, tx_err, "desc submit failed\n");
		goto fail;
	}

	/* If there is no more tx desc left free then we need to
	 * tell the kernel to stop sending us tx frames.
	 */
	if (unlikely(!cpdma_check_free_tx_desc(txch))) {
		txq = netdev_get_tx_queue(ndev, q_idx);
		netif_tx_stop_queue(txq);
	}

	return NETDEV_TX_OK;
fail:
	ndev->stats.tx_dropped++;
	txq = netdev_get_tx_queue(ndev, skb_get_queue_mapping(skb));
	netif_tx_stop_queue(txq);
	/* NETDEV_TX_BUSY makes the stack requeue and retry this skb */
	return NETDEV_TX_BUSY;
}
1624
1625 #if IS_ENABLED(CONFIG_TI_CPTS)
1626
/* Program PTP timestamping on CPSW v1 hardware (active slave only).
 * Clears CPSW1_TS_CTL entirely when neither direction is enabled.
 */
static void cpsw_hwtstamp_v1(struct cpsw_common *cpsw)
{
	struct cpsw_slave *slave = &cpsw->slaves[cpsw->data.active_slave];
	u32 ts_en, seq_id;

	if (!cpts_is_tx_enabled(cpsw->cpts) &&
	    !cpts_is_rx_enabled(cpsw->cpts)) {
		slave_write(slave, 0, CPSW1_TS_CTL);
		return;
	}

	/* Sequence-id at byte offset 30 in the PTP header; match the
	 * IEEE 1588 ethertype.
	 */
	seq_id = (30 << CPSW_V1_SEQ_ID_OFS_SHIFT) | ETH_P_1588;
	ts_en = EVENT_MSG_BITS << CPSW_V1_MSG_TYPE_OFS;

	if (cpts_is_tx_enabled(cpsw->cpts))
		ts_en |= CPSW_V1_TS_TX_EN;

	if (cpts_is_rx_enabled(cpsw->cpts))
		ts_en |= CPSW_V1_TS_RX_EN;

	slave_write(slave, ts_en, CPSW1_TS_CTL);
	slave_write(slave, seq_id, CPSW1_TS_SEQ_LTYPE);
}
1650
/* Program PTP timestamping on CPSW v2/v3 hardware for this priv's slave.
 * The timestamp enable bits live in the per-slave CPSW2_CONTROL register
 * and differ between the v2 and v3 IP revisions.
 */
static void cpsw_hwtstamp_v2(struct cpsw_priv *priv)
{
	struct cpsw_slave *slave;
	struct cpsw_common *cpsw = priv->cpsw;
	u32 ctrl, mtype;

	slave = &cpsw->slaves[cpsw_slave_index(cpsw, priv)];

	/* Read-modify-write: clear all timestamp bits, then set the
	 * enabled directions for the detected IP version.
	 */
	ctrl = slave_read(slave, CPSW2_CONTROL);
	switch (cpsw->version) {
	case CPSW_VERSION_2:
		ctrl &= ~CTRL_V2_ALL_TS_MASK;

		if (cpts_is_tx_enabled(cpsw->cpts))
			ctrl |= CTRL_V2_TX_TS_BITS;

		if (cpts_is_rx_enabled(cpsw->cpts))
			ctrl |= CTRL_V2_RX_TS_BITS;
		break;
	case CPSW_VERSION_3:
	default:
		ctrl &= ~CTRL_V3_ALL_TS_MASK;

		if (cpts_is_tx_enabled(cpsw->cpts))
			ctrl |= CTRL_V3_TX_TS_BITS;

		if (cpts_is_rx_enabled(cpsw->cpts))
			ctrl |= CTRL_V3_RX_TS_BITS;
		break;
	}

	/* Sequence-id offset 30 plus the PTP event message-type bits */
	mtype = (30 << TS_SEQ_ID_OFFSET_SHIFT) | EVENT_MSG_BITS;

	slave_write(slave, mtype, CPSW2_TS_SEQ_MTYPE);
	slave_write(slave, ctrl, CPSW2_CONTROL);
	__raw_writel(ETH_P_1588, &cpsw->regs->ts_ltype);
}
1688
/* SIOCSHWTSTAMP handler: configure hardware timestamping from user space.
 *
 * Only PTPv2 filters are supported; PTPv1-only and "all packets" filters
 * are rejected with -ERANGE.  Any accepted rx filter is widened to
 * HWTSTAMP_FILTER_PTP_V2_EVENT, which is reported back to the caller.
 */
static int cpsw_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
{
	struct cpsw_priv *priv = netdev_priv(dev);
	struct hwtstamp_config cfg;
	struct cpsw_common *cpsw = priv->cpsw;
	struct cpts *cpts = cpsw->cpts;

	if (cpsw->version != CPSW_VERSION_1 &&
	    cpsw->version != CPSW_VERSION_2 &&
	    cpsw->version != CPSW_VERSION_3)
		return -EOPNOTSUPP;

	if (copy_from_user(&cfg, ifr->ifr_data, sizeof(cfg)))
		return -EFAULT;

	/* reserved for future extensions */
	if (cfg.flags)
		return -EINVAL;

	if (cfg.tx_type != HWTSTAMP_TX_OFF && cfg.tx_type != HWTSTAMP_TX_ON)
		return -ERANGE;

	switch (cfg.rx_filter) {
	case HWTSTAMP_FILTER_NONE:
		cpts_rx_enable(cpts, 0);
		break;
	case HWTSTAMP_FILTER_ALL:
	case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
	case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
	case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
		/* all-packet and PTPv1-only filtering not supported */
		return -ERANGE;
	case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
	case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
	case HWTSTAMP_FILTER_PTP_V2_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
		cpts_rx_enable(cpts, 1);
		/* hardware timestamps all PTPv2 events; report that */
		cfg.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
		break;
	default:
		return -ERANGE;
	}

	cpts_tx_enable(cpts, cfg.tx_type == HWTSTAMP_TX_ON);

	/* Commit the new configuration to the version-specific registers */
	switch (cpsw->version) {
	case CPSW_VERSION_1:
		cpsw_hwtstamp_v1(cpsw);
		break;
	case CPSW_VERSION_2:
	case CPSW_VERSION_3:
		cpsw_hwtstamp_v2(priv);
		break;
	default:
		WARN_ON(1);
	}

	return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0;
}
1752
1753 static int cpsw_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
1754 {
1755         struct cpsw_common *cpsw = ndev_to_cpsw(dev);
1756         struct cpts *cpts = cpsw->cpts;
1757         struct hwtstamp_config cfg;
1758
1759         if (cpsw->version != CPSW_VERSION_1 &&
1760             cpsw->version != CPSW_VERSION_2 &&
1761             cpsw->version != CPSW_VERSION_3)
1762                 return -EOPNOTSUPP;
1763
1764         cfg.flags = 0;
1765         cfg.tx_type = cpts_is_tx_enabled(cpts) ?
1766                       HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF;
1767         cfg.rx_filter = (cpts_is_rx_enabled(cpts) ?
1768                          HWTSTAMP_FILTER_PTP_V2_EVENT : HWTSTAMP_FILTER_NONE);
1769
1770         return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0;
1771 }
1772 #else
/* Stub when CONFIG_TI_CPTS is disabled: no hardware timestamping. */
static int cpsw_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
{
	return -EOPNOTSUPP;
}
1777
/* Stub when CONFIG_TI_CPTS is disabled: no hardware timestamping. */
static int cpsw_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
{
	return -EOPNOTSUPP;
}
1782 #endif /*CONFIG_TI_CPTS*/
1783
1784 static int cpsw_ndo_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
1785 {
1786         struct cpsw_priv *priv = netdev_priv(dev);
1787         struct cpsw_common *cpsw = priv->cpsw;
1788         int slave_no = cpsw_slave_index(cpsw, priv);
1789
1790         if (!netif_running(dev))
1791                 return -EINVAL;
1792
1793         switch (cmd) {
1794         case SIOCSHWTSTAMP:
1795                 return cpsw_hwtstamp_set(dev, req);
1796         case SIOCGHWTSTAMP:
1797                 return cpsw_hwtstamp_get(dev, req);
1798         }
1799
1800         if (!cpsw->slaves[slave_no].phy)
1801                 return -EOPNOTSUPP;
1802         return phy_mii_ioctl(cpsw->slaves[slave_no].phy, req, cmd);
1803 }
1804
1805 static void cpsw_ndo_tx_timeout(struct net_device *ndev)
1806 {
1807         struct cpsw_priv *priv = netdev_priv(ndev);
1808         struct cpsw_common *cpsw = priv->cpsw;
1809         int ch;
1810
1811         cpsw_err(priv, tx_err, "transmit timeout, restarting dma\n");
1812         ndev->stats.tx_errors++;
1813         cpsw_intr_disable(cpsw);
1814         for (ch = 0; ch < cpsw->tx_ch_num; ch++) {
1815                 cpdma_chan_stop(cpsw->txv[ch].ch);
1816                 cpdma_chan_start(cpsw->txv[ch].ch);
1817         }
1818
1819         cpsw_intr_enable(cpsw);
1820 }
1821
/* ndo_set_mac_address: change the interface MAC address.
 *
 * Replaces the host-port unicast ALE entry (VLAN-qualified in dual EMAC
 * mode) and propagates the new address to the slave MAC registers.
 * Returns 0 on success or a negative errno from runtime PM.
 */
static int cpsw_ndo_set_mac_address(struct net_device *ndev, void *p)
{
	struct cpsw_priv *priv = netdev_priv(ndev);
	struct sockaddr *addr = (struct sockaddr *)p;
	struct cpsw_common *cpsw = priv->cpsw;
	int flags = 0;
	u16 vid = 0;
	int ret;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	ret = pm_runtime_get_sync(cpsw->dev);
	if (ret < 0) {
		pm_runtime_put_noidle(cpsw->dev);
		return ret;
	}

	/* In dual EMAC mode host entries are scoped to the port VLAN */
	if (cpsw->data.dual_emac) {
		vid = cpsw->slaves[priv->emac_port].port_vlan;
		flags = ALE_VLAN;
	}

	/* Swap the old host-port ucast entry for the new address */
	cpsw_ale_del_ucast(cpsw->ale, priv->mac_addr, HOST_PORT_NUM,
			   flags, vid);
	cpsw_ale_add_ucast(cpsw->ale, addr->sa_data, HOST_PORT_NUM,
			   flags, vid);

	memcpy(priv->mac_addr, addr->sa_data, ETH_ALEN);
	memcpy(ndev->dev_addr, priv->mac_addr, ETH_ALEN);
	for_each_slave(priv, cpsw_set_slave_mac, priv);

	pm_runtime_put(cpsw->dev);

	return 0;
}
1858
1859 #ifdef CONFIG_NET_POLL_CONTROLLER
/* netpoll hook: service rx and tx by invoking the interrupt handlers
 * directly with hardware interrupts masked.
 */
static void cpsw_ndo_poll_controller(struct net_device *ndev)
{
	struct cpsw_common *cpsw = ndev_to_cpsw(ndev);

	cpsw_intr_disable(cpsw);
	cpsw_rx_interrupt(cpsw->irqs_table[0], cpsw);
	cpsw_tx_interrupt(cpsw->irqs_table[1], cpsw);
	cpsw_intr_enable(cpsw);
}
1869 #endif
1870
/* Install the three ALE entries a new VLAN needs: the VLAN itself, the
 * host-port unicast entry for our MAC, and the broadcast mcast entry.
 * On failure, previously added entries are rolled back via the goto
 * cleanup chain so the ALE is left unchanged.
 */
static inline int cpsw_add_vlan_ale_entry(struct cpsw_priv *priv,
				unsigned short vid)
{
	int ret;
	int unreg_mcast_mask = 0;
	u32 port_mask;
	struct cpsw_common *cpsw = priv->cpsw;

	if (cpsw->data.dual_emac) {
		/* only this slave's external port plus the host port */
		port_mask = (1 << (priv->emac_port + 1)) | ALE_PORT_HOST;

		if (priv->ndev->flags & IFF_ALLMULTI)
			unreg_mcast_mask = port_mask;
	} else {
		port_mask = ALE_ALL_PORTS;

		/* without ALLMULTI, unregistered multicast is flooded to
		 * the external ports only, not to the host
		 */
		if (priv->ndev->flags & IFF_ALLMULTI)
			unreg_mcast_mask = ALE_ALL_PORTS;
		else
			unreg_mcast_mask = ALE_PORT_1 | ALE_PORT_2;
	}

	ret = cpsw_ale_add_vlan(cpsw->ale, vid, port_mask, 0, port_mask,
				unreg_mcast_mask);
	if (ret != 0)
		return ret;

	ret = cpsw_ale_add_ucast(cpsw->ale, priv->mac_addr,
				 HOST_PORT_NUM, ALE_VLAN, vid);
	if (ret != 0)
		goto clean_vid;

	ret = cpsw_ale_add_mcast(cpsw->ale, priv->ndev->broadcast,
				 port_mask, ALE_VLAN, vid, 0);
	if (ret != 0)
		goto clean_vlan_ucast;
	return 0;

clean_vlan_ucast:
	cpsw_ale_del_ucast(cpsw->ale, priv->mac_addr,
			   HOST_PORT_NUM, ALE_VLAN, vid);
clean_vid:
	cpsw_ale_del_vlan(cpsw->ale, vid, 0);
	return ret;
}
1916
1917 static int cpsw_ndo_vlan_rx_add_vid(struct net_device *ndev,
1918                                     __be16 proto, u16 vid)
1919 {
1920         struct cpsw_priv *priv = netdev_priv(ndev);
1921         struct cpsw_common *cpsw = priv->cpsw;
1922         int ret;
1923
1924         if (vid == cpsw->data.default_vlan)
1925                 return 0;
1926
1927         ret = pm_runtime_get_sync(cpsw->dev);
1928         if (ret < 0) {
1929                 pm_runtime_put_noidle(cpsw->dev);
1930                 return ret;
1931         }
1932
1933         if (cpsw->data.dual_emac) {
1934                 /* In dual EMAC, reserved VLAN id should not be used for
1935                  * creating VLAN interfaces as this can break the dual
1936                  * EMAC port separation
1937                  */
1938                 int i;
1939
1940                 for (i = 0; i < cpsw->data.slaves; i++) {
1941                         if (vid == cpsw->slaves[i].port_vlan)
1942                                 return -EINVAL;
1943                 }
1944         }
1945
1946         dev_info(priv->dev, "Adding vlanid %d to vlan filter\n", vid);
1947         ret = cpsw_add_vlan_ale_entry(priv, vid);
1948
1949         pm_runtime_put(cpsw->dev);
1950         return ret;
1951 }
1952
1953 static int cpsw_ndo_vlan_rx_kill_vid(struct net_device *ndev,
1954                                      __be16 proto, u16 vid)
1955 {
1956         struct cpsw_priv *priv = netdev_priv(ndev);
1957         struct cpsw_common *cpsw = priv->cpsw;
1958         int ret;
1959
1960         if (vid == cpsw->data.default_vlan)
1961                 return 0;
1962
1963         ret = pm_runtime_get_sync(cpsw->dev);
1964         if (ret < 0) {
1965                 pm_runtime_put_noidle(cpsw->dev);
1966                 return ret;
1967         }
1968
1969         if (cpsw->data.dual_emac) {
1970                 int i;
1971
1972                 for (i = 0; i < cpsw->data.slaves; i++) {
1973                         if (vid == cpsw->slaves[i].port_vlan)
1974                                 return -EINVAL;
1975                 }
1976         }
1977
1978         dev_info(priv->dev, "removing vlanid %d from vlan filter\n", vid);
1979         ret = cpsw_ale_del_vlan(cpsw->ale, vid, 0);
1980         if (ret != 0)
1981                 return ret;
1982
1983         ret = cpsw_ale_del_ucast(cpsw->ale, priv->mac_addr,
1984                                  HOST_PORT_NUM, ALE_VLAN, vid);
1985         if (ret != 0)
1986                 return ret;
1987
1988         ret = cpsw_ale_del_mcast(cpsw->ale, priv->ndev->broadcast,
1989                                  0, ALE_VLAN, vid);
1990         pm_runtime_put(cpsw->dev);
1991         return ret;
1992 }
1993
/* ndo_set_tx_maxrate: set a shaping rate (in Mbit/s, per ndo contract)
 * on one tx queue, then mirror the value to the same queue of every
 * slave ndev and rebalance channel budgets.
 */
static int cpsw_ndo_set_tx_maxrate(struct net_device *ndev, int queue, u32 rate)
{
	struct cpsw_priv *priv = netdev_priv(ndev);
	struct cpsw_common *cpsw = priv->cpsw;
	struct cpsw_slave *slave;
	u32 min_rate;
	u32 ch_rate;
	int i, ret;

	/* nothing to do if the rate is unchanged */
	ch_rate = netdev_get_tx_queue(ndev, queue)->tx_maxrate;
	if (ch_rate == rate)
		return 0;

	/* CPDMA works in Kbit/s while the ndo rate is in Mbit/s */
	ch_rate = rate * 1000;
	min_rate = cpdma_chan_get_min_rate(cpsw->dma);
	if ((ch_rate < min_rate && ch_rate)) {
		/* NOTE(review): min_rate comes from CPDMA in the Kbit/s
		 * domain but is printed as "%dMbps" — confirm units.
		 */
		dev_err(priv->dev, "The channel rate cannot be less than %dMbps",
			min_rate);
		return -EINVAL;
	}

	/* cpsw->speed is the aggregate link speed cap */
	if (rate > cpsw->speed) {
		dev_err(priv->dev, "The channel rate cannot be more than 2Gbps");
		return -EINVAL;
	}

	ret = pm_runtime_get_sync(cpsw->dev);
	if (ret < 0) {
		pm_runtime_put_noidle(cpsw->dev);
		return ret;
	}

	ret = cpdma_chan_set_rate(cpsw->txv[queue].ch, ch_rate);
	pm_runtime_put(cpsw->dev);

	if (ret)
		return ret;

	/* update rates for slaves tx queues */
	for (i = 0; i < cpsw->data.slaves; i++) {
		slave = &cpsw->slaves[i];
		if (!slave->ndev)
			continue;

		netdev_get_tx_queue(slave->ndev, queue)->tx_maxrate = rate;
	}

	/* redistribute NAPI budget across rate-limited channels */
	cpsw_split_res(ndev);
	return ret;
}
2044
/* net_device_ops shared by every CPSW interface (both switch and dual
 * EMAC modes).
 */
static const struct net_device_ops cpsw_netdev_ops = {
	.ndo_open		= cpsw_ndo_open,
	.ndo_stop		= cpsw_ndo_stop,
	.ndo_start_xmit		= cpsw_ndo_start_xmit,
	.ndo_set_mac_address	= cpsw_ndo_set_mac_address,
	.ndo_do_ioctl		= cpsw_ndo_ioctl,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_tx_timeout		= cpsw_ndo_tx_timeout,
	.ndo_set_rx_mode	= cpsw_ndo_set_rx_mode,
	.ndo_set_tx_maxrate	= cpsw_ndo_set_tx_maxrate,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= cpsw_ndo_poll_controller,
#endif
	.ndo_vlan_rx_add_vid	= cpsw_ndo_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= cpsw_ndo_vlan_rx_kill_vid,
};
2061
2062 static int cpsw_get_regs_len(struct net_device *ndev)
2063 {
2064         struct cpsw_common *cpsw = ndev_to_cpsw(ndev);
2065
2066         return cpsw->data.ale_entries * ALE_ENTRY_WORDS * sizeof(u32);
2067 }
2068
2069 static void cpsw_get_regs(struct net_device *ndev,
2070                           struct ethtool_regs *regs, void *p)
2071 {
2072         u32 *reg = p;
2073         struct cpsw_common *cpsw = ndev_to_cpsw(ndev);
2074
2075         /* update CPSW IP version */
2076         regs->version = cpsw->version;
2077
2078         cpsw_ale_dump(cpsw->ale, reg);
2079 }
2080
2081 static void cpsw_get_drvinfo(struct net_device *ndev,
2082                              struct ethtool_drvinfo *info)
2083 {
2084         struct cpsw_common *cpsw = ndev_to_cpsw(ndev);
2085         struct platform_device  *pdev = to_platform_device(cpsw->dev);
2086
2087         strlcpy(info->driver, "cpsw", sizeof(info->driver));
2088         strlcpy(info->version, "1.0", sizeof(info->version));
2089         strlcpy(info->bus_info, pdev->name, sizeof(info->bus_info));
2090 }
2091
2092 static u32 cpsw_get_msglevel(struct net_device *ndev)
2093 {
2094         struct cpsw_priv *priv = netdev_priv(ndev);
2095         return priv->msg_enable;
2096 }
2097
2098 static void cpsw_set_msglevel(struct net_device *ndev, u32 value)
2099 {
2100         struct cpsw_priv *priv = netdev_priv(ndev);
2101         priv->msg_enable = value;
2102 }
2103
2104 #if IS_ENABLED(CONFIG_TI_CPTS)
2105 static int cpsw_get_ts_info(struct net_device *ndev,
2106                             struct ethtool_ts_info *info)
2107 {
2108         struct cpsw_common *cpsw = ndev_to_cpsw(ndev);
2109
2110         info->so_timestamping =
2111                 SOF_TIMESTAMPING_TX_HARDWARE |
2112                 SOF_TIMESTAMPING_TX_SOFTWARE |
2113                 SOF_TIMESTAMPING_RX_HARDWARE |
2114                 SOF_TIMESTAMPING_RX_SOFTWARE |
2115                 SOF_TIMESTAMPING_SOFTWARE |
2116                 SOF_TIMESTAMPING_RAW_HARDWARE;
2117         info->phc_index = cpsw->cpts->phc_index;
2118         info->tx_types =
2119                 (1 << HWTSTAMP_TX_OFF) |
2120                 (1 << HWTSTAMP_TX_ON);
2121         info->rx_filters =
2122                 (1 << HWTSTAMP_FILTER_NONE) |
2123                 (1 << HWTSTAMP_FILTER_PTP_V2_EVENT);
2124         return 0;
2125 }
2126 #else
2127 static int cpsw_get_ts_info(struct net_device *ndev,
2128                             struct ethtool_ts_info *info)
2129 {
2130         info->so_timestamping =
2131                 SOF_TIMESTAMPING_TX_SOFTWARE |
2132                 SOF_TIMESTAMPING_RX_SOFTWARE |
2133                 SOF_TIMESTAMPING_SOFTWARE;
2134         info->phc_index = -1;
2135         info->tx_types = 0;
2136         info->rx_filters = 0;
2137         return 0;
2138 }
2139 #endif
2140
2141 static int cpsw_get_link_ksettings(struct net_device *ndev,
2142                                    struct ethtool_link_ksettings *ecmd)
2143 {
2144         struct cpsw_priv *priv = netdev_priv(ndev);
2145         struct cpsw_common *cpsw = priv->cpsw;
2146         int slave_no = cpsw_slave_index(cpsw, priv);
2147
2148         if (cpsw->slaves[slave_no].phy)
2149                 return phy_ethtool_ksettings_get(cpsw->slaves[slave_no].phy,
2150                                                  ecmd);
2151         else
2152                 return -EOPNOTSUPP;
2153 }
2154
2155 static int cpsw_set_link_ksettings(struct net_device *ndev,
2156                                    const struct ethtool_link_ksettings *ecmd)
2157 {
2158         struct cpsw_priv *priv = netdev_priv(ndev);
2159         struct cpsw_common *cpsw = priv->cpsw;
2160         int slave_no = cpsw_slave_index(cpsw, priv);
2161
2162         if (cpsw->slaves[slave_no].phy)
2163                 return phy_ethtool_ksettings_set(cpsw->slaves[slave_no].phy,
2164                                                  ecmd);
2165         else
2166                 return -EOPNOTSUPP;
2167 }
2168
2169 static void cpsw_get_wol(struct net_device *ndev, struct ethtool_wolinfo *wol)
2170 {
2171         struct cpsw_priv *priv = netdev_priv(ndev);
2172         struct cpsw_common *cpsw = priv->cpsw;
2173         int slave_no = cpsw_slave_index(cpsw, priv);
2174
2175         wol->supported = 0;
2176         wol->wolopts = 0;
2177
2178         if (cpsw->slaves[slave_no].phy)
2179                 phy_ethtool_get_wol(cpsw->slaves[slave_no].phy, wol);
2180 }
2181
2182 static int cpsw_set_wol(struct net_device *ndev, struct ethtool_wolinfo *wol)
2183 {
2184         struct cpsw_priv *priv = netdev_priv(ndev);
2185         struct cpsw_common *cpsw = priv->cpsw;
2186         int slave_no = cpsw_slave_index(cpsw, priv);
2187
2188         if (cpsw->slaves[slave_no].phy)
2189                 return phy_ethtool_set_wol(cpsw->slaves[slave_no].phy, wol);
2190         else
2191                 return -EOPNOTSUPP;
2192 }
2193
2194 static void cpsw_get_pauseparam(struct net_device *ndev,
2195                                 struct ethtool_pauseparam *pause)
2196 {
2197         struct cpsw_priv *priv = netdev_priv(ndev);
2198
2199         pause->autoneg = AUTONEG_DISABLE;
2200         pause->rx_pause = priv->rx_pause ? true : false;
2201         pause->tx_pause = priv->tx_pause ? true : false;
2202 }
2203
2204 static int cpsw_set_pauseparam(struct net_device *ndev,
2205                                struct ethtool_pauseparam *pause)
2206 {
2207         struct cpsw_priv *priv = netdev_priv(ndev);
2208         bool link;
2209
2210         priv->rx_pause = pause->rx_pause ? true : false;
2211         priv->tx_pause = pause->tx_pause ? true : false;
2212
2213         for_each_slave(priv, _cpsw_adjust_link, priv, &link);
2214         return 0;
2215 }
2216
2217 static int cpsw_ethtool_op_begin(struct net_device *ndev)
2218 {
2219         struct cpsw_priv *priv = netdev_priv(ndev);
2220         struct cpsw_common *cpsw = priv->cpsw;
2221         int ret;
2222
2223         ret = pm_runtime_get_sync(cpsw->dev);
2224         if (ret < 0) {
2225                 cpsw_err(priv, drv, "ethtool begin failed %d\n", ret);
2226                 pm_runtime_put_noidle(cpsw->dev);
2227         }
2228
2229         return ret;
2230 }
2231
2232 static void cpsw_ethtool_op_complete(struct net_device *ndev)
2233 {
2234         struct cpsw_priv *priv = netdev_priv(ndev);
2235         int ret;
2236
2237         ret = pm_runtime_put(priv->cpsw->dev);
2238         if (ret < 0)
2239                 cpsw_err(priv, drv, "ethtool complete failed %d\n", ret);
2240 }
2241
2242 static void cpsw_get_channels(struct net_device *ndev,
2243                               struct ethtool_channels *ch)
2244 {
2245         struct cpsw_common *cpsw = ndev_to_cpsw(ndev);
2246
2247         ch->max_combined = 0;
2248         ch->max_rx = CPSW_MAX_QUEUES;
2249         ch->max_tx = CPSW_MAX_QUEUES;
2250         ch->max_other = 0;
2251         ch->other_count = 0;
2252         ch->rx_count = cpsw->rx_ch_num;
2253         ch->tx_count = cpsw->tx_ch_num;
2254         ch->combined_count = 0;
2255 }
2256
2257 static int cpsw_check_ch_settings(struct cpsw_common *cpsw,
2258                                   struct ethtool_channels *ch)
2259 {
2260         if (ch->combined_count)
2261                 return -EINVAL;
2262
2263         /* verify we have at least one channel in each direction */
2264         if (!ch->rx_count || !ch->tx_count)
2265                 return -EINVAL;
2266
2267         if (ch->rx_count > cpsw->data.channels ||
2268             ch->tx_count > cpsw->data.channels)
2269                 return -EINVAL;
2270
2271         return 0;
2272 }
2273
/* Grow or shrink the number of CPDMA channels in one direction until it
 * equals ch_num.  rx selects the rx (1) or tx (0) channel vector and the
 * matching handler/poll callbacks.  Returns 0 or the first error from
 * channel create/destroy.
 */
static int cpsw_update_channels_res(struct cpsw_priv *priv, int ch_num, int rx)
{
	int (*poll)(struct napi_struct *, int);
	struct cpsw_common *cpsw = priv->cpsw;
	void (*handler)(void *, int, int);
	struct netdev_queue *queue;
	struct cpsw_vector *vec;
	int ret, *ch;

	if (rx) {
		ch = &cpsw->rx_ch_num;
		vec = cpsw->rxv;
		handler = cpsw_rx_handler;
		poll = cpsw_rx_poll;
	} else {
		ch = &cpsw->tx_ch_num;
		vec = cpsw->txv;
		handler = cpsw_tx_handler;
		poll = cpsw_tx_poll;
	}

	/* create channels until we reach the requested count */
	while (*ch < ch_num) {
		vec[*ch].ch = cpdma_chan_create(cpsw->dma, *ch, handler, rx);
		/* NOTE(review): the tx queue's maxrate is reset here even
		 * when an rx channel is being created — confirm this is
		 * intentional.
		 */
		queue = netdev_get_tx_queue(priv->ndev, *ch);
		queue->tx_maxrate = 0;

		if (IS_ERR(vec[*ch].ch))
			return PTR_ERR(vec[*ch].ch);

		if (!vec[*ch].ch)
			return -EINVAL;

		cpsw_info(priv, ifup, "created new %d %s channel\n", *ch,
			  (rx ? "rx" : "tx"));
		(*ch)++;
	}

	/* destroy surplus channels, highest index first */
	while (*ch > ch_num) {
		(*ch)--;

		ret = cpdma_chan_destroy(vec[*ch].ch);
		if (ret)
			return ret;

		cpsw_info(priv, ifup, "destroyed %d %s channel\n", *ch,
			  (rx ? "rx" : "tx"));
	}

	return 0;
}
2324
2325 static int cpsw_update_channels(struct cpsw_priv *priv,
2326                                 struct ethtool_channels *ch)
2327 {
2328         int ret;
2329
2330         ret = cpsw_update_channels_res(priv, ch->rx_count, 1);
2331         if (ret)
2332                 return ret;
2333
2334         ret = cpsw_update_channels_res(priv, ch->tx_count, 0);
2335         if (ret)
2336                 return ret;
2337
2338         return 0;
2339 }
2340
/* Quiesce all data traffic prior to a channel reconfiguration: mask
 * interrupts, stop tx queues and mark running slave ndevs dormant so rx
 * descriptors are not recycled, then drain and stop CPDMA.
 */
static void cpsw_suspend_data_pass(struct net_device *ndev)
{
	struct cpsw_common *cpsw = ndev_to_cpsw(ndev);
	struct cpsw_slave *slave;
	int i;

	/* Disable NAPI scheduling */
	cpsw_intr_disable(cpsw);

	/* Stop all transmit queues for every network device.
	 * Disable re-using rx descriptors with dormant_on.
	 */
	for (i = cpsw->data.slaves, slave = cpsw->slaves; i; i--, slave++) {
		if (!(slave->ndev && netif_running(slave->ndev)))
			continue;

		netif_tx_stop_all_queues(slave->ndev);
		netif_dormant_on(slave->ndev);
	}

	/* Handle rest of tx packets and stop cpdma channels */
	cpdma_ctlr_stop(cpsw->dma);
}
2364
/* Counterpart of cpsw_suspend_data_pass(): clear dormant state, refill
 * the rx channels and restart CPDMA/interrupts (only if some interface
 * is open), then re-enable tx queues on all running slave ndevs.
 * Returns 0 or the error from refilling the rx channels.
 */
static int cpsw_resume_data_pass(struct net_device *ndev)
{
	struct cpsw_priv *priv = netdev_priv(ndev);
	struct cpsw_common *cpsw = priv->cpsw;
	struct cpsw_slave *slave;
	int i, ret;

	/* Allow rx packets handling */
	for (i = cpsw->data.slaves, slave = cpsw->slaves; i; i--, slave++)
		if (slave->ndev && netif_running(slave->ndev))
			netif_dormant_off(slave->ndev);

	/* After this receive is started */
	if (cpsw->usage_count) {
		ret = cpsw_fill_rx_channels(priv);
		if (ret)
			return ret;

		cpdma_ctlr_start(cpsw->dma);
		cpsw_intr_enable(cpsw);
	}

	/* Resume transmit for every affected interface */
	for (i = cpsw->data.slaves, slave = cpsw->slaves; i; i--, slave++)
		if (slave->ndev && netif_running(slave->ndev))
			netif_tx_start_all_queues(slave->ndev);

	return 0;
}
2394
/* ethtool set_channels: reconfigure the number of rx/tx channels.
 *
 * Traffic is suspended around the reconfiguration.  On any failure the
 * hardware state is indeterminate, so the device is closed rather than
 * left half-configured.
 */
static int cpsw_set_channels(struct net_device *ndev,
			     struct ethtool_channels *chs)
{
	struct cpsw_priv *priv = netdev_priv(ndev);
	struct cpsw_common *cpsw = priv->cpsw;
	struct cpsw_slave *slave;
	int i, ret;

	ret = cpsw_check_ch_settings(cpsw, chs);
	if (ret < 0)
		return ret;

	cpsw_suspend_data_pass(ndev);
	ret = cpsw_update_channels(priv, chs);
	if (ret)
		goto err;

	for (i = cpsw->data.slaves, slave = cpsw->slaves; i; i--, slave++) {
		if (!(slave->ndev && netif_running(slave->ndev)))
			continue;

		/* Inform stack about new count of queues */
		ret = netif_set_real_num_tx_queues(slave->ndev,
						   cpsw->tx_ch_num);
		if (ret) {
			dev_err(priv->dev, "cannot set real number of tx queues\n");
			goto err;
		}

		ret = netif_set_real_num_rx_queues(slave->ndev,
						   cpsw->rx_ch_num);
		if (ret) {
			dev_err(priv->dev, "cannot set real number of rx queues\n");
			goto err;
		}
	}

	/* rebalance budgets only while some interface is open */
	if (cpsw->usage_count)
		cpsw_split_res(ndev);

	ret = cpsw_resume_data_pass(ndev);
	if (!ret)
		return 0;
err:
	dev_err(priv->dev, "cannot update channels number, closing device\n");
	dev_close(ndev);
	return ret;
}
2443
2444 static int cpsw_get_eee(struct net_device *ndev, struct ethtool_eee *edata)
2445 {
2446         struct cpsw_priv *priv = netdev_priv(ndev);
2447         struct cpsw_common *cpsw = priv->cpsw;
2448         int slave_no = cpsw_slave_index(cpsw, priv);
2449
2450         if (cpsw->slaves[slave_no].phy)
2451                 return phy_ethtool_get_eee(cpsw->slaves[slave_no].phy, edata);
2452         else
2453                 return -EOPNOTSUPP;
2454 }
2455
2456 static int cpsw_set_eee(struct net_device *ndev, struct ethtool_eee *edata)
2457 {
2458         struct cpsw_priv *priv = netdev_priv(ndev);
2459         struct cpsw_common *cpsw = priv->cpsw;
2460         int slave_no = cpsw_slave_index(cpsw, priv);
2461
2462         if (cpsw->slaves[slave_no].phy)
2463                 return phy_ethtool_set_eee(cpsw->slaves[slave_no].phy, edata);
2464         else
2465                 return -EOPNOTSUPP;
2466 }
2467
2468 static int cpsw_nway_reset(struct net_device *ndev)
2469 {
2470         struct cpsw_priv *priv = netdev_priv(ndev);
2471         struct cpsw_common *cpsw = priv->cpsw;
2472         int slave_no = cpsw_slave_index(cpsw, priv);
2473
2474         if (cpsw->slaves[slave_no].phy)
2475                 return genphy_restart_aneg(cpsw->slaves[slave_no].phy);
2476         else
2477                 return -EOPNOTSUPP;
2478 }
2479
2480 static void cpsw_get_ringparam(struct net_device *ndev,
2481                                struct ethtool_ringparam *ering)
2482 {
2483         struct cpsw_priv *priv = netdev_priv(ndev);
2484         struct cpsw_common *cpsw = priv->cpsw;
2485
2486         /* not supported */
2487         ering->tx_max_pending = 0;
2488         ering->tx_pending = cpdma_get_num_tx_descs(cpsw->dma);
2489         ering->rx_max_pending = descs_pool_size - CPSW_MAX_QUEUES;
2490         ering->rx_pending = cpdma_get_num_rx_descs(cpsw->dma);
2491 }
2492
2493 static int cpsw_set_ringparam(struct net_device *ndev,
2494                               struct ethtool_ringparam *ering)
2495 {
2496         struct cpsw_priv *priv = netdev_priv(ndev);
2497         struct cpsw_common *cpsw = priv->cpsw;
2498         int ret;
2499
2500         /* ignore ering->tx_pending - only rx_pending adjustment is supported */
2501
2502         if (ering->rx_mini_pending || ering->rx_jumbo_pending ||
2503             ering->rx_pending < CPSW_MAX_QUEUES ||
2504             ering->rx_pending > (descs_pool_size - CPSW_MAX_QUEUES))
2505                 return -EINVAL;
2506
2507         if (ering->rx_pending == cpdma_get_num_rx_descs(cpsw->dma))
2508                 return 0;
2509
2510         cpsw_suspend_data_pass(ndev);
2511
2512         cpdma_set_num_rx_descs(cpsw->dma, ering->rx_pending);
2513
2514         if (cpsw->usage_count)
2515                 cpdma_chan_split_pool(cpsw->dma);
2516
2517         ret = cpsw_resume_data_pass(ndev);
2518         if (!ret)
2519                 return 0;
2520
2521         dev_err(&ndev->dev, "cannot set ring params, closing device\n");
2522         dev_close(ndev);
2523         return ret;
2524 }
2525
2526 static const struct ethtool_ops cpsw_ethtool_ops = {
2527         .get_drvinfo    = cpsw_get_drvinfo,
2528         .get_msglevel   = cpsw_get_msglevel,
2529         .set_msglevel   = cpsw_set_msglevel,
2530         .get_link       = ethtool_op_get_link,
2531         .get_ts_info    = cpsw_get_ts_info,
2532         .get_coalesce   = cpsw_get_coalesce,
2533         .set_coalesce   = cpsw_set_coalesce,
2534         .get_sset_count         = cpsw_get_sset_count,
2535         .get_strings            = cpsw_get_strings,
2536         .get_ethtool_stats      = cpsw_get_ethtool_stats,
2537         .get_pauseparam         = cpsw_get_pauseparam,
2538         .set_pauseparam         = cpsw_set_pauseparam,
2539         .get_wol        = cpsw_get_wol,
2540         .set_wol        = cpsw_set_wol,
2541         .get_regs_len   = cpsw_get_regs_len,
2542         .get_regs       = cpsw_get_regs,
2543         .begin          = cpsw_ethtool_op_begin,
2544         .complete       = cpsw_ethtool_op_complete,
2545         .get_channels   = cpsw_get_channels,
2546         .set_channels   = cpsw_set_channels,
2547         .get_link_ksettings     = cpsw_get_link_ksettings,
2548         .set_link_ksettings     = cpsw_set_link_ksettings,
2549         .get_eee        = cpsw_get_eee,
2550         .set_eee        = cpsw_set_eee,
2551         .nway_reset     = cpsw_nway_reset,
2552         .get_ringparam = cpsw_get_ringparam,
2553         .set_ringparam = cpsw_set_ringparam,
2554 };
2555
2556 static void cpsw_slave_init(struct cpsw_slave *slave, struct cpsw_common *cpsw,
2557                             u32 slave_reg_ofs, u32 sliver_reg_ofs)
2558 {
2559         void __iomem            *regs = cpsw->regs;
2560         int                     slave_num = slave->slave_num;
2561         struct cpsw_slave_data  *data = cpsw->data.slave_data + slave_num;
2562
2563         slave->data     = data;
2564         slave->regs     = regs + slave_reg_ofs;
2565         slave->sliver   = regs + sliver_reg_ofs;
2566         slave->port_vlan = data->dual_emac_res_vlan;
2567 }
2568
2569 static int cpsw_probe_dt(struct cpsw_platform_data *data,
2570                          struct platform_device *pdev)
2571 {
2572         struct device_node *node = pdev->dev.of_node;
2573         struct device_node *slave_node;
2574         int i = 0, ret;
2575         u32 prop;
2576
2577         if (!node)
2578                 return -EINVAL;
2579
2580         if (of_property_read_u32(node, "slaves", &prop)) {
2581                 dev_err(&pdev->dev, "Missing slaves property in the DT.\n");
2582                 return -EINVAL;
2583         }
2584         data->slaves = prop;
2585
2586         if (of_property_read_u32(node, "active_slave", &prop)) {
2587                 dev_err(&pdev->dev, "Missing active_slave property in the DT.\n");
2588                 return -EINVAL;
2589         }
2590         data->active_slave = prop;
2591
2592         data->slave_data = devm_kzalloc(&pdev->dev, data->slaves
2593                                         * sizeof(struct cpsw_slave_data),
2594                                         GFP_KERNEL);
2595         if (!data->slave_data)
2596                 return -ENOMEM;
2597
2598         if (of_property_read_u32(node, "cpdma_channels", &prop)) {
2599                 dev_err(&pdev->dev, "Missing cpdma_channels property in the DT.\n");
2600                 return -EINVAL;
2601         }
2602         data->channels = prop;
2603
2604         if (of_property_read_u32(node, "ale_entries", &prop)) {
2605                 dev_err(&pdev->dev, "Missing ale_entries property in the DT.\n");
2606                 return -EINVAL;
2607         }
2608         data->ale_entries = prop;
2609
2610         if (of_property_read_u32(node, "bd_ram_size", &prop)) {
2611                 dev_err(&pdev->dev, "Missing bd_ram_size property in the DT.\n");
2612                 return -EINVAL;
2613         }
2614         data->bd_ram_size = prop;
2615
2616         if (of_property_read_u32(node, "mac_control", &prop)) {
2617                 dev_err(&pdev->dev, "Missing mac_control property in the DT.\n");
2618                 return -EINVAL;
2619         }
2620         data->mac_control = prop;
2621
2622         if (of_property_read_bool(node, "dual_emac"))
2623                 data->dual_emac = 1;
2624
2625         /*
2626          * Populate all the child nodes here...
2627          */
2628         ret = of_platform_populate(node, NULL, NULL, &pdev->dev);
2629         /* We do not want to force this, as in some cases may not have child */
2630         if (ret)
2631                 dev_warn(&pdev->dev, "Doesn't have any child node\n");
2632
2633         for_each_available_child_of_node(node, slave_node) {
2634                 struct cpsw_slave_data *slave_data = data->slave_data + i;
2635                 const void *mac_addr = NULL;
2636                 int lenp;
2637                 const __be32 *parp;
2638
2639                 /* This is no slave child node, continue */
2640                 if (strcmp(slave_node->name, "slave"))
2641                         continue;
2642
2643                 slave_data->phy_node = of_parse_phandle(slave_node,
2644                                                         "phy-handle", 0);
2645                 parp = of_get_property(slave_node, "phy_id", &lenp);
2646                 if (slave_data->phy_node) {
2647                         dev_dbg(&pdev->dev,
2648                                 "slave[%d] using phy-handle=\"%s\"\n",
2649                                 i, slave_data->phy_node->full_name);
2650                 } else if (of_phy_is_fixed_link(slave_node)) {
2651                         /* In the case of a fixed PHY, the DT node associated
2652                          * to the PHY is the Ethernet MAC DT node.
2653                          */
2654                         ret = of_phy_register_fixed_link(slave_node);
2655                         if (ret) {
2656                                 if (ret != -EPROBE_DEFER)
2657                                         dev_err(&pdev->dev, "failed to register fixed-link phy: %d\n", ret);
2658                                 return ret;
2659                         }
2660                         slave_data->phy_node = of_node_get(slave_node);
2661                 } else if (parp) {
2662                         u32 phyid;
2663                         struct device_node *mdio_node;
2664                         struct platform_device *mdio;
2665
2666                         if (lenp != (sizeof(__be32) * 2)) {
2667                                 dev_err(&pdev->dev, "Invalid slave[%d] phy_id property\n", i);
2668                                 goto no_phy_slave;
2669                         }
2670                         mdio_node = of_find_node_by_phandle(be32_to_cpup(parp));
2671                         phyid = be32_to_cpup(parp+1);
2672                         mdio = of_find_device_by_node(mdio_node);
2673                         of_node_put(mdio_node);
2674                         if (!mdio) {
2675                                 dev_err(&pdev->dev, "Missing mdio platform device\n");
2676                                 return -EINVAL;
2677                         }
2678                         snprintf(slave_data->phy_id, sizeof(slave_data->phy_id),
2679                                  PHY_ID_FMT, mdio->name, phyid);
2680                         put_device(&mdio->dev);
2681                 } else {
2682                         dev_err(&pdev->dev,
2683                                 "No slave[%d] phy_id, phy-handle, or fixed-link property\n",
2684                                 i);
2685                         goto no_phy_slave;
2686                 }
2687                 slave_data->phy_if = of_get_phy_mode(slave_node);
2688                 if (slave_data->phy_if < 0) {
2689                         dev_err(&pdev->dev, "Missing or malformed slave[%d] phy-mode property\n",
2690                                 i);
2691                         return slave_data->phy_if;
2692                 }
2693
2694 no_phy_slave:
2695                 mac_addr = of_get_mac_address(slave_node);
2696                 if (mac_addr) {
2697                         memcpy(slave_data->mac_addr, mac_addr, ETH_ALEN);
2698                 } else {
2699                         ret = ti_cm_get_macid(&pdev->dev, i,
2700                                               slave_data->mac_addr);
2701                         if (ret)
2702                                 return ret;
2703                 }
2704                 if (data->dual_emac) {
2705                         if (of_property_read_u32(slave_node, "dual_emac_res_vlan",
2706                                                  &prop)) {
2707                                 dev_err(&pdev->dev, "Missing dual_emac_res_vlan in DT.\n");
2708                                 slave_data->dual_emac_res_vlan = i+1;
2709                                 dev_err(&pdev->dev, "Using %d as Reserved VLAN for %d slave\n",
2710                                         slave_data->dual_emac_res_vlan, i);
2711                         } else {
2712                                 slave_data->dual_emac_res_vlan = prop;
2713                         }
2714                 }
2715
2716                 i++;
2717                 if (i == data->slaves)
2718                         break;
2719         }
2720
2721         return 0;
2722 }
2723
2724 static void cpsw_remove_dt(struct platform_device *pdev)
2725 {
2726         struct net_device *ndev = platform_get_drvdata(pdev);
2727         struct cpsw_common *cpsw = ndev_to_cpsw(ndev);
2728         struct cpsw_platform_data *data = &cpsw->data;
2729         struct device_node *node = pdev->dev.of_node;
2730         struct device_node *slave_node;
2731         int i = 0;
2732
2733         for_each_available_child_of_node(node, slave_node) {
2734                 struct cpsw_slave_data *slave_data = &data->slave_data[i];
2735
2736                 if (strcmp(slave_node->name, "slave"))
2737                         continue;
2738
2739                 if (of_phy_is_fixed_link(slave_node))
2740                         of_phy_deregister_fixed_link(slave_node);
2741
2742                 of_node_put(slave_data->phy_node);
2743
2744                 i++;
2745                 if (i == data->slaves)
2746                         break;
2747         }
2748
2749         of_platform_depopulate(&pdev->dev);
2750 }
2751
2752 static int cpsw_probe_dual_emac(struct cpsw_priv *priv)
2753 {
2754         struct cpsw_common              *cpsw = priv->cpsw;
2755         struct cpsw_platform_data       *data = &cpsw->data;
2756         struct net_device               *ndev;
2757         struct cpsw_priv                *priv_sl2;
2758         int ret = 0;
2759
2760         ndev = alloc_etherdev_mq(sizeof(struct cpsw_priv), CPSW_MAX_QUEUES);
2761         if (!ndev) {
2762                 dev_err(cpsw->dev, "cpsw: error allocating net_device\n");
2763                 return -ENOMEM;
2764         }
2765
2766         priv_sl2 = netdev_priv(ndev);
2767         priv_sl2->cpsw = cpsw;
2768         priv_sl2->ndev = ndev;
2769         priv_sl2->dev  = &ndev->dev;
2770         priv_sl2->msg_enable = netif_msg_init(debug_level, CPSW_DEBUG);
2771
2772         if (is_valid_ether_addr(data->slave_data[1].mac_addr)) {
2773                 memcpy(priv_sl2->mac_addr, data->slave_data[1].mac_addr,
2774                         ETH_ALEN);
2775                 dev_info(cpsw->dev, "cpsw: Detected MACID = %pM\n",
2776                          priv_sl2->mac_addr);
2777         } else {
2778                 random_ether_addr(priv_sl2->mac_addr);
2779                 dev_info(cpsw->dev, "cpsw: Random MACID = %pM\n",
2780                          priv_sl2->mac_addr);
2781         }
2782         memcpy(ndev->dev_addr, priv_sl2->mac_addr, ETH_ALEN);
2783
2784         priv_sl2->emac_port = 1;
2785         cpsw->slaves[1].ndev = ndev;
2786         ndev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
2787
2788         ndev->netdev_ops = &cpsw_netdev_ops;
2789         ndev->ethtool_ops = &cpsw_ethtool_ops;
2790
2791         /* register the network device */
2792         SET_NETDEV_DEV(ndev, cpsw->dev);
2793         ret = register_netdev(ndev);
2794         if (ret) {
2795                 dev_err(cpsw->dev, "cpsw: error registering net device\n");
2796                 free_netdev(ndev);
2797                 ret = -ENODEV;
2798         }
2799
2800         return ret;
2801 }
2802
2803 #define CPSW_QUIRK_IRQ          BIT(0)
2804
2805 static struct platform_device_id cpsw_devtype[] = {
2806         {
2807                 /* keep it for existing comaptibles */
2808                 .name = "cpsw",
2809                 .driver_data = CPSW_QUIRK_IRQ,
2810         }, {
2811                 .name = "am335x-cpsw",
2812                 .driver_data = CPSW_QUIRK_IRQ,
2813         }, {
2814                 .name = "am4372-cpsw",
2815                 .driver_data = 0,
2816         }, {
2817                 .name = "dra7-cpsw",
2818                 .driver_data = 0,
2819         }, {
2820                 /* sentinel */
2821         }
2822 };
2823 MODULE_DEVICE_TABLE(platform, cpsw_devtype);
2824
/* Indices into cpsw_devtype[], used by the OF match table below */
enum ti_cpsw_type {
	CPSW = 0,	/* legacy "ti,cpsw" compatible */
	AM335X_CPSW,
	AM4372_CPSW,
	DRA7_CPSW,
};
2831
2832 static const struct of_device_id cpsw_of_mtable[] = {
2833         { .compatible = "ti,cpsw", .data = &cpsw_devtype[CPSW], },
2834         { .compatible = "ti,am335x-cpsw", .data = &cpsw_devtype[AM335X_CPSW], },
2835         { .compatible = "ti,am4372-cpsw", .data = &cpsw_devtype[AM4372_CPSW], },
2836         { .compatible = "ti,dra7-cpsw", .data = &cpsw_devtype[DRA7_CPSW], },
2837         { /* sentinel */ },
2838 };
2839 MODULE_DEVICE_TABLE(of, cpsw_of_mtable);
2840
2841 static int cpsw_probe(struct platform_device *pdev)
2842 {
2843         struct clk                      *clk;
2844         struct cpsw_platform_data       *data;
2845         struct net_device               *ndev;
2846         struct cpsw_priv                *priv;
2847         struct cpdma_params             dma_params;
2848         struct cpsw_ale_params          ale_params;
2849         void __iomem                    *ss_regs;
2850         void __iomem                    *cpts_regs;
2851         struct resource                 *res, *ss_res;
2852         const struct of_device_id       *of_id;
2853         struct gpio_descs               *mode;
2854         u32 slave_offset, sliver_offset, slave_size;
2855         struct cpsw_common              *cpsw;
2856         int ret = 0, i;
2857         int irq;
2858
2859         cpsw = devm_kzalloc(&pdev->dev, sizeof(struct cpsw_common), GFP_KERNEL);
2860         if (!cpsw)
2861                 return -ENOMEM;
2862
2863         cpsw->dev = &pdev->dev;
2864
2865         ndev = alloc_etherdev_mq(sizeof(struct cpsw_priv), CPSW_MAX_QUEUES);
2866         if (!ndev) {
2867                 dev_err(&pdev->dev, "error allocating net_device\n");
2868                 return -ENOMEM;
2869         }
2870
2871         platform_set_drvdata(pdev, ndev);
2872         priv = netdev_priv(ndev);
2873         priv->cpsw = cpsw;
2874         priv->ndev = ndev;
2875         priv->dev  = &ndev->dev;
2876         priv->msg_enable = netif_msg_init(debug_level, CPSW_DEBUG);
2877         cpsw->rx_packet_max = max(rx_packet_max, 128);
2878
2879         mode = devm_gpiod_get_array_optional(&pdev->dev, "mode", GPIOD_OUT_LOW);
2880         if (IS_ERR(mode)) {
2881                 ret = PTR_ERR(mode);
2882                 dev_err(&pdev->dev, "gpio request failed, ret %d\n", ret);
2883                 goto clean_ndev_ret;
2884         }
2885
2886         /*
2887          * This may be required here for child devices.
2888          */
2889         pm_runtime_enable(&pdev->dev);
2890
2891         /* Select default pin state */
2892         pinctrl_pm_select_default_state(&pdev->dev);
2893
2894         /* Need to enable clocks with runtime PM api to access module
2895          * registers
2896          */
2897         ret = pm_runtime_get_sync(&pdev->dev);
2898         if (ret < 0) {
2899                 pm_runtime_put_noidle(&pdev->dev);
2900                 goto clean_runtime_disable_ret;
2901         }
2902
2903         ret = cpsw_probe_dt(&cpsw->data, pdev);
2904         if (ret)
2905                 goto clean_dt_ret;
2906
2907         data = &cpsw->data;
2908         cpsw->rx_ch_num = 1;
2909         cpsw->tx_ch_num = 1;
2910
2911         if (is_valid_ether_addr(data->slave_data[0].mac_addr)) {
2912                 memcpy(priv->mac_addr, data->slave_data[0].mac_addr, ETH_ALEN);
2913                 dev_info(&pdev->dev, "Detected MACID = %pM\n", priv->mac_addr);
2914         } else {
2915                 eth_random_addr(priv->mac_addr);
2916                 dev_info(&pdev->dev, "Random MACID = %pM\n", priv->mac_addr);
2917         }
2918
2919         memcpy(ndev->dev_addr, priv->mac_addr, ETH_ALEN);
2920
2921         cpsw->slaves = devm_kzalloc(&pdev->dev,
2922                                     sizeof(struct cpsw_slave) * data->slaves,
2923                                     GFP_KERNEL);
2924         if (!cpsw->slaves) {
2925                 ret = -ENOMEM;
2926                 goto clean_dt_ret;
2927         }
2928         for (i = 0; i < data->slaves; i++)
2929                 cpsw->slaves[i].slave_num = i;
2930
2931         cpsw->slaves[0].ndev = ndev;
2932         priv->emac_port = 0;
2933
2934         clk = devm_clk_get(&pdev->dev, "fck");
2935         if (IS_ERR(clk)) {
2936                 dev_err(priv->dev, "fck is not found\n");
2937                 ret = -ENODEV;
2938                 goto clean_dt_ret;
2939         }
2940         cpsw->bus_freq_mhz = clk_get_rate(clk) / 1000000;
2941
2942         ss_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2943         ss_regs = devm_ioremap_resource(&pdev->dev, ss_res);
2944         if (IS_ERR(ss_regs)) {
2945                 ret = PTR_ERR(ss_regs);
2946                 goto clean_dt_ret;
2947         }
2948         cpsw->regs = ss_regs;
2949
2950         cpsw->version = readl(&cpsw->regs->id_ver);
2951
2952         res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
2953         cpsw->wr_regs = devm_ioremap_resource(&pdev->dev, res);
2954         if (IS_ERR(cpsw->wr_regs)) {
2955                 ret = PTR_ERR(cpsw->wr_regs);
2956                 goto clean_dt_ret;
2957         }
2958
2959         memset(&dma_params, 0, sizeof(dma_params));
2960         memset(&ale_params, 0, sizeof(ale_params));
2961
2962         switch (cpsw->version) {
2963         case CPSW_VERSION_1:
2964                 cpsw->host_port_regs = ss_regs + CPSW1_HOST_PORT_OFFSET;
2965                 cpts_regs               = ss_regs + CPSW1_CPTS_OFFSET;
2966                 cpsw->hw_stats       = ss_regs + CPSW1_HW_STATS;
2967                 dma_params.dmaregs   = ss_regs + CPSW1_CPDMA_OFFSET;
2968                 dma_params.txhdp     = ss_regs + CPSW1_STATERAM_OFFSET;
2969                 ale_params.ale_regs  = ss_regs + CPSW1_ALE_OFFSET;
2970                 slave_offset         = CPSW1_SLAVE_OFFSET;
2971                 slave_size           = CPSW1_SLAVE_SIZE;
2972                 sliver_offset        = CPSW1_SLIVER_OFFSET;
2973                 dma_params.desc_mem_phys = 0;
2974                 break;
2975         case CPSW_VERSION_2:
2976         case CPSW_VERSION_3:
2977         case CPSW_VERSION_4:
2978                 cpsw->host_port_regs = ss_regs + CPSW2_HOST_PORT_OFFSET;
2979                 cpts_regs               = ss_regs + CPSW2_CPTS_OFFSET;
2980                 cpsw->hw_stats       = ss_regs + CPSW2_HW_STATS;
2981                 dma_params.dmaregs   = ss_regs + CPSW2_CPDMA_OFFSET;
2982                 dma_params.txhdp     = ss_regs + CPSW2_STATERAM_OFFSET;
2983                 ale_params.ale_regs  = ss_regs + CPSW2_ALE_OFFSET;
2984                 slave_offset         = CPSW2_SLAVE_OFFSET;
2985                 slave_size           = CPSW2_SLAVE_SIZE;
2986                 sliver_offset        = CPSW2_SLIVER_OFFSET;
2987                 dma_params.desc_mem_phys =
2988                         (u32 __force) ss_res->start + CPSW2_BD_OFFSET;
2989                 break;
2990         default:
2991                 dev_err(priv->dev, "unknown version 0x%08x\n", cpsw->version);
2992                 ret = -ENODEV;
2993                 goto clean_dt_ret;
2994         }
2995         for (i = 0; i < cpsw->data.slaves; i++) {
2996                 struct cpsw_slave *slave = &cpsw->slaves[i];
2997
2998                 cpsw_slave_init(slave, cpsw, slave_offset, sliver_offset);
2999                 slave_offset  += slave_size;
3000                 sliver_offset += SLIVER_SIZE;
3001         }
3002
3003         dma_params.dev          = &pdev->dev;
3004         dma_params.rxthresh     = dma_params.dmaregs + CPDMA_RXTHRESH;
3005         dma_params.rxfree       = dma_params.dmaregs + CPDMA_RXFREE;
3006         dma_params.rxhdp        = dma_params.txhdp + CPDMA_RXHDP;
3007         dma_params.txcp         = dma_params.txhdp + CPDMA_TXCP;
3008         dma_params.rxcp         = dma_params.txhdp + CPDMA_RXCP;
3009
3010         dma_params.num_chan             = data->channels;
3011         dma_params.has_soft_reset       = true;
3012         dma_params.min_packet_size      = CPSW_MIN_PACKET_SIZE;
3013         dma_params.desc_mem_size        = data->bd_ram_size;
3014         dma_params.desc_align           = 16;
3015         dma_params.has_ext_regs         = true;
3016         dma_params.desc_hw_addr         = dma_params.desc_mem_phys;
3017         dma_params.bus_freq_mhz         = cpsw->bus_freq_mhz;
3018         dma_params.descs_pool_size      = descs_pool_size;
3019
3020         cpsw->dma = cpdma_ctlr_create(&dma_params);
3021         if (!cpsw->dma) {
3022                 dev_err(priv->dev, "error initializing dma\n");
3023                 ret = -ENOMEM;
3024                 goto clean_dt_ret;
3025         }
3026
3027         cpsw->txv[0].ch = cpdma_chan_create(cpsw->dma, 0, cpsw_tx_handler, 0);
3028         cpsw->rxv[0].ch = cpdma_chan_create(cpsw->dma, 0, cpsw_rx_handler, 1);
3029         if (WARN_ON(!cpsw->rxv[0].ch || !cpsw->txv[0].ch)) {
3030                 dev_err(priv->dev, "error initializing dma channels\n");
3031                 ret = -ENOMEM;
3032                 goto clean_dma_ret;
3033         }
3034
3035         ale_params.dev                  = &pdev->dev;
3036         ale_params.ale_ageout           = ale_ageout;
3037         ale_params.ale_entries          = data->ale_entries;
3038         ale_params.ale_ports            = data->slaves;
3039
3040         cpsw->ale = cpsw_ale_create(&ale_params);
3041         if (!cpsw->ale) {
3042                 dev_err(priv->dev, "error initializing ale engine\n");
3043                 ret = -ENODEV;
3044                 goto clean_dma_ret;
3045         }
3046
3047         cpsw->cpts = cpts_create(cpsw->dev, cpts_regs, cpsw->dev->of_node);
3048         if (IS_ERR(cpsw->cpts)) {
3049                 ret = PTR_ERR(cpsw->cpts);
3050                 goto clean_ale_ret;
3051         }
3052
3053         ndev->irq = platform_get_irq(pdev, 1);
3054         if (ndev->irq < 0) {
3055                 dev_err(priv->dev, "error getting irq resource\n");
3056                 ret = ndev->irq;
3057                 goto clean_ale_ret;
3058         }
3059
3060         of_id = of_match_device(cpsw_of_mtable, &pdev->dev);
3061         if (of_id) {
3062                 pdev->id_entry = of_id->data;
3063                 if (pdev->id_entry->driver_data)
3064                         cpsw->quirk_irq = true;
3065         }
3066
3067         /* Grab RX and TX IRQs. Note that we also have RX_THRESHOLD and
3068          * MISC IRQs which are always kept disabled with this driver so
3069          * we will not request them.
3070          *
3071          * If anyone wants to implement support for those, make sure to
3072          * first request and append them to irqs_table array.
3073          */
3074
3075         /* RX IRQ */
3076         irq = platform_get_irq(pdev, 1);
3077         if (irq < 0) {
3078                 ret = irq;
3079                 goto clean_ale_ret;
3080         }
3081
3082         cpsw->irqs_table[0] = irq;
3083         ret = devm_request_irq(&pdev->dev, irq, cpsw_rx_interrupt,
3084                                0, dev_name(&pdev->dev), cpsw);
3085         if (ret < 0) {
3086                 dev_err(priv->dev, "error attaching irq (%d)\n", ret);
3087                 goto clean_ale_ret;
3088         }
3089
3090         /* TX IRQ */
3091         irq = platform_get_irq(pdev, 2);
3092         if (irq < 0) {
3093                 ret = irq;
3094                 goto clean_ale_ret;
3095         }
3096
3097         cpsw->irqs_table[1] = irq;
3098         ret = devm_request_irq(&pdev->dev, irq, cpsw_tx_interrupt,
3099                                0, dev_name(&pdev->dev), cpsw);
3100         if (ret < 0) {
3101                 dev_err(priv->dev, "error attaching irq (%d)\n", ret);
3102                 goto clean_ale_ret;
3103         }
3104
3105         ndev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
3106
3107         ndev->netdev_ops = &cpsw_netdev_ops;
3108         ndev->ethtool_ops = &cpsw_ethtool_ops;
3109         netif_napi_add(ndev, &cpsw->napi_rx, cpsw_rx_poll, CPSW_POLL_WEIGHT);
3110         netif_tx_napi_add(ndev, &cpsw->napi_tx, cpsw_tx_poll, CPSW_POLL_WEIGHT);
3111         cpsw_split_res(ndev);
3112
3113         /* register the network device */
3114         SET_NETDEV_DEV(ndev, &pdev->dev);
3115         ret = register_netdev(ndev);
3116         if (ret) {
3117                 dev_err(priv->dev, "error registering net device\n");
3118                 ret = -ENODEV;
3119                 goto clean_ale_ret;
3120         }
3121
3122         cpsw_notice(priv, probe,
3123                     "initialized device (regs %pa, irq %d, pool size %d)\n",
3124                     &ss_res->start, ndev->irq, dma_params.descs_pool_size);
3125         if (cpsw->data.dual_emac) {
3126                 ret = cpsw_probe_dual_emac(priv);
3127                 if (ret) {
3128                         cpsw_err(priv, probe, "error probe slave 2 emac interface\n");
3129                         goto clean_unregister_netdev_ret;
3130                 }
3131         }
3132
3133         pm_runtime_put(&pdev->dev);
3134
3135         return 0;
3136
3137 clean_unregister_netdev_ret:
3138         unregister_netdev(ndev);
3139 clean_ale_ret:
3140         cpsw_ale_destroy(cpsw->ale);
3141 clean_dma_ret:
3142         cpdma_ctlr_destroy(cpsw->dma);
3143 clean_dt_ret:
3144         cpsw_remove_dt(pdev);
3145         pm_runtime_put_sync(&pdev->dev);
3146 clean_runtime_disable_ret:
3147         pm_runtime_disable(&pdev->dev);
3148 clean_ndev_ret:
3149         free_netdev(priv->ndev);
3150         return ret;
3151 }
3152
/* cpsw_remove - unbind the driver.
 *
 * Unregisters the netdev(s), then tears down CPTS, ALE and CPDMA in
 * reverse order of creation before releasing DT resources and the
 * runtime PM references taken in probe.  The netdevs are freed last,
 * after nothing can reference them any more.
 */
static int cpsw_remove(struct platform_device *pdev)
{
	struct net_device *ndev = platform_get_drvdata(pdev);
	struct cpsw_common *cpsw = ndev_to_cpsw(ndev);
	int ret;

	/* module registers must stay clocked during the teardown below */
	ret = pm_runtime_get_sync(&pdev->dev);
	if (ret < 0) {
		pm_runtime_put_noidle(&pdev->dev);
		return ret;
	}

	/* dual_emac mode registered a second netdev for slave 1 */
	if (cpsw->data.dual_emac)
		unregister_netdev(cpsw->slaves[1].ndev);
	unregister_netdev(ndev);

	cpts_release(cpsw->cpts);
	cpsw_ale_destroy(cpsw->ale);
	cpdma_ctlr_destroy(cpsw->dma);
	cpsw_remove_dt(pdev);
	pm_runtime_put_sync(&pdev->dev);
	pm_runtime_disable(&pdev->dev);
	if (cpsw->data.dual_emac)
		free_netdev(cpsw->slaves[1].ndev);
	free_netdev(ndev);
	return 0;
}
3180
3181 #ifdef CONFIG_PM_SLEEP
3182 static int cpsw_suspend(struct device *dev)
3183 {
3184         struct platform_device  *pdev = to_platform_device(dev);
3185         struct net_device       *ndev = platform_get_drvdata(pdev);
3186         struct cpsw_common      *cpsw = ndev_to_cpsw(ndev);
3187
3188         if (cpsw->data.dual_emac) {
3189                 int i;
3190
3191                 for (i = 0; i < cpsw->data.slaves; i++) {
3192                         if (netif_running(cpsw->slaves[i].ndev))
3193                                 cpsw_ndo_stop(cpsw->slaves[i].ndev);
3194                 }
3195         } else {
3196                 if (netif_running(ndev))
3197                         cpsw_ndo_stop(ndev);
3198         }
3199
3200         /* Select sleep pin state */
3201         pinctrl_pm_select_sleep_state(dev);
3202
3203         return 0;
3204 }
3205
3206 static int cpsw_resume(struct device *dev)
3207 {
3208         struct platform_device  *pdev = to_platform_device(dev);
3209         struct net_device       *ndev = platform_get_drvdata(pdev);
3210         struct cpsw_common      *cpsw = ndev_to_cpsw(ndev);
3211
3212         /* Select default pin state */
3213         pinctrl_pm_select_default_state(dev);
3214
3215         /* shut up ASSERT_RTNL() warning in netif_set_real_num_tx/rx_queues */
3216         rtnl_lock();
3217         if (cpsw->data.dual_emac) {
3218                 int i;
3219
3220                 for (i = 0; i < cpsw->data.slaves; i++) {
3221                         if (netif_running(cpsw->slaves[i].ndev))
3222                                 cpsw_ndo_open(cpsw->slaves[i].ndev);
3223                 }
3224         } else {
3225                 if (netif_running(ndev))
3226                         cpsw_ndo_open(ndev);
3227         }
3228         rtnl_unlock();
3229
3230         return 0;
3231 }
3232 #endif
3233
/* PM callbacks are compiled out unless CONFIG_PM_SLEEP is set */
static SIMPLE_DEV_PM_OPS(cpsw_pm_ops, cpsw_suspend, cpsw_resume);

/* Platform driver glue: matched against DT via cpsw_of_mtable */
static struct platform_driver cpsw_driver = {
	.driver = {
		.name	 = "cpsw",
		.pm	 = &cpsw_pm_ops,
		.of_match_table = cpsw_of_mtable,
	},
	.probe = cpsw_probe,
	.remove = cpsw_remove,
};

/* Generates the module init/exit boilerplate for a platform driver */
module_platform_driver(cpsw_driver);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Cyril Chemparathy <cyril@ti.com>");
MODULE_AUTHOR("Mugunthan V N <mugunthanvnm@ti.com>");
MODULE_DESCRIPTION("TI CPSW Ethernet driver");