/*******************************************************************************
  This is the driver for the ST MAC 10/100/1000 on-chip Ethernet controllers.
  ST Ethernet IPs are built around a Synopsys IP Core.

	Copyright(C) 2007-2011 STMicroelectronics Ltd

  This program is free software; you can redistribute it and/or modify it
  under the terms and conditions of the GNU General Public License,
  version 2, as published by the Free Software Foundation.

  This program is distributed in the hope it will be useful, but WITHOUT
  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  more details.

  The full GNU General Public License is included in this distribution in
  the file called "COPYING".

  Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>

  Documentation available at:
	http://www.stlinux.com
  Support available at:
	https://bugzilla.stlinux.com/
*******************************************************************************/

#include <linux/clk.h>
#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>
#include <linux/if_ether.h>
#include <linux/crc32.h>
#include <linux/mii.h>
#include <linux/if.h>
#include <linux/if_vlan.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/prefetch.h>
#include <linux/pinctrl/consumer.h>
#ifdef CONFIG_DEBUG_FS
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#endif /* CONFIG_DEBUG_FS */
#include <linux/net_tstamp.h>
#include "stmmac_ptp.h"
#include "stmmac.h"
#include <linux/reset.h>
#include <linux/of_mdio.h>
#include "dwmac1000.h"

#define STMMAC_ALIGN(x) L1_CACHE_ALIGN(x)
#define TSO_MAX_BUFF_SIZE	(SZ_16K - 1)

/* Module parameters */
#define TX_TIMEO	5000
static int watchdog = TX_TIMEO;
module_param(watchdog, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(watchdog, "Transmit timeout in milliseconds (default 5s)");

static int debug = -1;
module_param(debug, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(debug, "Message Level (-1: default, 0: no output, 16: all)");

static int phyaddr = -1;
module_param(phyaddr, int, S_IRUGO);
MODULE_PARM_DESC(phyaddr, "Physical device address");

#define STMMAC_TX_THRESH	(DMA_TX_SIZE / 4)
#define STMMAC_RX_THRESH	(DMA_RX_SIZE / 4)

static int flow_ctrl = FLOW_OFF;
module_param(flow_ctrl, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(flow_ctrl, "Flow control ability [on/off]");

static int pause = PAUSE_TIME;
module_param(pause, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(pause, "Flow Control Pause Time");

#define TC_DEFAULT 64
static int tc = TC_DEFAULT;
module_param(tc, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(tc, "DMA threshold control value");

#define	DEFAULT_BUFSIZE	1536
static int buf_sz = DEFAULT_BUFSIZE;
module_param(buf_sz, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(buf_sz, "DMA buffer size");

#define	STMMAC_RX_COPYBREAK	256

static const u32 default_msg_level = (NETIF_MSG_DRV | NETIF_MSG_PROBE |
				      NETIF_MSG_LINK | NETIF_MSG_IFUP |
				      NETIF_MSG_IFDOWN | NETIF_MSG_TIMER);

#define STMMAC_DEFAULT_LPI_TIMER	1000
static int eee_timer = STMMAC_DEFAULT_LPI_TIMER;
module_param(eee_timer, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(eee_timer, "LPI tx expiration time in msec");
#define STMMAC_LPI_T(x) (jiffies + msecs_to_jiffies(x))

/* By default the driver uses the ring mode to manage tx and rx descriptors,
 * but it allows the user to force the chain mode instead of the ring mode.
 */
static unsigned int chain_mode;
module_param(chain_mode, int, S_IRUGO);
MODULE_PARM_DESC(chain_mode, "To use chain instead of ring mode");

static irqreturn_t stmmac_interrupt(int irq, void *dev_id);

#ifdef CONFIG_DEBUG_FS
static int stmmac_init_fs(struct net_device *dev);
static void stmmac_exit_fs(struct net_device *dev);
#endif

#define STMMAC_COAL_TIMER(x) (jiffies + usecs_to_jiffies(x))

/**
 * stmmac_verify_args - verify the driver parameters.
 * Description: it checks the driver parameters and sets a default in case of
 * errors.
 */
static void stmmac_verify_args(void)
{
	if (unlikely(watchdog < 0))
		watchdog = TX_TIMEO;
	if (unlikely((buf_sz < DEFAULT_BUFSIZE) || (buf_sz > BUF_SIZE_16KiB)))
		buf_sz = DEFAULT_BUFSIZE;
	if (unlikely(flow_ctrl > 1))
		flow_ctrl = FLOW_AUTO;
	else if (likely(flow_ctrl < 0))
		flow_ctrl = FLOW_OFF;
	if (unlikely((pause < 0) || (pause > 0xffff)))
		pause = PAUSE_TIME;
	if (eee_timer < 0)
		eee_timer = STMMAC_DEFAULT_LPI_TIMER;
}

/**
 * stmmac_disable_all_queues - Disable all queues
 * @priv: driver private structure
 */
static void stmmac_disable_all_queues(struct stmmac_priv *priv)
{
	u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
	u32 queue;

	for (queue = 0; queue < rx_queues_cnt; queue++) {
		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];

		napi_disable(&rx_q->napi);
	}
}

/**
 * stmmac_enable_all_queues - Enable all queues
 * @priv: driver private structure
 */
static void stmmac_enable_all_queues(struct stmmac_priv *priv)
{
	u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
	u32 queue;

	for (queue = 0; queue < rx_queues_cnt; queue++) {
		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];

		napi_enable(&rx_q->napi);
	}
}

/**
 * stmmac_stop_all_queues - Stop all queues
 * @priv: driver private structure
 */
static void stmmac_stop_all_queues(struct stmmac_priv *priv)
{
	u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
	u32 queue;

	for (queue = 0; queue < tx_queues_cnt; queue++)
		netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
}

/**
 * stmmac_start_all_queues - Start all queues
 * @priv: driver private structure
 */
static void stmmac_start_all_queues(struct stmmac_priv *priv)
{
	u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
	u32 queue;

	for (queue = 0; queue < tx_queues_cnt; queue++)
		netif_tx_start_queue(netdev_get_tx_queue(priv->dev, queue));
}

/**
 * stmmac_clk_csr_set - dynamically set the MDC clock
 * @priv: driver private structure
 * Description: this is to dynamically set the MDC clock according to the csr
 * clock input.
 * Note:
 *	If a specific clk_csr value is passed from the platform
 *	this means that the CSR Clock Range selection cannot be
 *	changed at run-time and it is fixed (as reported in the driver
 *	documentation). Otherwise the driver will try to set the MDC
 *	clock dynamically according to the actual clock input.
 */
static void stmmac_clk_csr_set(struct stmmac_priv *priv)
{
	u32 clk_rate;

	clk_rate = clk_get_rate(priv->plat->stmmac_clk);

	/* The platform-provided default clk_csr is assumed valid in all
	 * cases except the ones mentioned below. For values higher than
	 * the IEEE 802.3 specified frequency we cannot estimate the proper
	 * divider, as the frequency of clk_csr_i is not known, so we do
	 * not change the default divider.
	 */
	if (!(priv->clk_csr & MAC_CSR_H_FRQ_MASK)) {
		if (clk_rate < CSR_F_35M)
			priv->clk_csr = STMMAC_CSR_20_35M;
		else if ((clk_rate >= CSR_F_35M) && (clk_rate < CSR_F_60M))
			priv->clk_csr = STMMAC_CSR_35_60M;
		else if ((clk_rate >= CSR_F_60M) && (clk_rate < CSR_F_100M))
			priv->clk_csr = STMMAC_CSR_60_100M;
		else if ((clk_rate >= CSR_F_100M) && (clk_rate < CSR_F_150M))
			priv->clk_csr = STMMAC_CSR_100_150M;
		else if ((clk_rate >= CSR_F_150M) && (clk_rate < CSR_F_250M))
			priv->clk_csr = STMMAC_CSR_150_250M;
		else if ((clk_rate >= CSR_F_250M) && (clk_rate < CSR_F_300M))
			priv->clk_csr = STMMAC_CSR_250_300M;
	}

	if (priv->plat->has_sun8i) {
		if (clk_rate > 160000000)
			priv->clk_csr = 0x03;
		else if (clk_rate > 80000000)
			priv->clk_csr = 0x02;
		else if (clk_rate > 40000000)
			priv->clk_csr = 0x01;
		else
			priv->clk_csr = 0;
	}
}
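
/* Illustrative example: with a 50 MHz csr clock, clk_rate falls in the
 * [CSR_F_35M, CSR_F_60M) range above, so the MDC divider field is set to
 * STMMAC_CSR_35_60M; the resulting MDC frequency for each range is defined
 * by the Synopsys databook.
 */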

static void print_pkt(unsigned char *buf, int len)
{
	pr_debug("len = %d byte, buf addr: 0x%p\n", len, buf);
	print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, buf, len);
}

static inline u32 stmmac_tx_avail(struct stmmac_priv *priv, u32 queue)
{
	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
	u32 avail;

	if (tx_q->dirty_tx > tx_q->cur_tx)
		avail = tx_q->dirty_tx - tx_q->cur_tx - 1;
	else
		avail = DMA_TX_SIZE - tx_q->cur_tx + tx_q->dirty_tx - 1;

	return avail;
}
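
/* Illustrative example of the circular ring arithmetic above: with
 * DMA_TX_SIZE = 256, cur_tx = 10 and dirty_tx = 5, the else branch applies
 * and avail = 256 - 10 + 5 - 1 = 250 free descriptors; one slot is always
 * kept unused so a full ring can be told apart from an empty one.
 */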

/**
 * stmmac_rx_dirty - get the number of dirty RX descriptors
 * @priv: driver private structure
 * @queue: RX queue index
 */
static inline u32 stmmac_rx_dirty(struct stmmac_priv *priv, u32 queue)
{
	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
	u32 dirty;

	if (rx_q->dirty_rx <= rx_q->cur_rx)
		dirty = rx_q->cur_rx - rx_q->dirty_rx;
	else
		dirty = DMA_RX_SIZE - rx_q->dirty_rx + rx_q->cur_rx;

	return dirty;
}
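
/* Illustrative example: with DMA_RX_SIZE = 256, cur_rx = 3 and
 * dirty_rx = 250, the ring has wrapped and dirty = 256 - 250 + 3 = 9
 * descriptors are waiting to be refilled.
 */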

/**
 * stmmac_hw_fix_mac_speed - callback for speed selection
 * @priv: driver private structure
 * Description: on some platforms (e.g. ST), some HW system configuration
 * registers have to be set according to the negotiated link speed.
 */
static inline void stmmac_hw_fix_mac_speed(struct stmmac_priv *priv)
{
	struct net_device *ndev = priv->dev;
	struct phy_device *phydev = ndev->phydev;

	if (likely(priv->plat->fix_mac_speed))
		priv->plat->fix_mac_speed(priv->plat->bsp_priv, phydev->speed);
}

/**
 * stmmac_enable_eee_mode - check and enter LPI mode
 * @priv: driver private structure
 * Description: this function verifies that all TX queues are idle and, if
 * so, enters LPI mode when EEE is enabled.
 */
static void stmmac_enable_eee_mode(struct stmmac_priv *priv)
{
	u32 tx_cnt = priv->plat->tx_queues_to_use;
	u32 queue;

	/* check if all TX queues have the work finished */
	for (queue = 0; queue < tx_cnt; queue++) {
		struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];

		if (tx_q->dirty_tx != tx_q->cur_tx)
			return; /* still unfinished work */
	}

	/* Check and enter in LPI mode */
	if (!priv->tx_path_in_lpi_mode)
		priv->hw->mac->set_eee_mode(priv->hw,
					    priv->plat->en_tx_lpi_clockgating);
}

/**
 * stmmac_disable_eee_mode - disable and exit from LPI mode
 * @priv: driver private structure
 * Description: this function exits and disables EEE when the LPI state is
 * active. It is called from the xmit path.
 */
void stmmac_disable_eee_mode(struct stmmac_priv *priv)
{
	priv->hw->mac->reset_eee_mode(priv->hw);
	del_timer_sync(&priv->eee_ctrl_timer);
	priv->tx_path_in_lpi_mode = false;
}

/**
 * stmmac_eee_ctrl_timer - EEE TX SW timer.
 * @arg : data hook
 * Description:
 *  if there is no data transfer and if we are not in LPI state,
 *  then the MAC transmitter can be moved to the LPI state.
 */
static void stmmac_eee_ctrl_timer(unsigned long arg)
{
	struct stmmac_priv *priv = (struct stmmac_priv *)arg;

	stmmac_enable_eee_mode(priv);
	mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(eee_timer));
}
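
/* Note: the timer above re-arms itself every eee_timer milliseconds
 * (1000 ms by default), so while the TX path stays busy the driver keeps
 * re-checking until LPI can actually be entered.
 */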

/**
 * stmmac_eee_init - init EEE
 * @priv: driver private structure
 * Description:
 *  if the GMAC supports the EEE (from the HW cap reg) and the phy device
 *  can also manage EEE, this function enables the LPI state and starts the
 *  related timer.
 */
bool stmmac_eee_init(struct stmmac_priv *priv)
{
	struct net_device *ndev = priv->dev;
	unsigned long flags;
	bool ret = false;

	/* Using PCS we cannot access the phy registers at this stage,
	 * so we do not support extra features like EEE.
	 */
	if ((priv->hw->pcs == STMMAC_PCS_RGMII) ||
	    (priv->hw->pcs == STMMAC_PCS_TBI) ||
	    (priv->hw->pcs == STMMAC_PCS_RTBI))
		goto out;

	/* MAC core supports the EEE feature. */
	if (priv->dma_cap.eee) {
		int tx_lpi_timer = priv->tx_lpi_timer;

		/* Check if the PHY supports EEE */
		if (phy_init_eee(ndev->phydev, 1)) {
			/* Handle the case where EEE cannot be supported
			 * anymore at run time (for example because the link
			 * partner capabilities have changed).
			 * In that case the driver disables its own timers.
			 */
			spin_lock_irqsave(&priv->lock, flags);
			if (priv->eee_active) {
				netdev_dbg(priv->dev, "disable EEE\n");
				del_timer_sync(&priv->eee_ctrl_timer);
				priv->hw->mac->set_eee_timer(priv->hw, 0,
							     tx_lpi_timer);
			}
			priv->eee_active = 0;
			spin_unlock_irqrestore(&priv->lock, flags);
			goto out;
		}
		/* Activate the EEE and start timers */
		spin_lock_irqsave(&priv->lock, flags);
		if (!priv->eee_active) {
			priv->eee_active = 1;
			setup_timer(&priv->eee_ctrl_timer,
				    stmmac_eee_ctrl_timer,
				    (unsigned long)priv);
			mod_timer(&priv->eee_ctrl_timer,
				  STMMAC_LPI_T(eee_timer));

			priv->hw->mac->set_eee_timer(priv->hw,
						     STMMAC_DEFAULT_LIT_LS,
						     tx_lpi_timer);
		}
		/* Set HW EEE according to the speed */
		priv->hw->mac->set_eee_pls(priv->hw, ndev->phydev->link);

		ret = true;
		spin_unlock_irqrestore(&priv->lock, flags);

		netdev_dbg(priv->dev, "Energy-Efficient Ethernet initialized\n");
	}
out:
	return ret;
}
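
/* Note: the boolean returned above is cached by stmmac_adjust_link() in
 * priv->eee_enabled on every link change, since the link partner's EEE
 * capabilities may differ from one network to another.
 */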

/* stmmac_get_tx_hwtstamp - get HW TX timestamps
 * @priv: driver private structure
 * @p : descriptor pointer
 * @skb : the socket buffer
 * Description :
 * This function reads the timestamp from the descriptor, performs some
 * sanity checks and then passes it to the stack.
 */
static void stmmac_get_tx_hwtstamp(struct stmmac_priv *priv,
				   struct dma_desc *p, struct sk_buff *skb)
{
	struct skb_shared_hwtstamps shhwtstamp;
	u64 ns;

	if (!priv->hwts_tx_en)
		return;

	/* exit if skb doesn't support hw tstamp */
	if (likely(!skb || !(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)))
		return;

	/* check tx tstamp status */
	if (priv->hw->desc->get_tx_timestamp_status(p)) {
		/* get the valid tstamp */
		ns = priv->hw->desc->get_timestamp(p, priv->adv_ts);

		memset(&shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
		shhwtstamp.hwtstamp = ns_to_ktime(ns);

		netdev_dbg(priv->dev, "get valid TX hw timestamp %llu\n", ns);
		/* pass tstamp to stack */
		skb_tstamp_tx(skb, &shhwtstamp);
	}
}

/* stmmac_get_rx_hwtstamp - get HW RX timestamps
 * @priv: driver private structure
 * @p : descriptor pointer
 * @np : next descriptor pointer
 * @skb : the socket buffer
 * Description :
 * This function reads the received packet's timestamp from the descriptor
 * and passes it to the stack. It also performs some sanity checks.
 */
static void stmmac_get_rx_hwtstamp(struct stmmac_priv *priv, struct dma_desc *p,
				   struct dma_desc *np, struct sk_buff *skb)
{
	struct skb_shared_hwtstamps *shhwtstamp = NULL;
	struct dma_desc *desc = p;
	u64 ns;

	if (!priv->hwts_rx_en)
		return;
	/* For GMAC4, the valid timestamp is from CTX next desc. */
	if (priv->plat->has_gmac4)
		desc = np;

	/* Check if timestamp is available */
	if (priv->hw->desc->get_rx_timestamp_status(desc, priv->adv_ts)) {
		ns = priv->hw->desc->get_timestamp(desc, priv->adv_ts);
		netdev_dbg(priv->dev, "get valid RX hw timestamp %llu\n", ns);
		shhwtstamp = skb_hwtstamps(skb);
		memset(shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
		shhwtstamp->hwtstamp = ns_to_ktime(ns);
	} else  {
		netdev_dbg(priv->dev, "cannot get RX hw timestamp\n");
	}
}

/**
 *  stmmac_hwtstamp_ioctl - control hardware timestamping.
 *  @dev: device pointer.
 *  @ifr: An IOCTL specific structure, that can contain a pointer to
 *  a proprietary structure used to pass information to the driver.
 *  Description:
 *  This function configures the MAC to enable/disable both outgoing(TX)
 *  and incoming(RX) packets time stamping based on user input.
 *  Return Value:
 *  0 on success and an appropriate negative errno on failure.
 */
static int stmmac_hwtstamp_ioctl(struct net_device *dev, struct ifreq *ifr)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	struct hwtstamp_config config;
	struct timespec64 now;
	u64 temp = 0;
	u32 ptp_v2 = 0;
	u32 tstamp_all = 0;
	u32 ptp_over_ipv4_udp = 0;
	u32 ptp_over_ipv6_udp = 0;
	u32 ptp_over_ethernet = 0;
	u32 snap_type_sel = 0;
	u32 ts_master_en = 0;
	u32 ts_event_en = 0;
	u32 value = 0;
	u32 sec_inc;

	if (!(priv->dma_cap.time_stamp || priv->adv_ts)) {
		netdev_alert(priv->dev, "No support for HW time stamping\n");
		priv->hwts_tx_en = 0;
		priv->hwts_rx_en = 0;

		return -EOPNOTSUPP;
	}

	if (copy_from_user(&config, ifr->ifr_data,
			   sizeof(struct hwtstamp_config)))
		return -EFAULT;

	netdev_dbg(priv->dev, "%s config flags:0x%x, tx_type:0x%x, rx_filter:0x%x\n",
		   __func__, config.flags, config.tx_type, config.rx_filter);

	/* reserved for future extensions */
	if (config.flags)
		return -EINVAL;

	if (config.tx_type != HWTSTAMP_TX_OFF &&
	    config.tx_type != HWTSTAMP_TX_ON)
		return -ERANGE;

	if (priv->adv_ts) {
		switch (config.rx_filter) {
		case HWTSTAMP_FILTER_NONE:
			/* time stamp no incoming packet at all */
			config.rx_filter = HWTSTAMP_FILTER_NONE;
			break;

		case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
			/* PTP v1, UDP, any kind of event packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
			/* take time stamp for all event messages */
			if (priv->plat->has_gmac4)
				snap_type_sel = PTP_GMAC4_TCR_SNAPTYPSEL_1;
			else
				snap_type_sel = PTP_TCR_SNAPTYPSEL_1;

			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			break;

		case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
			/* PTP v1, UDP, Sync packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_SYNC;
			/* take time stamp for SYNC messages only */
			ts_event_en = PTP_TCR_TSEVNTENA;

			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			break;

		case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
			/* PTP v1, UDP, Delay_req packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ;
			/* take time stamp for Delay_Req messages only */
			ts_master_en = PTP_TCR_TSMSTRENA;
			ts_event_en = PTP_TCR_TSEVNTENA;

			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			break;

		case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
			/* PTP v2, UDP, any kind of event packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
			ptp_v2 = PTP_TCR_TSVER2ENA;
			/* take time stamp for all event messages */
			if (priv->plat->has_gmac4)
				snap_type_sel = PTP_GMAC4_TCR_SNAPTYPSEL_1;
			else
				snap_type_sel = PTP_TCR_SNAPTYPSEL_1;

			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			break;

		case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
			/* PTP v2, UDP, Sync packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_SYNC;
			ptp_v2 = PTP_TCR_TSVER2ENA;
			/* take time stamp for SYNC messages only */
			ts_event_en = PTP_TCR_TSEVNTENA;

			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			break;

		case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
			/* PTP v2, UDP, Delay_req packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ;
			ptp_v2 = PTP_TCR_TSVER2ENA;
			/* take time stamp for Delay_Req messages only */
			ts_master_en = PTP_TCR_TSMSTRENA;
			ts_event_en = PTP_TCR_TSEVNTENA;

			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			break;

		case HWTSTAMP_FILTER_PTP_V2_EVENT:
			/* PTP v2/802.1AS, any layer, any kind of event packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
			ptp_v2 = PTP_TCR_TSVER2ENA;
			/* take time stamp for all event messages */
			if (priv->plat->has_gmac4)
				snap_type_sel = PTP_GMAC4_TCR_SNAPTYPSEL_1;
			else
				snap_type_sel = PTP_TCR_SNAPTYPSEL_1;

			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			ptp_over_ethernet = PTP_TCR_TSIPENA;
			break;

		case HWTSTAMP_FILTER_PTP_V2_SYNC:
			/* PTP v2/802.1AS, any layer, Sync packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_SYNC;
			ptp_v2 = PTP_TCR_TSVER2ENA;
			/* take time stamp for SYNC messages only */
			ts_event_en = PTP_TCR_TSEVNTENA;

			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			ptp_over_ethernet = PTP_TCR_TSIPENA;
			break;

		case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
			/* PTP v2/802.1AS, any layer, Delay_req packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_DELAY_REQ;
			ptp_v2 = PTP_TCR_TSVER2ENA;
			/* take time stamp for Delay_Req messages only */
			ts_master_en = PTP_TCR_TSMSTRENA;
			ts_event_en = PTP_TCR_TSEVNTENA;

			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			ptp_over_ethernet = PTP_TCR_TSIPENA;
			break;

		case HWTSTAMP_FILTER_NTP_ALL:
		case HWTSTAMP_FILTER_ALL:
			/* time stamp any incoming packet */
			config.rx_filter = HWTSTAMP_FILTER_ALL;
			tstamp_all = PTP_TCR_TSENALL;
			break;

		default:
			return -ERANGE;
		}
	} else {
		switch (config.rx_filter) {
		case HWTSTAMP_FILTER_NONE:
			config.rx_filter = HWTSTAMP_FILTER_NONE;
			break;
		default:
			/* PTP v1, UDP, any kind of event packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
			break;
		}
	}
	priv->hwts_rx_en = ((config.rx_filter == HWTSTAMP_FILTER_NONE) ? 0 : 1);
	priv->hwts_tx_en = config.tx_type == HWTSTAMP_TX_ON;

	if (!priv->hwts_tx_en && !priv->hwts_rx_en) {
		priv->hw->ptp->config_hw_tstamping(priv->ptpaddr, 0);
	} else {
		value = (PTP_TCR_TSENA | PTP_TCR_TSCFUPDT | PTP_TCR_TSCTRLSSR |
			 tstamp_all | ptp_v2 | ptp_over_ethernet |
			 ptp_over_ipv6_udp | ptp_over_ipv4_udp | ts_event_en |
			 ts_master_en | snap_type_sel);
		priv->hw->ptp->config_hw_tstamping(priv->ptpaddr, value);

		/* program Sub Second Increment reg */
		sec_inc = priv->hw->ptp->config_sub_second_increment(
			priv->ptpaddr, priv->plat->clk_ptp_rate,
			priv->plat->has_gmac4);
		temp = div_u64(1000000000ULL, sec_inc);

		/* Calculate the default addend value:
		 * addend = 2^32 / freq_div_ratio;
		 * where freq_div_ratio = 1e9 ns / sec_inc
		 */
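		/* Illustrative arithmetic (example values only): if
		 * config_sub_second_increment() returned sec_inc = 20 ns,
		 * then temp = 1e9 / 20 = 5e7 here, and the lines below
		 * compute addend = (5e7 << 32) / clk_ptp_rate; with
		 * clk_ptp_rate = 62.5 MHz that is roughly 0.8 * 2^32.
		 */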
		temp = (u64)(temp << 32);
		priv->default_addend = div_u64(temp, priv->plat->clk_ptp_rate);
		priv->hw->ptp->config_addend(priv->ptpaddr,
					     priv->default_addend);

		/* initialize system time */
		ktime_get_real_ts64(&now);

		/* lower 32 bits of tv_sec are safe until y2106 */
		priv->hw->ptp->init_systime(priv->ptpaddr, (u32)now.tv_sec,
					    now.tv_nsec);
	}

	return copy_to_user(ifr->ifr_data, &config,
			    sizeof(struct hwtstamp_config)) ? -EFAULT : 0;
}
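
/* Sketch of how user space reaches the function above (illustrative, not
 * part of the driver): a SIOCSHWTSTAMP ioctl carrying a
 * struct hwtstamp_config, e.g.
 *
 *	struct hwtstamp_config cfg = {
 *		.tx_type   = HWTSTAMP_TX_ON,
 *		.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT,
 *	};
 *	struct ifreq ifr = { .ifr_data = (void *)&cfg };
 *
 *	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ);
 *	ioctl(sock_fd, SIOCSHWTSTAMP, &ifr);
 *
 * where sock_fd is any open socket descriptor and "eth0" names the stmmac
 * interface.
 */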

/**
 * stmmac_init_ptp - init PTP
 * @priv: driver private structure
 * Description: this is to verify if the HW supports the PTPv1 or PTPv2.
 * This is done by looking at the HW cap. register.
 * This function also registers the ptp driver.
 */
static int stmmac_init_ptp(struct stmmac_priv *priv)
{
	if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp))
		return -EOPNOTSUPP;

	priv->adv_ts = 0;
	/* Check if adv_ts can be enabled for dwmac 4.x core */
	if (priv->plat->has_gmac4 && priv->dma_cap.atime_stamp)
		priv->adv_ts = 1;
	/* Dwmac 3.x core with extend_desc can support adv_ts */
	else if (priv->extend_desc && priv->dma_cap.atime_stamp)
		priv->adv_ts = 1;

	if (priv->dma_cap.time_stamp)
		netdev_info(priv->dev, "IEEE 1588-2002 Timestamp supported\n");

	if (priv->adv_ts)
		netdev_info(priv->dev,
			    "IEEE 1588-2008 Advanced Timestamp supported\n");

	priv->hw->ptp = &stmmac_ptp;
	priv->hwts_tx_en = 0;
	priv->hwts_rx_en = 0;

	stmmac_ptp_register(priv);

	return 0;
}

static void stmmac_release_ptp(struct stmmac_priv *priv)
{
	if (priv->plat->clk_ptp_ref)
		clk_disable_unprepare(priv->plat->clk_ptp_ref);
	stmmac_ptp_unregister(priv);
}

/**
 *  stmmac_mac_flow_ctrl - Configure flow control in all queues
 *  @priv: driver private structure
 *  @duplex: duplex mode
 *  Description: It is used for configuring the flow control in all queues
 */
static void stmmac_mac_flow_ctrl(struct stmmac_priv *priv, u32 duplex)
{
	u32 tx_cnt = priv->plat->tx_queues_to_use;

	priv->hw->mac->flow_ctrl(priv->hw, duplex, priv->flow_ctrl,
				 priv->pause, tx_cnt);
}

/**
 * stmmac_adjust_link - adjusts the link parameters
 * @dev: net device structure
 * Description: this is the helper called by the physical abstraction layer
 * drivers to communicate the phy link status. According to the speed and
 * duplex this driver can invoke registered glue-logic as well.
 * It also invokes the EEE initialization because the link can switch
 * between different (EEE-capable) networks.
 */
static void stmmac_adjust_link(struct net_device *dev)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	struct phy_device *phydev = dev->phydev;
	unsigned long flags;
	bool new_state = false;

	if (!phydev)
		return;

	spin_lock_irqsave(&priv->lock, flags);

	if (phydev->link) {
		u32 ctrl = readl(priv->ioaddr + MAC_CTRL_REG);

		/* Now we make sure that we can be in full duplex mode.
		 * If not, we operate in half-duplex mode.
		 */
		if (phydev->duplex != priv->oldduplex) {
			new_state = true;
			if (!phydev->duplex)
				ctrl &= ~priv->hw->link.duplex;
			else
				ctrl |= priv->hw->link.duplex;
			priv->oldduplex = phydev->duplex;
		}
		/* Flow Control operation */
		if (phydev->pause)
			stmmac_mac_flow_ctrl(priv, phydev->duplex);

		if (phydev->speed != priv->speed) {
			new_state = true;
			ctrl &= ~priv->hw->link.speed_mask;
			switch (phydev->speed) {
			case SPEED_1000:
				ctrl |= priv->hw->link.speed1000;
				break;
			case SPEED_100:
				ctrl |= priv->hw->link.speed100;
				break;
			case SPEED_10:
				ctrl |= priv->hw->link.speed10;
				break;
			default:
				netif_warn(priv, link, priv->dev,
					   "broken speed: %d\n", phydev->speed);
				phydev->speed = SPEED_UNKNOWN;
				break;
			}
			if (phydev->speed != SPEED_UNKNOWN)
				stmmac_hw_fix_mac_speed(priv);
			priv->speed = phydev->speed;
		}

		writel(ctrl, priv->ioaddr + MAC_CTRL_REG);

		if (!priv->oldlink) {
			new_state = true;
			priv->oldlink = true;
		}
	} else if (priv->oldlink) {
		new_state = true;
		priv->oldlink = false;
		priv->speed = SPEED_UNKNOWN;
		priv->oldduplex = DUPLEX_UNKNOWN;
	}

	if (new_state && netif_msg_link(priv))
		phy_print_status(phydev);

	spin_unlock_irqrestore(&priv->lock, flags);

	if (phydev->is_pseudo_fixed_link)
		/* Stop the PHY layer from calling the adjust_link hook when
		 * a switch is attached to the stmmac driver.
		 */
		phydev->irq = PHY_IGNORE_INTERRUPT;
	else
		/* At this stage, init the EEE if supported.
		 * Never called in case of fixed_link.
		 */
		priv->eee_enabled = stmmac_eee_init(priv);
}

/**
 * stmmac_check_pcs_mode - verify if RGMII/SGMII is supported
 * @priv: driver private structure
 * Description: this is to verify if the HW supports the PCS.
 * Physical Coding Sublayer (PCS) interface that can be used when the MAC is
 * configured for the TBI, RTBI, or SGMII PHY interface.
 */
static void stmmac_check_pcs_mode(struct stmmac_priv *priv)
{
	int interface = priv->plat->interface;

	if (priv->dma_cap.pcs) {
		if ((interface == PHY_INTERFACE_MODE_RGMII) ||
		    (interface == PHY_INTERFACE_MODE_RGMII_ID) ||
		    (interface == PHY_INTERFACE_MODE_RGMII_RXID) ||
		    (interface == PHY_INTERFACE_MODE_RGMII_TXID)) {
			netdev_dbg(priv->dev, "PCS RGMII support enabled\n");
			priv->hw->pcs = STMMAC_PCS_RGMII;
		} else if (interface == PHY_INTERFACE_MODE_SGMII) {
			netdev_dbg(priv->dev, "PCS SGMII support enabled\n");
			priv->hw->pcs = STMMAC_PCS_SGMII;
		}
	}
}

/**
 * stmmac_init_phy - PHY initialization
 * @dev: net device structure
 * Description: it initializes the driver's PHY state, and attaches the PHY
 * to the MAC driver.
 *  Return value:
 *  0 on success
 */
static int stmmac_init_phy(struct net_device *dev)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	struct phy_device *phydev;
	char phy_id_fmt[MII_BUS_ID_SIZE + 3];
	char bus_id[MII_BUS_ID_SIZE];
	int interface = priv->plat->interface;
	int max_speed = priv->plat->max_speed;

	priv->oldlink = false;
	priv->speed = SPEED_UNKNOWN;
	priv->oldduplex = DUPLEX_UNKNOWN;

	if (priv->plat->phy_node) {
		phydev = of_phy_connect(dev, priv->plat->phy_node,
					&stmmac_adjust_link, 0, interface);
	} else {
		snprintf(bus_id, MII_BUS_ID_SIZE, "stmmac-%x",
			 priv->plat->bus_id);

		snprintf(phy_id_fmt, MII_BUS_ID_SIZE + 3, PHY_ID_FMT, bus_id,
			 priv->plat->phy_addr);
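		/* Illustrative: PHY_ID_FMT is "%s:%02x", so with bus_id 0
		 * and phy_addr 1 the string built above is "stmmac-0:01";
		 * the actual values come from the platform data.
		 */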
		netdev_dbg(priv->dev, "%s: trying to attach to %s\n", __func__,
			   phy_id_fmt);

		phydev = phy_connect(dev, phy_id_fmt, &stmmac_adjust_link,
				     interface);
	}

	if (IS_ERR_OR_NULL(phydev)) {
		netdev_err(priv->dev, "Could not attach to PHY\n");
		if (!phydev)
			return -ENODEV;

		return PTR_ERR(phydev);
	}

	/* Stop Advertising 1000BASE Capability if interface is not GMII */
	if ((interface == PHY_INTERFACE_MODE_MII) ||
	    (interface == PHY_INTERFACE_MODE_RMII) ||
	    (max_speed < 1000 && max_speed > 0))
		phydev->advertising &= ~(SUPPORTED_1000baseT_Half |
					 SUPPORTED_1000baseT_Full);

	/*
	 * Broken HW is sometimes missing the pull-up resistor on the
	 * MDIO line, which results in reads to non-existent devices returning
	 * 0 rather than 0xffff. Catch this here and treat 0 as a non-existent
	 * device as well.
	 * Note: phydev->phy_id is the result of reading the UID PHY registers.
	 */
	if (!priv->plat->phy_node && phydev->phy_id == 0) {
		phy_disconnect(phydev);
		return -ENODEV;
	}

	/* stmmac_adjust_link will change this to PHY_IGNORE_INTERRUPT to avoid
	 * subsequent PHY polling; make sure we force a link transition if
	 * we have an UP/DOWN/UP transition
	 */
	if (phydev->is_pseudo_fixed_link)
		phydev->irq = PHY_POLL;

	phy_attached_info(phydev);
	return 0;
}

static void stmmac_display_rx_rings(struct stmmac_priv *priv)
{
	u32 rx_cnt = priv->plat->rx_queues_to_use;
	void *head_rx;
	u32 queue;

	/* Display RX rings */
	for (queue = 0; queue < rx_cnt; queue++) {
		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];

		pr_info("\tRX Queue %u rings\n", queue);

		if (priv->extend_desc)
			head_rx = (void *)rx_q->dma_erx;
		else
			head_rx = (void *)rx_q->dma_rx;

		/* Display RX ring */
		priv->hw->desc->display_ring(head_rx, DMA_RX_SIZE, true);
	}
}

static void stmmac_display_tx_rings(struct stmmac_priv *priv)
{
	u32 tx_cnt = priv->plat->tx_queues_to_use;
	void *head_tx;
	u32 queue;

	/* Display TX rings */
	for (queue = 0; queue < tx_cnt; queue++) {
		struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];

		pr_info("\tTX Queue %u rings\n", queue);

		if (priv->extend_desc)
			head_tx = (void *)tx_q->dma_etx;
		else
			head_tx = (void *)tx_q->dma_tx;

		priv->hw->desc->display_ring(head_tx, DMA_TX_SIZE, false);
	}
}

static void stmmac_display_rings(struct stmmac_priv *priv)
{
	/* Display RX ring */
	stmmac_display_rx_rings(priv);

	/* Display TX ring */
	stmmac_display_tx_rings(priv);
}

static int stmmac_set_bfsize(int mtu, int bufsize)
{
	int ret = bufsize;

	if (mtu >= BUF_SIZE_4KiB)
		ret = BUF_SIZE_8KiB;
	else if (mtu >= BUF_SIZE_2KiB)
		ret = BUF_SIZE_4KiB;
	else if (mtu > DEFAULT_BUFSIZE)
		ret = BUF_SIZE_2KiB;
	else
		ret = DEFAULT_BUFSIZE;

	return ret;
}
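
/* Illustrative mapping for stmmac_set_bfsize(): the default 1500-byte MTU
 * keeps DEFAULT_BUFSIZE (1536 bytes), an MTU of 3000 selects BUF_SIZE_4KiB,
 * and any MTU of 4096 or more selects BUF_SIZE_8KiB.
 */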

/**
 * stmmac_clear_rx_descriptors - clear RX descriptors
 * @priv: driver private structure
 * @queue: RX queue index
 * Description: this function is called to clear the RX descriptors, whether
 * basic or extended descriptors are used.
 */
static void stmmac_clear_rx_descriptors(struct stmmac_priv *priv, u32 queue)
{
	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
	int i;

	/* Clear the RX descriptors */
	for (i = 0; i < DMA_RX_SIZE; i++)
		if (priv->extend_desc)
			priv->hw->desc->init_rx_desc(&rx_q->dma_erx[i].basic,
						     priv->use_riwt, priv->mode,
						     (i == DMA_RX_SIZE - 1));
		else
			priv->hw->desc->init_rx_desc(&rx_q->dma_rx[i],
						     priv->use_riwt, priv->mode,
						     (i == DMA_RX_SIZE - 1));
}

/**
 * stmmac_clear_tx_descriptors - clear tx descriptors
 * @priv: driver private structure
 * @queue: TX queue index.
 * Description: this function is called to clear the TX descriptors, whether
 * basic or extended descriptors are used.
 */
static void stmmac_clear_tx_descriptors(struct stmmac_priv *priv, u32 queue)
{
	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
	int i;

	/* Clear the TX descriptors */
	for (i = 0; i < DMA_TX_SIZE; i++)
		if (priv->extend_desc)
			priv->hw->desc->init_tx_desc(&tx_q->dma_etx[i].basic,
						     priv->mode,
						     (i == DMA_TX_SIZE - 1));
		else
			priv->hw->desc->init_tx_desc(&tx_q->dma_tx[i],
						     priv->mode,
						     (i == DMA_TX_SIZE - 1));
}

/**
 * stmmac_clear_descriptors - clear descriptors
 * @priv: driver private structure
 * Description: this function is called to clear the TX and RX descriptors,
 * whether basic or extended descriptors are used.
 */
static void stmmac_clear_descriptors(struct stmmac_priv *priv)
{
	u32 rx_queue_cnt = priv->plat->rx_queues_to_use;
	u32 tx_queue_cnt = priv->plat->tx_queues_to_use;
	u32 queue;

	/* Clear the RX descriptors */
	for (queue = 0; queue < rx_queue_cnt; queue++)
		stmmac_clear_rx_descriptors(priv, queue);

	/* Clear the TX descriptors */
	for (queue = 0; queue < tx_queue_cnt; queue++)
		stmmac_clear_tx_descriptors(priv, queue);
}

/**
 * stmmac_init_rx_buffers - init the RX descriptor buffer.
 * @priv: driver private structure
 * @p: descriptor pointer
 * @i: descriptor index
 * @flags: gfp flag
 * @queue: RX queue index
 * Description: this function is called to allocate a receive buffer, perform
 * the DMA mapping and init the descriptor.
 */
static int stmmac_init_rx_buffers(struct stmmac_priv *priv, struct dma_desc *p,
				  int i, gfp_t flags, u32 queue)
{
	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
	struct sk_buff *skb;

	skb = __netdev_alloc_skb_ip_align(priv->dev, priv->dma_buf_sz, flags);
	if (!skb) {
		netdev_err(priv->dev,
			   "%s: Rx init fails; skb is NULL\n", __func__);
		return -ENOMEM;
	}
	rx_q->rx_skbuff[i] = skb;
	rx_q->rx_skbuff_dma[i] = dma_map_single(priv->device, skb->data,
						priv->dma_buf_sz,
						DMA_FROM_DEVICE);
	if (dma_mapping_error(priv->device, rx_q->rx_skbuff_dma[i])) {
		netdev_err(priv->dev, "%s: DMA mapping error\n", __func__);
		dev_kfree_skb_any(skb);
		return -EINVAL;
	}

	if (priv->synopsys_id >= DWMAC_CORE_4_00)
		p->des0 = cpu_to_le32(rx_q->rx_skbuff_dma[i]);
	else
		p->des2 = cpu_to_le32(rx_q->rx_skbuff_dma[i]);

	if ((priv->hw->mode->init_desc3) &&
	    (priv->dma_buf_sz == BUF_SIZE_16KiB))
		priv->hw->mode->init_desc3(p);

	return 0;
}

/**
 * stmmac_free_rx_buffer - free an RX DMA buffer
 * @priv: private structure
 * @queue: RX queue index
 * @i: buffer index.
 */
static void stmmac_free_rx_buffer(struct stmmac_priv *priv, u32 queue, int i)
{
	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];

	if (rx_q->rx_skbuff[i]) {
		dma_unmap_single(priv->device, rx_q->rx_skbuff_dma[i],
				 priv->dma_buf_sz, DMA_FROM_DEVICE);
		dev_kfree_skb_any(rx_q->rx_skbuff[i]);
	}
	rx_q->rx_skbuff[i] = NULL;
}

/**
 * stmmac_free_tx_buffer - free a TX DMA buffer
 * @priv: private structure
 * @queue: TX queue index
 * @i: buffer index.
 */
static void stmmac_free_tx_buffer(struct stmmac_priv *priv, u32 queue, int i)
{
	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];

	if (tx_q->tx_skbuff_dma[i].buf) {
		if (tx_q->tx_skbuff_dma[i].map_as_page)
			dma_unmap_page(priv->device,
				       tx_q->tx_skbuff_dma[i].buf,
				       tx_q->tx_skbuff_dma[i].len,
				       DMA_TO_DEVICE);
		else
			dma_unmap_single(priv->device,
					 tx_q->tx_skbuff_dma[i].buf,
					 tx_q->tx_skbuff_dma[i].len,
					 DMA_TO_DEVICE);
	}

	if (tx_q->tx_skbuff[i]) {
		dev_kfree_skb_any(tx_q->tx_skbuff[i]);
		tx_q->tx_skbuff[i] = NULL;
		tx_q->tx_skbuff_dma[i].buf = 0;
		tx_q->tx_skbuff_dma[i].map_as_page = false;
	}
}

/**
 * init_dma_rx_desc_rings - init the RX descriptor rings
 * @dev: net device structure
 * @flags: gfp flag.
 * Description: this function initializes the DMA RX descriptors
 * and allocates the socket buffers. It supports the chained and ring
 * modes.
 */
static int init_dma_rx_desc_rings(struct net_device *dev, gfp_t flags)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	u32 rx_count = priv->plat->rx_queues_to_use;
	unsigned int bfsize = 0;
	int ret = -ENOMEM;
	int queue;
	int i;

	if (priv->hw->mode->set_16kib_bfsize)
		bfsize = priv->hw->mode->set_16kib_bfsize(dev->mtu);

	if (bfsize < BUF_SIZE_16KiB)
		bfsize = stmmac_set_bfsize(dev->mtu, priv->dma_buf_sz);

	priv->dma_buf_sz = bfsize;

	/* RX INITIALIZATION */
	netif_dbg(priv, probe, priv->dev,
		  "SKB addresses:\nskb\t\tskb data\tdma data\n");

	for (queue = 0; queue < rx_count; queue++) {
		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];

		netif_dbg(priv, probe, priv->dev,
			  "(%s) dma_rx_phy=0x%08x\n", __func__,
			  (u32)rx_q->dma_rx_phy);

		for (i = 0; i < DMA_RX_SIZE; i++) {
			struct dma_desc *p;

			if (priv->extend_desc)
				p = &((rx_q->dma_erx + i)->basic);
			else
				p = rx_q->dma_rx + i;

			ret = stmmac_init_rx_buffers(priv, p, i, flags,
						     queue);
			if (ret)
				goto err_init_rx_buffers;

			netif_dbg(priv, probe, priv->dev, "[%p]\t[%p]\t[%x]\n",
				  rx_q->rx_skbuff[i], rx_q->rx_skbuff[i]->data,
				  (unsigned int)rx_q->rx_skbuff_dma[i]);
		}

		rx_q->cur_rx = 0;
		rx_q->dirty_rx = (unsigned int)(i - DMA_RX_SIZE);

		stmmac_clear_rx_descriptors(priv, queue);

		/* Setup the chained descriptor addresses */
		if (priv->mode == STMMAC_CHAIN_MODE) {
			if (priv->extend_desc)
				priv->hw->mode->init(rx_q->dma_erx,
						     rx_q->dma_rx_phy,
						     DMA_RX_SIZE, 1);
			else
				priv->hw->mode->init(rx_q->dma_rx,
						     rx_q->dma_rx_phy,
						     DMA_RX_SIZE, 0);
		}
	}

	buf_sz = bfsize;

	return 0;

err_init_rx_buffers:
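	/* Unwind in reverse order: free the 0..i-1 buffers already allocated
	 * for the failing queue, then all DMA_RX_SIZE buffers of each queue
	 * that was fully initialized before it.
	 */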
	while (queue >= 0) {
		while (--i >= 0)
			stmmac_free_rx_buffer(priv, queue, i);

		if (queue == 0)
			break;

		i = DMA_RX_SIZE;
		queue--;
	}

	return ret;
}

/**
 * init_dma_tx_desc_rings - init the TX descriptor rings
 * @dev: net device structure.
 * Description: this function initializes the DMA TX descriptors
 * and allocates the socket buffers. It supports the chained and ring
 * modes.
 */
static int init_dma_tx_desc_rings(struct net_device *dev)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	u32 tx_queue_cnt = priv->plat->tx_queues_to_use;
	u32 queue;
	int i;

	for (queue = 0; queue < tx_queue_cnt; queue++) {
		struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];

		netif_dbg(priv, probe, priv->dev,
			  "(%s) dma_tx_phy=0x%08x\n", __func__,
			  (u32)tx_q->dma_tx_phy);

		/* Setup the chained descriptor addresses */
		if (priv->mode == STMMAC_CHAIN_MODE) {
			if (priv->extend_desc)
				priv->hw->mode->init(tx_q->dma_etx,
						     tx_q->dma_tx_phy,
						     DMA_TX_SIZE, 1);
			else
				priv->hw->mode->init(tx_q->dma_tx,
						     tx_q->dma_tx_phy,
						     DMA_TX_SIZE, 0);
		}

		for (i = 0; i < DMA_TX_SIZE; i++) {
			struct dma_desc *p;

			if (priv->extend_desc)
				p = &((tx_q->dma_etx + i)->basic);
			else
				p = tx_q->dma_tx + i;

			if (priv->synopsys_id >= DWMAC_CORE_4_00) {
				p->des0 = 0;
				p->des1 = 0;
				p->des2 = 0;
				p->des3 = 0;
			} else {
				p->des2 = 0;
			}

			tx_q->tx_skbuff_dma[i].buf = 0;
			tx_q->tx_skbuff_dma[i].map_as_page = false;
			tx_q->tx_skbuff_dma[i].len = 0;
			tx_q->tx_skbuff_dma[i].last_segment = false;
			tx_q->tx_skbuff[i] = NULL;
		}

		tx_q->dirty_tx = 0;
		tx_q->cur_tx = 0;

		netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, queue));
	}

	return 0;
}

/**
 * init_dma_desc_rings - init the RX/TX descriptor rings
 * @dev: net device structure
 * @flags: gfp flag.
 * Description: this function initializes the DMA RX/TX descriptors
 * and allocates the socket buffers. It supports the chained and ring
 * modes.
 */
static int init_dma_desc_rings(struct net_device *dev, gfp_t flags)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	int ret;

	ret = init_dma_rx_desc_rings(dev, flags);
	if (ret)
		return ret;

	ret = init_dma_tx_desc_rings(dev);

	stmmac_clear_descriptors(priv);

	if (netif_msg_hw(priv))
		stmmac_display_rings(priv);

	return ret;
}

/**
 * dma_free_rx_skbufs - free RX dma buffers
 * @priv: private structure
 * @queue: RX queue index
 */
static void dma_free_rx_skbufs(struct stmmac_priv *priv, u32 queue)
{
	int i;

	for (i = 0; i < DMA_RX_SIZE; i++)
		stmmac_free_rx_buffer(priv, queue, i);
}

/**
 * dma_free_tx_skbufs - free TX dma buffers
 * @priv: private structure
 * @queue: TX queue index
 */
static void dma_free_tx_skbufs(struct stmmac_priv *priv, u32 queue)
{
	int i;

	for (i = 0; i < DMA_TX_SIZE; i++)
		stmmac_free_tx_buffer(priv, queue, i);
}

/**
 * free_dma_rx_desc_resources - free RX dma desc resources
 * @priv: private structure
 */
static void free_dma_rx_desc_resources(struct stmmac_priv *priv)
{
	u32 rx_count = priv->plat->rx_queues_to_use;
	u32 queue;

	/* Free RX queue resources */
	for (queue = 0; queue < rx_count; queue++) {
		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];

		/* Release the DMA RX socket buffers */
		dma_free_rx_skbufs(priv, queue);

		/* Free DMA regions of consistent memory previously allocated */
		if (!priv->extend_desc)
			dma_free_coherent(priv->device,
					  DMA_RX_SIZE * sizeof(struct dma_desc),
					  rx_q->dma_rx, rx_q->dma_rx_phy);
		else
			dma_free_coherent(priv->device, DMA_RX_SIZE *
					  sizeof(struct dma_extended_desc),
					  rx_q->dma_erx, rx_q->dma_rx_phy);

		kfree(rx_q->rx_skbuff_dma);
		kfree(rx_q->rx_skbuff);
	}
}

/**
 * free_dma_tx_desc_resources - free TX dma desc resources
 * @priv: private structure
 */
static void free_dma_tx_desc_resources(struct stmmac_priv *priv)
{
	u32 tx_count = priv->plat->tx_queues_to_use;
	u32 queue;

	/* Free TX queue resources */
	for (queue = 0; queue < tx_count; queue++) {
		struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];

		/* Release the DMA TX socket buffers */
		dma_free_tx_skbufs(priv, queue);

		/* Free DMA regions of consistent memory previously allocated */
		if (!priv->extend_desc)
			dma_free_coherent(priv->device,
					  DMA_TX_SIZE * sizeof(struct dma_desc),
					  tx_q->dma_tx, tx_q->dma_tx_phy);
		else
			dma_free_coherent(priv->device, DMA_TX_SIZE *
					  sizeof(struct dma_extended_desc),
					  tx_q->dma_etx, tx_q->dma_tx_phy);

		kfree(tx_q->tx_skbuff_dma);
		kfree(tx_q->tx_skbuff);
	}
}

/**
 * alloc_dma_rx_desc_resources - alloc RX resources.
 * @priv: private structure
1478  * Description: according to which descriptor can be used (extend or basic)
1479  * this function allocates the resources for the RX path: the descriptor
1480  * rings and the arrays that track the pre-allocated RX socket buffers
1481  * used by the zero-copy mechanism.
1482  */
1483 static int alloc_dma_rx_desc_resources(struct stmmac_priv *priv)
1484 {
1485         u32 rx_count = priv->plat->rx_queues_to_use;
1486         int ret = -ENOMEM;
1487         u32 queue;
1488
1489         /* RX queues buffers and DMA */
1490         for (queue = 0; queue < rx_count; queue++) {
1491                 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1492
1493                 rx_q->queue_index = queue;
1494                 rx_q->priv_data = priv;
1495
1496                 rx_q->rx_skbuff_dma = kmalloc_array(DMA_RX_SIZE,
1497                                                     sizeof(dma_addr_t),
1498                                                     GFP_KERNEL);
1499                 if (!rx_q->rx_skbuff_dma)
1500                         goto err_dma;
1501
1502                 rx_q->rx_skbuff = kmalloc_array(DMA_RX_SIZE,
1503                                                 sizeof(struct sk_buff *),
1504                                                 GFP_KERNEL);
1505                 if (!rx_q->rx_skbuff)
1506                         goto err_dma;
1507
1508                 if (priv->extend_desc) {
1509                         rx_q->dma_erx = dma_zalloc_coherent(priv->device,
1510                                                             DMA_RX_SIZE *
1511                                                             sizeof(struct
1512                                                             dma_extended_desc),
1513                                                             &rx_q->dma_rx_phy,
1514                                                             GFP_KERNEL);
1515                         if (!rx_q->dma_erx)
1516                                 goto err_dma;
1517
1518                 } else {
1519                         rx_q->dma_rx = dma_zalloc_coherent(priv->device,
1520                                                            DMA_RX_SIZE *
1521                                                            sizeof(struct
1522                                                            dma_desc),
1523                                                            &rx_q->dma_rx_phy,
1524                                                            GFP_KERNEL);
1525                         if (!rx_q->dma_rx)
1526                                 goto err_dma;
1527                 }
1528         }
1529
1530         return 0;
1531
1532 err_dma:
1533         free_dma_rx_desc_resources(priv);
1534
1535         return ret;
1536 }
1537
1538 /**
1539  * alloc_dma_tx_desc_resources - alloc TX resources.
1540  * @priv: private structure
1541  * Description: according to which descriptor can be used (extend or basic)
1542  * this function allocates the resources for the TX path: the descriptor
1543  * rings and the arrays that track the in-flight TX socket buffers and
1544  * their DMA mappings.
1545  */
1546 static int alloc_dma_tx_desc_resources(struct stmmac_priv *priv)
1547 {
1548         u32 tx_count = priv->plat->tx_queues_to_use;
1549         int ret = -ENOMEM;
1550         u32 queue;
1551
1552         /* TX queues buffers and DMA */
1553         for (queue = 0; queue < tx_count; queue++) {
1554                 struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1555
1556                 tx_q->queue_index = queue;
1557                 tx_q->priv_data = priv;
1558
1559                 tx_q->tx_skbuff_dma = kmalloc_array(DMA_TX_SIZE,
1560                                                     sizeof(*tx_q->tx_skbuff_dma),
1561                                                     GFP_KERNEL);
1562                 if (!tx_q->tx_skbuff_dma)
1563                         goto err_dma;
1564
1565                 tx_q->tx_skbuff = kmalloc_array(DMA_TX_SIZE,
1566                                                 sizeof(struct sk_buff *),
1567                                                 GFP_KERNEL);
1568                 if (!tx_q->tx_skbuff)
1569                         goto err_dma;
1570
1571                 if (priv->extend_desc) {
1572                         tx_q->dma_etx = dma_zalloc_coherent(priv->device,
1573                                                             DMA_TX_SIZE *
1574                                                             sizeof(struct
1575                                                             dma_extended_desc),
1576                                                             &tx_q->dma_tx_phy,
1577                                                             GFP_KERNEL);
1578                         if (!tx_q->dma_etx)
1579                                 goto err_dma;
1580                 } else {
1581                         tx_q->dma_tx = dma_zalloc_coherent(priv->device,
1582                                                            DMA_TX_SIZE *
1583                                                            sizeof(struct
1584                                                                   dma_desc),
1585                                                            &tx_q->dma_tx_phy,
1586                                                            GFP_KERNEL);
1587                         if (!tx_q->dma_tx)
1588                                 goto err_dma;
1589                 }
1590         }
1591
1592         return 0;
1593
1594 err_dma:
1595         free_dma_tx_desc_resources(priv);
1596
1597         return ret;
1598 }
1599
1600 /**
1601  * alloc_dma_desc_resources - alloc TX/RX resources.
1602  * @priv: private structure
1603  * Description: according to which descriptor can be used (extend or basic)
1604  * this function allocates the resources for TX and RX paths. In case of
1605  * reception, for example, it pre-allocates the RX socket buffers in order
1606  * to allow the zero-copy mechanism.
1607  */
1608 static int alloc_dma_desc_resources(struct stmmac_priv *priv)
1609 {
1610         /* RX Allocation */
1611         int ret = alloc_dma_rx_desc_resources(priv);
1612
1613         if (ret)
1614                 return ret;
1615
1616         ret = alloc_dma_tx_desc_resources(priv);
1617
1618         return ret;
1619 }
1620
1621 /**
1622  * free_dma_desc_resources - free dma desc resources
1623  * @priv: private structure
1624  */
1625 static void free_dma_desc_resources(struct stmmac_priv *priv)
1626 {
1627         /* Release the DMA RX socket buffers */
1628         free_dma_rx_desc_resources(priv);
1629
1630         /* Release the DMA TX socket buffers */
1631         free_dma_tx_desc_resources(priv);
1632 }
1633
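/*
 * Editor's note: a minimal sketch of the descriptor lifecycle these helpers
 * implement, summarizing the call order used by stmmac_open() and
 * stmmac_release() further down (a summary, not additional driver logic):
 *
 *	ret = alloc_dma_desc_resources(priv);	     rings + tracking arrays
 *	if (!ret)
 *		ret = init_dma_desc_rings(dev, GFP_KERNEL);    seed them
 *	...
 *	free_dma_desc_resources(priv);		     teardown, also on errors
 */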
1634 /**
1635  *  stmmac_mac_enable_rx_queues - Enable MAC rx queues
1636  *  @priv: driver private structure
1637  *  Description: It is used for enabling the rx queues in the MAC
1638  */
1639 static void stmmac_mac_enable_rx_queues(struct stmmac_priv *priv)
1640 {
1641         u32 rx_queues_count = priv->plat->rx_queues_to_use;
1642         int queue;
1643         u8 mode;
1644
1645         for (queue = 0; queue < rx_queues_count; queue++) {
1646                 mode = priv->plat->rx_queues_cfg[queue].mode_to_use;
1647                 priv->hw->mac->rx_queue_enable(priv->hw, mode, queue);
1648         }
1649 }
1650
1651 /**
1652  * stmmac_start_rx_dma - start RX DMA channel
1653  * @priv: driver private structure
1654  * @chan: RX channel index
1655  * Description:
1656  * This starts a RX DMA channel
1657  */
1658 static void stmmac_start_rx_dma(struct stmmac_priv *priv, u32 chan)
1659 {
1660         netdev_dbg(priv->dev, "DMA RX processes started in channel %d\n", chan);
1661         priv->hw->dma->start_rx(priv->ioaddr, chan);
1662 }
1663
1664 /**
1665  * stmmac_start_tx_dma - start TX DMA channel
1666  * @priv: driver private structure
1667  * @chan: TX channel index
1668  * Description:
1669  * This starts a TX DMA channel
1670  */
1671 static void stmmac_start_tx_dma(struct stmmac_priv *priv, u32 chan)
1672 {
1673         netdev_dbg(priv->dev, "DMA TX processes started in channel %d\n", chan);
1674         priv->hw->dma->start_tx(priv->ioaddr, chan);
1675 }
1676
1677 /**
1678  * stmmac_stop_rx_dma - stop RX DMA channel
1679  * @priv: driver private structure
1680  * @chan: RX channel index
1681  * Description:
1682  * This stops a RX DMA channel
1683  */
1684 static void stmmac_stop_rx_dma(struct stmmac_priv *priv, u32 chan)
1685 {
1686         netdev_dbg(priv->dev, "DMA RX processes stopped in channel %d\n", chan);
1687         priv->hw->dma->stop_rx(priv->ioaddr, chan);
1688 }
1689
1690 /**
1691  * stmmac_stop_tx_dma - stop TX DMA channel
1692  * @priv: driver private structure
1693  * @chan: TX channel index
1694  * Description:
1695  * This stops a TX DMA channel
1696  */
1697 static void stmmac_stop_tx_dma(struct stmmac_priv *priv, u32 chan)
1698 {
1699         netdev_dbg(priv->dev, "DMA TX processes stopped in channel %d\n", chan);
1700         priv->hw->dma->stop_tx(priv->ioaddr, chan);
1701 }
1702
1703 /**
1704  * stmmac_start_all_dma - start all RX and TX DMA channels
1705  * @priv: driver private structure
1706  * Description:
1707  * This starts all the RX and TX DMA channels
1708  */
1709 static void stmmac_start_all_dma(struct stmmac_priv *priv)
1710 {
1711         u32 rx_channels_count = priv->plat->rx_queues_to_use;
1712         u32 tx_channels_count = priv->plat->tx_queues_to_use;
1713         u32 chan = 0;
1714
1715         for (chan = 0; chan < rx_channels_count; chan++)
1716                 stmmac_start_rx_dma(priv, chan);
1717
1718         for (chan = 0; chan < tx_channels_count; chan++)
1719                 stmmac_start_tx_dma(priv, chan);
1720 }
1721
1722 /**
1723  * stmmac_stop_all_dma - stop all RX and TX DMA channels
1724  * @priv: driver private structure
1725  * Description:
1726  * This stops the RX and TX DMA channels
1727  */
1728 static void stmmac_stop_all_dma(struct stmmac_priv *priv)
1729 {
1730         u32 rx_channels_count = priv->plat->rx_queues_to_use;
1731         u32 tx_channels_count = priv->plat->tx_queues_to_use;
1732         u32 chan = 0;
1733
1734         for (chan = 0; chan < rx_channels_count; chan++)
1735                 stmmac_stop_rx_dma(priv, chan);
1736
1737         for (chan = 0; chan < tx_channels_count; chan++)
1738                 stmmac_stop_tx_dma(priv, chan);
1739 }
1740
1741 /**
1742  *  stmmac_dma_operation_mode - HW DMA operation mode
1743  *  @priv: driver private structure
1744  *  Description: it is used for configuring the DMA operation mode register in
1745  *  order to program the tx/rx DMA thresholds or Store-And-Forward mode.
1746  */
1747 static void stmmac_dma_operation_mode(struct stmmac_priv *priv)
1748 {
1749         u32 rx_channels_count = priv->plat->rx_queues_to_use;
1750         u32 tx_channels_count = priv->plat->tx_queues_to_use;
1751         int rxfifosz = priv->plat->rx_fifo_size;
1752         u32 txmode = 0;
1753         u32 rxmode = 0;
1754         u32 chan = 0;
1755
1756         if (rxfifosz == 0)
1757                 rxfifosz = priv->dma_cap.rx_fifo_size;
1758
1759         if (priv->plat->force_thresh_dma_mode) {
1760                 txmode = tc;
1761                 rxmode = tc;
1762         } else if (priv->plat->force_sf_dma_mode || priv->plat->tx_coe) {
1763                 /*
1764                  * In case of GMAC, SF mode can be enabled
1765                  * to perform the TX COE in HW. This depends on:
1766                  * 1) TX COE being actually supported
1767                  * 2) there being no buggy Jumbo frame support
1768                  *    that requires not inserting the csum in the TDES.
1769                  */
1770                 txmode = SF_DMA_MODE;
1771                 rxmode = SF_DMA_MODE;
1772                 priv->xstats.threshold = SF_DMA_MODE;
1773         } else {
1774                 txmode = tc;
1775                 rxmode = SF_DMA_MODE;
1776         }
1777
1778         /* configure all channels */
1779         if (priv->synopsys_id >= DWMAC_CORE_4_00) {
1780                 for (chan = 0; chan < rx_channels_count; chan++)
1781                         priv->hw->dma->dma_rx_mode(priv->ioaddr, rxmode, chan,
1782                                                    rxfifosz);
1783
1784                 for (chan = 0; chan < tx_channels_count; chan++)
1785                         priv->hw->dma->dma_tx_mode(priv->ioaddr, txmode, chan);
1786         } else {
1787                 priv->hw->dma->dma_mode(priv->ioaddr, txmode, rxmode,
1788                                         rxfifosz);
1789         }
1790 }
1791
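/*
 * Editor's summary of the selection above, derived from the code for quick
 * reference:
 *
 *	force_thresh_dma_mode		-> txmode = tc, rxmode = tc
 *	force_sf_dma_mode || tx_coe	-> txmode = SF, rxmode = SF
 *	otherwise			-> txmode = tc, rxmode = SF
 */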
1792 /**
1793  * stmmac_tx_clean - to manage the transmission completion
1794  * @priv: driver private structure
1795  * @queue: TX queue index
1796  * Description: it reclaims the transmit resources after transmission completes.
1797  */
1798 static void stmmac_tx_clean(struct stmmac_priv *priv, u32 queue)
1799 {
1800         struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1801         unsigned int bytes_compl = 0, pkts_compl = 0;
1802         unsigned int entry;
1803
1804         netif_tx_lock(priv->dev);
1805
1806         priv->xstats.tx_clean++;
1807
1808         entry = tx_q->dirty_tx;
1809         while (entry != tx_q->cur_tx) {
1810                 struct sk_buff *skb = tx_q->tx_skbuff[entry];
1811                 struct dma_desc *p;
1812                 int status;
1813
1814                 if (priv->extend_desc)
1815                         p = (struct dma_desc *)(tx_q->dma_etx + entry);
1816                 else
1817                         p = tx_q->dma_tx + entry;
1818
1819                 status = priv->hw->desc->tx_status(&priv->dev->stats,
1820                                                       &priv->xstats, p,
1821                                                       priv->ioaddr);
1822                 /* Check if the descriptor is owned by the DMA */
1823                 if (unlikely(status & tx_dma_own))
1824                         break;
1825
1826                 /* Just consider the last segment and ...*/
1827                 if (likely(!(status & tx_not_ls))) {
1828                         /* ... verify the status error condition */
1829                         if (unlikely(status & tx_err)) {
1830                                 priv->dev->stats.tx_errors++;
1831                         } else {
1832                                 priv->dev->stats.tx_packets++;
1833                                 priv->xstats.tx_pkt_n++;
1834                         }
1835                         stmmac_get_tx_hwtstamp(priv, p, skb);
1836                 }
1837
1838                 if (likely(tx_q->tx_skbuff_dma[entry].buf)) {
1839                         if (tx_q->tx_skbuff_dma[entry].map_as_page)
1840                                 dma_unmap_page(priv->device,
1841                                                tx_q->tx_skbuff_dma[entry].buf,
1842                                                tx_q->tx_skbuff_dma[entry].len,
1843                                                DMA_TO_DEVICE);
1844                         else
1845                                 dma_unmap_single(priv->device,
1846                                                  tx_q->tx_skbuff_dma[entry].buf,
1847                                                  tx_q->tx_skbuff_dma[entry].len,
1848                                                  DMA_TO_DEVICE);
1849                         tx_q->tx_skbuff_dma[entry].buf = 0;
1850                         tx_q->tx_skbuff_dma[entry].len = 0;
1851                         tx_q->tx_skbuff_dma[entry].map_as_page = false;
1852                 }
1853
1854                 if (priv->hw->mode->clean_desc3)
1855                         priv->hw->mode->clean_desc3(tx_q, p);
1856
1857                 tx_q->tx_skbuff_dma[entry].last_segment = false;
1858                 tx_q->tx_skbuff_dma[entry].is_jumbo = false;
1859
1860                 if (likely(skb != NULL)) {
1861                         pkts_compl++;
1862                         bytes_compl += skb->len;
1863                         dev_consume_skb_any(skb);
1864                         tx_q->tx_skbuff[entry] = NULL;
1865                 }
1866
1867                 priv->hw->desc->release_tx_desc(p, priv->mode);
1868
1869                 entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE);
1870         }
1871         tx_q->dirty_tx = entry;
1872
1873         netdev_tx_completed_queue(netdev_get_tx_queue(priv->dev, queue),
1874                                   pkts_compl, bytes_compl);
1875
1876         if (unlikely(netif_tx_queue_stopped(netdev_get_tx_queue(priv->dev,
1877                                                                 queue))) &&
1878             stmmac_tx_avail(priv, queue) > STMMAC_TX_THRESH) {
1879
1880                 netif_dbg(priv, tx_done, priv->dev,
1881                           "%s: restart transmit\n", __func__);
1882                 netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, queue));
1883         }
1884
1885         if ((priv->eee_enabled) && (!priv->tx_path_in_lpi_mode)) {
1886                 stmmac_enable_eee_mode(priv);
1887                 mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(eee_timer));
1888         }
1889         netif_tx_unlock(priv->dev);
1890 }
1891
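/*
 * Editor's note: cur_tx is advanced by the xmit paths while dirty_tx chases
 * it here, both wrapping through STMMAC_GET_ENTRY(). A hypothetical
 * mini-model of the availability math (stmmac_tx_avail(), defined earlier
 * in this file, implements the real check):
 *
 *	used = (cur_tx - dirty_tx) & (DMA_TX_SIZE - 1);
 *	free = DMA_TX_SIZE - used - 1;		one slot is kept unused
 */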
1892 static inline void stmmac_enable_dma_irq(struct stmmac_priv *priv, u32 chan)
1893 {
1894         priv->hw->dma->enable_dma_irq(priv->ioaddr, chan);
1895 }
1896
1897 static inline void stmmac_disable_dma_irq(struct stmmac_priv *priv, u32 chan)
1898 {
1899         priv->hw->dma->disable_dma_irq(priv->ioaddr, chan);
1900 }
1901
1902 /**
1903  * stmmac_tx_err - to manage the tx error
1904  * @priv: driver private structure
1905  * @chan: channel index
1906  * Description: it cleans the descriptors and restarts the transmission
1907  * in case of transmission errors.
1908  */
1909 static void stmmac_tx_err(struct stmmac_priv *priv, u32 chan)
1910 {
1911         struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan];
1912         int i;
1913
1914         netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, chan));
1915
1916         stmmac_stop_tx_dma(priv, chan);
1917         dma_free_tx_skbufs(priv, chan);
1918         for (i = 0; i < DMA_TX_SIZE; i++)
1919                 if (priv->extend_desc)
1920                         priv->hw->desc->init_tx_desc(&tx_q->dma_etx[i].basic,
1921                                                      priv->mode,
1922                                                      (i == DMA_TX_SIZE - 1));
1923                 else
1924                         priv->hw->desc->init_tx_desc(&tx_q->dma_tx[i],
1925                                                      priv->mode,
1926                                                      (i == DMA_TX_SIZE - 1));
1927         tx_q->dirty_tx = 0;
1928         tx_q->cur_tx = 0;
1929         netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, chan));
1930         stmmac_start_tx_dma(priv, chan);
1931
1932         priv->dev->stats.tx_errors++;
1933         netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, chan));
1934 }
1935
1936 /**
1937  *  stmmac_set_dma_operation_mode - Set DMA operation mode by channel
1938  *  @priv: driver private structure
1939  *  @txmode: TX operating mode
1940  *  @rxmode: RX operating mode
1941  *  @chan: channel index
1942  *  Description: it is used for configuring the DMA operation mode at
1943  *  runtime in order to program the tx/rx DMA thresholds or Store-And-Forward
1944  *  mode.
1945  */
1946 static void stmmac_set_dma_operation_mode(struct stmmac_priv *priv, u32 txmode,
1947                                           u32 rxmode, u32 chan)
1948 {
1949         int rxfifosz = priv->plat->rx_fifo_size;
1950
1951         if (rxfifosz == 0)
1952                 rxfifosz = priv->dma_cap.rx_fifo_size;
1953
1954         if (priv->synopsys_id >= DWMAC_CORE_4_00) {
1955                 priv->hw->dma->dma_rx_mode(priv->ioaddr, rxmode, chan,
1956                                            rxfifosz);
1957                 priv->hw->dma->dma_tx_mode(priv->ioaddr, txmode, chan);
1958         } else {
1959                 priv->hw->dma->dma_mode(priv->ioaddr, txmode, rxmode,
1960                                         rxfifosz);
1961         }
1962 }
1963
1964 /**
1965  * stmmac_dma_interrupt - DMA ISR
1966  * @priv: driver private structure
1967  * Description: this is the DMA ISR. It is called by the main ISR.
1968  * It calls the dwmac dma routine and schedules the poll method in case
1969  * some work can be done.
1970  */
1971 static void stmmac_dma_interrupt(struct stmmac_priv *priv)
1972 {
1973         u32 tx_channel_count = priv->plat->tx_queues_to_use;
1974         int status;
1975         u32 chan;
1976
1977         for (chan = 0; chan < tx_channel_count; chan++) {
1978                 struct stmmac_rx_queue *rx_q = &priv->rx_queue[chan];
1979
1980                 status = priv->hw->dma->dma_interrupt(priv->ioaddr,
1981                                                       &priv->xstats, chan);
1982                 if (likely((status & handle_rx)) || (status & handle_tx)) {
1983                         if (likely(napi_schedule_prep(&rx_q->napi))) {
1984                                 stmmac_disable_dma_irq(priv, chan);
1985                                 __napi_schedule(&rx_q->napi);
1986                         }
1987                 }
1988
1989                 if (unlikely(status & tx_hard_error_bump_tc)) {
1990                         /* Try to bump up the dma threshold on this failure */
1991                         if (unlikely(priv->xstats.threshold != SF_DMA_MODE) &&
1992                             (tc <= 256)) {
1993                                 tc += 64;
1994                                 if (priv->plat->force_thresh_dma_mode)
1995                                         stmmac_set_dma_operation_mode(priv,
1996                                                                       tc,
1997                                                                       tc,
1998                                                                       chan);
1999                                 else
2000                                         stmmac_set_dma_operation_mode(priv,
2001                                                                     tc,
2002                                                                     SF_DMA_MODE,
2003                                                                     chan);
2004                                 priv->xstats.threshold = tc;
2005                         }
2006                 } else if (unlikely(status == tx_hard_error)) {
2007                         stmmac_tx_err(priv, chan);
2008                 }
2009         }
2010 }
2011
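/*
 * Editor's note on the threshold bumping above: with the default tc = 64,
 * successive tx_hard_error_bump_tc events raise it 64 -> 128 -> 192 -> 256
 * -> 320, after which tc > 256 stops further bumps; operating in
 * Store-And-Forward mode (xstats.threshold == SF_DMA_MODE) disables the
 * bumping entirely.
 */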
2012 /**
2013  * stmmac_mmc_setup - setup the Mac Management Counters (MMC)
2014  * @priv: driver private structure
2015  * Description: this masks the MMC irq; in fact, the counters are managed in SW.
2016  */
2017 static void stmmac_mmc_setup(struct stmmac_priv *priv)
2018 {
2019         unsigned int mode = MMC_CNTRL_RESET_ON_READ | MMC_CNTRL_COUNTER_RESET |
2020                             MMC_CNTRL_PRESET | MMC_CNTRL_FULL_HALF_PRESET;
2021
2022         if (priv->synopsys_id >= DWMAC_CORE_4_00) {
2023                 priv->ptpaddr = priv->ioaddr + PTP_GMAC4_OFFSET;
2024                 priv->mmcaddr = priv->ioaddr + MMC_GMAC4_OFFSET;
2025         } else {
2026                 priv->ptpaddr = priv->ioaddr + PTP_GMAC3_X_OFFSET;
2027                 priv->mmcaddr = priv->ioaddr + MMC_GMAC3_X_OFFSET;
2028         }
2029
2030         dwmac_mmc_intr_all_mask(priv->mmcaddr);
2031
2032         if (priv->dma_cap.rmon) {
2033                 dwmac_mmc_ctrl(priv->mmcaddr, mode);
2034                 memset(&priv->mmc, 0, sizeof(struct stmmac_counters));
2035         } else
2036                 netdev_info(priv->dev, "No MAC Management Counters available\n");
2037 }
2038
2039 /**
2040  * stmmac_selec_desc_mode - to select among: normal/alternate/extend descriptors
2041  * @priv: driver private structure
2042  * Description: select the Enhanced/Alternate or Normal descriptors.
2043  * In case of Enhanced/Alternate, it checks if the extended descriptors are
2044  * supported by the HW capability register.
2045  */
2046 static void stmmac_selec_desc_mode(struct stmmac_priv *priv)
2047 {
2048         if (priv->plat->enh_desc) {
2049                 dev_info(priv->device, "Enhanced/Alternate descriptors\n");
2050
2051                 /* GMAC older than 3.50 has no extended descriptors */
2052                 if (priv->synopsys_id >= DWMAC_CORE_3_50) {
2053                         dev_info(priv->device, "Enabled extended descriptors\n");
2054                         priv->extend_desc = 1;
2055                 } else
2056                         dev_warn(priv->device, "Extended descriptors not supported\n");
2057
2058                 priv->hw->desc = &enh_desc_ops;
2059         } else {
2060                 dev_info(priv->device, "Normal descriptors\n");
2061                 priv->hw->desc = &ndesc_ops;
2062         }
2063 }
2064
2065 /**
2066  * stmmac_get_hw_features - get MAC capabilities from the HW cap. register.
2067  * @priv: driver private structure
2068  * Description:
2069  *  new GMAC chip generations have a new register to indicate the
2070  *  presence of the optional feature/functions.
2071  *  It can also be used to override the values passed through the
2072  *  platform, which remain necessary for old MAC10/100 and GMAC chips.
2073  */
2074 static int stmmac_get_hw_features(struct stmmac_priv *priv)
2075 {
2076         u32 ret = 0;
2077
2078         if (priv->hw->dma->get_hw_feature) {
2079                 priv->hw->dma->get_hw_feature(priv->ioaddr,
2080                                               &priv->dma_cap);
2081                 ret = 1;
2082         }
2083
2084         return ret;
2085 }
2086
2087 /**
2088  * stmmac_check_ether_addr - check if the MAC addr is valid
2089  * @priv: driver private structure
2090  * Description:
2091  * it verifies whether the MAC address is valid; in case of failure it
2092  * generates a random MAC address
2093  */
2094 static void stmmac_check_ether_addr(struct stmmac_priv *priv)
2095 {
2096         if (!is_valid_ether_addr(priv->dev->dev_addr)) {
2097                 priv->hw->mac->get_umac_addr(priv->hw,
2098                                              priv->dev->dev_addr, 0);
2099                 if (!is_valid_ether_addr(priv->dev->dev_addr))
2100                         eth_hw_addr_random(priv->dev);
2101                 netdev_info(priv->dev, "device MAC address %pM\n",
2102                             priv->dev->dev_addr);
2103         }
2104 }
2105
2106 /**
2107  * stmmac_init_dma_engine - DMA init.
2108  * @priv: driver private structure
2109  * Description:
2110  * It inits the DMA invoking the specific MAC/GMAC callback.
2111  * Some DMA parameters can be passed from the platform;
2112  * if these are not passed, a default is kept for the MAC or GMAC.
2113  */
2114 static int stmmac_init_dma_engine(struct stmmac_priv *priv)
2115 {
2116         u32 rx_channels_count = priv->plat->rx_queues_to_use;
2117         u32 tx_channels_count = priv->plat->tx_queues_to_use;
2118         struct stmmac_rx_queue *rx_q;
2119         struct stmmac_tx_queue *tx_q;
2120         u32 dummy_dma_rx_phy = 0;
2121         u32 dummy_dma_tx_phy = 0;
2122         u32 chan = 0;
2123         int atds = 0;
2124         int ret = 0;
2125
2126         if (!priv->plat->dma_cfg || !priv->plat->dma_cfg->pbl) {
2127                 dev_err(priv->device, "Invalid DMA configuration\n");
2128                 return -EINVAL;
2129         }
2130
2131         if (priv->extend_desc && (priv->mode == STMMAC_RING_MODE))
2132                 atds = 1;
2133
2134         ret = priv->hw->dma->reset(priv->ioaddr);
2135         if (ret) {
2136                 dev_err(priv->device, "Failed to reset the dma\n");
2137                 return ret;
2138         }
2139
2140         if (priv->synopsys_id >= DWMAC_CORE_4_00) {
2141                 /* DMA Configuration */
2142                 priv->hw->dma->init(priv->ioaddr, priv->plat->dma_cfg,
2143                                     dummy_dma_tx_phy, dummy_dma_rx_phy, atds);
2144
2145                 /* DMA RX Channel Configuration */
2146                 for (chan = 0; chan < rx_channels_count; chan++) {
2147                         rx_q = &priv->rx_queue[chan];
2148
2149                         priv->hw->dma->init_rx_chan(priv->ioaddr,
2150                                                     priv->plat->dma_cfg,
2151                                                     rx_q->dma_rx_phy, chan);
2152
2153                         rx_q->rx_tail_addr = rx_q->dma_rx_phy +
2154                                     (DMA_RX_SIZE * sizeof(struct dma_desc));
2155                         priv->hw->dma->set_rx_tail_ptr(priv->ioaddr,
2156                                                        rx_q->rx_tail_addr,
2157                                                        chan);
2158                 }
2159
2160                 /* DMA TX Channel Configuration */
2161                 for (chan = 0; chan < tx_channels_count; chan++) {
2162                         tx_q = &priv->tx_queue[chan];
2163
2164                         priv->hw->dma->init_chan(priv->ioaddr,
2165                                                  priv->plat->dma_cfg,
2166                                                  chan);
2167
2168                         priv->hw->dma->init_tx_chan(priv->ioaddr,
2169                                                     priv->plat->dma_cfg,
2170                                                     tx_q->dma_tx_phy, chan);
2171
2172                         tx_q->tx_tail_addr = tx_q->dma_tx_phy +
2173                                     (DMA_TX_SIZE * sizeof(struct dma_desc));
2174                         priv->hw->dma->set_tx_tail_ptr(priv->ioaddr,
2175                                                        tx_q->tx_tail_addr,
2176                                                        chan);
2177                 }
2178         } else {
2179                 rx_q = &priv->rx_queue[chan];
2180                 tx_q = &priv->tx_queue[chan];
2181                 priv->hw->dma->init(priv->ioaddr, priv->plat->dma_cfg,
2182                                     tx_q->dma_tx_phy, rx_q->dma_rx_phy, atds);
2183         }
2184
2185         if (priv->plat->axi && priv->hw->dma->axi)
2186                 priv->hw->dma->axi(priv->ioaddr, priv->plat->axi);
2187
2188         return ret;
2189 }
2190
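/*
 * Editor's note: on DWMAC_CORE_4_00+ every channel gets its own descriptor
 * base and tail pointer, with the tail seeded one full ring past the base;
 * older cores instead program a single TX/RX descriptor pair through
 * dma->init() (the chan == 0 path above).
 */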
2191 /**
2192  * stmmac_tx_timer - mitigation sw timer for tx.
2193  * @data: data pointer
2194  * Description:
2195  * This is the timer handler to directly invoke the stmmac_tx_clean.
2196  */
2197 static void stmmac_tx_timer(unsigned long data)
2198 {
2199         struct stmmac_priv *priv = (struct stmmac_priv *)data;
2200         u32 tx_queues_count = priv->plat->tx_queues_to_use;
2201         u32 queue;
2202
2203         /* let's scan all the tx queues */
2204         for (queue = 0; queue < tx_queues_count; queue++)
2205                 stmmac_tx_clean(priv, queue);
2206 }
2207
2208 /**
2209  * stmmac_init_tx_coalesce - init tx mitigation options.
2210  * @priv: driver private structure
2211  * Description:
2212  * This inits the transmit coalesce parameters: i.e. timer rate,
2213  * timer handler and default threshold used for enabling the
2214  * interrupt on completion bit.
2215  */
2216 static void stmmac_init_tx_coalesce(struct stmmac_priv *priv)
2217 {
2218         priv->tx_coal_frames = STMMAC_TX_FRAMES;
2219         priv->tx_coal_timer = STMMAC_COAL_TX_TIMER;
2220         init_timer(&priv->txtimer);
2221         priv->txtimer.expires = STMMAC_COAL_TIMER(priv->tx_coal_timer);
2222         priv->txtimer.data = (unsigned long)priv;
2223         priv->txtimer.function = stmmac_tx_timer;
2224         add_timer(&priv->txtimer);
2225 }
2226
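/*
 * Editor's note: the init_timer()/.data/.function sequence above is the
 * pre-v4.15 timer API. On kernels providing timer_setup(), an equivalent
 * untested sketch would be (the callback then takes a struct timer_list *
 * and recovers priv with from_timer()):
 *
 *	timer_setup(&priv->txtimer, stmmac_tx_timer, 0);
 *	mod_timer(&priv->txtimer, STMMAC_COAL_TIMER(priv->tx_coal_timer));
 */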
2227 static void stmmac_set_rings_length(struct stmmac_priv *priv)
2228 {
2229         u32 rx_channels_count = priv->plat->rx_queues_to_use;
2230         u32 tx_channels_count = priv->plat->tx_queues_to_use;
2231         u32 chan;
2232
2233         /* set TX ring length */
2234         if (priv->hw->dma->set_tx_ring_len) {
2235                 for (chan = 0; chan < tx_channels_count; chan++)
2236                         priv->hw->dma->set_tx_ring_len(priv->ioaddr,
2237                                                        (DMA_TX_SIZE - 1), chan);
2238         }
2239
2240         /* set RX ring length */
2241         if (priv->hw->dma->set_rx_ring_len) {
2242                 for (chan = 0; chan < rx_channels_count; chan++)
2243                         priv->hw->dma->set_rx_ring_len(priv->ioaddr,
2244                                                        (DMA_RX_SIZE - 1), chan);
2245         }
2246 }
2247
2248 /**
2249  *  stmmac_set_tx_queue_weight - Set TX queue weight
2250  *  @priv: driver private structure
2251  *  Description: It is used for setting TX queues weight
2252  */
2253 static void stmmac_set_tx_queue_weight(struct stmmac_priv *priv)
2254 {
2255         u32 tx_queues_count = priv->plat->tx_queues_to_use;
2256         u32 weight;
2257         u32 queue;
2258
2259         for (queue = 0; queue < tx_queues_count; queue++) {
2260                 weight = priv->plat->tx_queues_cfg[queue].weight;
2261                 priv->hw->mac->set_mtl_tx_queue_weight(priv->hw, weight, queue);
2262         }
2263 }
2264
2265 /**
2266  *  stmmac_configure_cbs - Configure CBS in TX queue
2267  *  @priv: driver private structure
2268  *  Description: It is used for configuring CBS in AVB TX queues
2269  */
2270 static void stmmac_configure_cbs(struct stmmac_priv *priv)
2271 {
2272         u32 tx_queues_count = priv->plat->tx_queues_to_use;
2273         u32 mode_to_use;
2274         u32 queue;
2275
2276         /* queue 0 is reserved for legacy traffic */
2277         for (queue = 1; queue < tx_queues_count; queue++) {
2278                 mode_to_use = priv->plat->tx_queues_cfg[queue].mode_to_use;
2279                 if (mode_to_use == MTL_QUEUE_DCB)
2280                         continue;
2281
2282                 priv->hw->mac->config_cbs(priv->hw,
2283                                 priv->plat->tx_queues_cfg[queue].send_slope,
2284                                 priv->plat->tx_queues_cfg[queue].idle_slope,
2285                                 priv->plat->tx_queues_cfg[queue].high_credit,
2286                                 priv->plat->tx_queues_cfg[queue].low_credit,
2287                                 queue);
2288         }
2289 }
2290
2291 /**
2292  *  stmmac_rx_queue_dma_chan_map - Map RX queue to RX dma channel
2293  *  @priv: driver private structure
2294  *  Description: It is used for mapping RX queues to RX dma channels
2295  */
2296 static void stmmac_rx_queue_dma_chan_map(struct stmmac_priv *priv)
2297 {
2298         u32 rx_queues_count = priv->plat->rx_queues_to_use;
2299         u32 queue;
2300         u32 chan;
2301
2302         for (queue = 0; queue < rx_queues_count; queue++) {
2303                 chan = priv->plat->rx_queues_cfg[queue].chan;
2304                 priv->hw->mac->map_mtl_to_dma(priv->hw, queue, chan);
2305         }
2306 }
2307
2308 /**
2309  *  stmmac_mac_config_rx_queues_prio - Configure RX Queue priority
2310  *  @priv: driver private structure
2311  *  Description: It is used for configuring the RX Queue Priority
2312  */
2313 static void stmmac_mac_config_rx_queues_prio(struct stmmac_priv *priv)
2314 {
2315         u32 rx_queues_count = priv->plat->rx_queues_to_use;
2316         u32 queue;
2317         u32 prio;
2318
2319         for (queue = 0; queue < rx_queues_count; queue++) {
2320                 if (!priv->plat->rx_queues_cfg[queue].use_prio)
2321                         continue;
2322
2323                 prio = priv->plat->rx_queues_cfg[queue].prio;
2324                 priv->hw->mac->rx_queue_prio(priv->hw, prio, queue);
2325         }
2326 }
2327
2328 /**
2329  *  stmmac_mac_config_tx_queues_prio - Configure TX Queue priority
2330  *  @priv: driver private structure
2331  *  Description: It is used for configuring the TX Queue Priority
2332  */
2333 static void stmmac_mac_config_tx_queues_prio(struct stmmac_priv *priv)
2334 {
2335         u32 tx_queues_count = priv->plat->tx_queues_to_use;
2336         u32 queue;
2337         u32 prio;
2338
2339         for (queue = 0; queue < tx_queues_count; queue++) {
2340                 if (!priv->plat->tx_queues_cfg[queue].use_prio)
2341                         continue;
2342
2343                 prio = priv->plat->tx_queues_cfg[queue].prio;
2344                 priv->hw->mac->tx_queue_prio(priv->hw, prio, queue);
2345         }
2346 }
2347
2348 /**
2349  *  stmmac_mac_config_rx_queues_routing - Configure RX Queue Routing
2350  *  @priv: driver private structure
2351  *  Description: It is used for configuring the RX queue routing
2352  */
2353 static void stmmac_mac_config_rx_queues_routing(struct stmmac_priv *priv)
2354 {
2355         u32 rx_queues_count = priv->plat->rx_queues_to_use;
2356         u32 queue;
2357         u8 packet;
2358
2359         for (queue = 0; queue < rx_queues_count; queue++) {
2360                 /* no specific packet type routing specified for the queue */
2361                 if (priv->plat->rx_queues_cfg[queue].pkt_route == 0x0)
2362                         continue;
2363
2364                 packet = priv->plat->rx_queues_cfg[queue].pkt_route;
2365                 priv->hw->mac->rx_queue_routing(priv->hw, packet, queue);
2366         }
2367 }
2368
2369 /**
2370  *  stmmac_mtl_configuration - Configure MTL
2371  *  @priv: driver private structure
2372  *  Description: It is used for configuring MTL
2373  */
2374 static void stmmac_mtl_configuration(struct stmmac_priv *priv)
2375 {
2376         u32 rx_queues_count = priv->plat->rx_queues_to_use;
2377         u32 tx_queues_count = priv->plat->tx_queues_to_use;
2378
2379         if (tx_queues_count > 1 && priv->hw->mac->set_mtl_tx_queue_weight)
2380                 stmmac_set_tx_queue_weight(priv);
2381
2382         /* Configure MTL RX algorithms */
2383         if (rx_queues_count > 1 && priv->hw->mac->prog_mtl_rx_algorithms)
2384                 priv->hw->mac->prog_mtl_rx_algorithms(priv->hw,
2385                                                 priv->plat->rx_sched_algorithm);
2386
2387         /* Configure MTL TX algorithms */
2388         if (tx_queues_count > 1 && priv->hw->mac->prog_mtl_tx_algorithms)
2389                 priv->hw->mac->prog_mtl_tx_algorithms(priv->hw,
2390                                                 priv->plat->tx_sched_algorithm);
2391
2392         /* Configure CBS in AVB TX queues */
2393         if (tx_queues_count > 1 && priv->hw->mac->config_cbs)
2394                 stmmac_configure_cbs(priv);
2395
2396         /* Map RX MTL to DMA channels */
2397         if (priv->hw->mac->map_mtl_to_dma)
2398                 stmmac_rx_queue_dma_chan_map(priv);
2399
2400         /* Enable MAC RX Queues */
2401         if (priv->hw->mac->rx_queue_enable)
2402                 stmmac_mac_enable_rx_queues(priv);
2403
2404         /* Set RX priorities */
2405         if (rx_queues_count > 1 && priv->hw->mac->rx_queue_prio)
2406                 stmmac_mac_config_rx_queues_prio(priv);
2407
2408         /* Set TX priorities */
2409         if (tx_queues_count > 1 && priv->hw->mac->tx_queue_prio)
2410                 stmmac_mac_config_tx_queues_prio(priv);
2411
2412         /* Set RX routing */
2413         if (rx_queues_count > 1 && priv->hw->mac->rx_queue_routing)
2414                 stmmac_mac_config_rx_queues_routing(priv);
2415 }
2416
2417 /**
2418  * stmmac_hw_setup - setup mac in a usable state.
2419  *  @dev : pointer to the device structure.
2420  *  Description:
2421  *  this is the main function to setup the HW in a usable state: the
2422  *  dma engine is reset, the core registers are configured (e.g. AXI,
2423  *  Checksum features, timers). The DMA is ready to start receiving and
2424  *  transmitting.
2425  *  Return value:
2426  *  0 on success and an appropriate (-)ve integer as defined in errno.h
2427  *  file on failure.
2428  */
2429 static int stmmac_hw_setup(struct net_device *dev, bool init_ptp)
2430 {
2431         struct stmmac_priv *priv = netdev_priv(dev);
2432         u32 rx_cnt = priv->plat->rx_queues_to_use;
2433         u32 tx_cnt = priv->plat->tx_queues_to_use;
2434         u32 chan;
2435         int ret;
2436
2437         /* DMA initialization and SW reset */
2438         ret = stmmac_init_dma_engine(priv);
2439         if (ret < 0) {
2440                 netdev_err(priv->dev, "%s: DMA engine initialization failed\n",
2441                            __func__);
2442                 return ret;
2443         }
2444
2445         /* Copy the MAC addr into the HW */
2446         priv->hw->mac->set_umac_addr(priv->hw, dev->dev_addr, 0);
2447
2448         /* PS and related bits will be programmed according to the speed */
2449         if (priv->hw->pcs) {
2450                 int speed = priv->plat->mac_port_sel_speed;
2451
2452                 if ((speed == SPEED_10) || (speed == SPEED_100) ||
2453                     (speed == SPEED_1000)) {
2454                         priv->hw->ps = speed;
2455                 } else {
2456                         dev_warn(priv->device, "invalid port speed\n");
2457                         priv->hw->ps = 0;
2458                 }
2459         }
2460
2461         /* Initialize the MAC Core */
2462         priv->hw->mac->core_init(priv->hw, dev->mtu);
2463
2464         /* Initialize MTL */
2465         if (priv->synopsys_id >= DWMAC_CORE_4_00)
2466                 stmmac_mtl_configuration(priv);
2467
2468         ret = priv->hw->mac->rx_ipc(priv->hw);
2469         if (!ret) {
2470                 netdev_warn(priv->dev, "RX IPC Checksum Offload disabled\n");
2471                 priv->plat->rx_coe = STMMAC_RX_COE_NONE;
2472                 priv->hw->rx_csum = 0;
2473         }
2474
2475         /* Enable the MAC Rx/Tx */
2476         priv->hw->mac->set_mac(priv->ioaddr, true);
2477
2478         /* Set the HW DMA mode and the COE */
2479         stmmac_dma_operation_mode(priv);
2480
2481         stmmac_mmc_setup(priv);
2482
2483         if (init_ptp) {
2484                 ret = clk_prepare_enable(priv->plat->clk_ptp_ref);
2485                 if (ret < 0)
2486                         netdev_warn(priv->dev, "failed to enable PTP reference clock: %d\n", ret);
2487
2488                 ret = stmmac_init_ptp(priv);
2489                 if (ret == -EOPNOTSUPP)
2490                         netdev_warn(priv->dev, "PTP not supported by HW\n");
2491                 else if (ret)
2492                         netdev_warn(priv->dev, "PTP init failed\n");
2493         }
2494
2495 #ifdef CONFIG_DEBUG_FS
2496         ret = stmmac_init_fs(dev);
2497         if (ret < 0)
2498                 netdev_warn(priv->dev, "%s: failed debugFS registration\n",
2499                             __func__);
2500 #endif
2501         /* Start the ball rolling... */
2502         stmmac_start_all_dma(priv);
2503
2504         priv->tx_lpi_timer = STMMAC_DEFAULT_TWT_LS;
2505
2506         if ((priv->use_riwt) && (priv->hw->dma->rx_watchdog)) {
2507                 priv->rx_riwt = MAX_DMA_RIWT;
2508                 priv->hw->dma->rx_watchdog(priv->ioaddr, MAX_DMA_RIWT, rx_cnt);
2509         }
2510
2511         if (priv->hw->pcs && priv->hw->mac->pcs_ctrl_ane)
2512                 priv->hw->mac->pcs_ctrl_ane(priv->hw, 1, priv->hw->ps, 0);
2513
2514         /* set TX and RX rings length */
2515         stmmac_set_rings_length(priv);
2516
2517         /* Enable TSO */
2518         if (priv->tso) {
2519                 for (chan = 0; chan < tx_cnt; chan++)
2520                         priv->hw->dma->enable_tso(priv->ioaddr, 1, chan);
2521         }
2522
2523         return 0;
2524 }
2525
2526 static void stmmac_hw_teardown(struct net_device *dev)
2527 {
2528         struct stmmac_priv *priv = netdev_priv(dev);
2529
2530         clk_disable_unprepare(priv->plat->clk_ptp_ref);
2531 }
2532
2533 /**
2534  *  stmmac_open - open entry point of the driver
2535  *  @dev : pointer to the device structure.
2536  *  Description:
2537  *  This function is the open entry point of the driver.
2538  *  Return value:
2539  *  0 on success and an appropriate (-)ve integer as defined in errno.h
2540  *  file on failure.
2541  */
2542 static int stmmac_open(struct net_device *dev)
2543 {
2544         struct stmmac_priv *priv = netdev_priv(dev);
2545         int ret;
2546
2547         stmmac_check_ether_addr(priv);
2548
2549         if (priv->hw->pcs != STMMAC_PCS_RGMII &&
2550             priv->hw->pcs != STMMAC_PCS_TBI &&
2551             priv->hw->pcs != STMMAC_PCS_RTBI) {
2552                 ret = stmmac_init_phy(dev);
2553                 if (ret) {
2554                         netdev_err(priv->dev,
2555                                    "%s: Cannot attach to PHY (error: %d)\n",
2556                                    __func__, ret);
2557                         return ret;
2558                 }
2559         }
2560
2561         /* Extra statistics */
2562         memset(&priv->xstats, 0, sizeof(struct stmmac_extra_stats));
2563         priv->xstats.threshold = tc;
2564
2565         priv->dma_buf_sz = STMMAC_ALIGN(buf_sz);
2566         priv->rx_copybreak = STMMAC_RX_COPYBREAK;
2567
2568         ret = alloc_dma_desc_resources(priv);
2569         if (ret < 0) {
2570                 netdev_err(priv->dev, "%s: DMA descriptors allocation failed\n",
2571                            __func__);
2572                 goto dma_desc_error;
2573         }
2574
2575         ret = init_dma_desc_rings(dev, GFP_KERNEL);
2576         if (ret < 0) {
2577                 netdev_err(priv->dev, "%s: DMA descriptors initialization failed\n",
2578                            __func__);
2579                 goto init_error;
2580         }
2581
2582         ret = stmmac_hw_setup(dev, true);
2583         if (ret < 0) {
2584                 netdev_err(priv->dev, "%s: Hw setup failed\n", __func__);
2585                 goto init_error;
2586         }
2587
2588         stmmac_init_tx_coalesce(priv);
2589
2590         if (dev->phydev)
2591                 phy_start(dev->phydev);
2592
2593         /* Request the IRQ lines */
2594         ret = request_irq(dev->irq, stmmac_interrupt,
2595                           IRQF_SHARED, dev->name, dev);
2596         if (unlikely(ret < 0)) {
2597                 netdev_err(priv->dev,
2598                            "%s: ERROR: allocating the IRQ %d (error: %d)\n",
2599                            __func__, dev->irq, ret);
2600                 goto irq_error;
2601         }
2602
2603         /* Request the Wake IRQ in case another line is used for WoL */
2604         if (priv->wol_irq != dev->irq) {
2605                 ret = request_irq(priv->wol_irq, stmmac_interrupt,
2606                                   IRQF_SHARED, dev->name, dev);
2607                 if (unlikely(ret < 0)) {
2608                         netdev_err(priv->dev,
2609                                    "%s: ERROR: allocating the WoL IRQ %d (%d)\n",
2610                                    __func__, priv->wol_irq, ret);
2611                         goto wolirq_error;
2612                 }
2613         }
2614
2615         /* Request the LPI IRQ in case another line is used for LPI */
2616         if (priv->lpi_irq > 0) {
2617                 ret = request_irq(priv->lpi_irq, stmmac_interrupt, IRQF_SHARED,
2618                                   dev->name, dev);
2619                 if (unlikely(ret < 0)) {
2620                         netdev_err(priv->dev,
2621                                    "%s: ERROR: allocating the LPI IRQ %d (%d)\n",
2622                                    __func__, priv->lpi_irq, ret);
2623                         goto lpiirq_error;
2624                 }
2625         }
2626
2627         stmmac_enable_all_queues(priv);
2628         stmmac_start_all_queues(priv);
2629
2630         return 0;
2631
2632 lpiirq_error:
2633         if (priv->wol_irq != dev->irq)
2634                 free_irq(priv->wol_irq, dev);
2635 wolirq_error:
2636         free_irq(dev->irq, dev);
2637 irq_error:
2638         if (dev->phydev)
2639                 phy_stop(dev->phydev);
2640
2641         del_timer_sync(&priv->txtimer);
2642         stmmac_hw_teardown(dev);
2643 init_error:
2644         free_dma_desc_resources(priv);
2645 dma_desc_error:
2646         if (dev->phydev)
2647                 phy_disconnect(dev->phydev);
2648
2649         return ret;
2650 }
2651
2652 /**
2653  *  stmmac_release - close entry point of the driver
2654  *  @dev : device pointer.
2655  *  Description:
2656  *  This is the stop entry point of the driver.
2657  */
2658 static int stmmac_release(struct net_device *dev)
2659 {
2660         struct stmmac_priv *priv = netdev_priv(dev);
2661
2662         if (priv->eee_enabled)
2663                 del_timer_sync(&priv->eee_ctrl_timer);
2664
2665         /* Stop and disconnect the PHY */
2666         if (dev->phydev) {
2667                 phy_stop(dev->phydev);
2668                 phy_disconnect(dev->phydev);
2669         }
2670
2671         stmmac_stop_all_queues(priv);
2672
2673         stmmac_disable_all_queues(priv);
2674
2675         del_timer_sync(&priv->txtimer);
2676
2677         /* Free the IRQ lines */
2678         free_irq(dev->irq, dev);
2679         if (priv->wol_irq != dev->irq)
2680                 free_irq(priv->wol_irq, dev);
2681         if (priv->lpi_irq > 0)
2682                 free_irq(priv->lpi_irq, dev);
2683
2684         /* Stop TX/RX DMA and clear the descriptors */
2685         stmmac_stop_all_dma(priv);
2686
2687         /* Release and free the Rx/Tx resources */
2688         free_dma_desc_resources(priv);
2689
2690         /* Disable the MAC Rx/Tx */
2691         priv->hw->mac->set_mac(priv->ioaddr, false);
2692
2693         netif_carrier_off(dev);
2694
2695 #ifdef CONFIG_DEBUG_FS
2696         stmmac_exit_fs(dev);
2697 #endif
2698
2699         stmmac_release_ptp(priv);
2700
2701         return 0;
2702 }
2703
2704 /**
2705  *  stmmac_tso_allocator - fill TX descriptors with the TSO payload
2706  *  @priv: driver private structure
2707  *  @des: buffer start address
2708  *  @total_len: total length to fill in descriptors
2709  *  @last_segment: condition for the last descriptor
2710  *  @queue: TX queue index
2711  *  Description:
2712  *  This function fills the descriptors and requests new descriptors
2713  *  according to the buffer length to fill
2714  */
2715 static void stmmac_tso_allocator(struct stmmac_priv *priv, unsigned int des,
2716                                  int total_len, bool last_segment, u32 queue)
2717 {
2718         struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
2719         struct dma_desc *desc;
2720         u32 buff_size;
2721         int tmp_len;
2722
2723         tmp_len = total_len;
2724
2725         while (tmp_len > 0) {
2726                 tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, DMA_TX_SIZE);
2727                 desc = tx_q->dma_tx + tx_q->cur_tx;
2728
2729                 desc->des0 = cpu_to_le32(des + (total_len - tmp_len));
2730                 buff_size = tmp_len >= TSO_MAX_BUFF_SIZE ?
2731                             TSO_MAX_BUFF_SIZE : tmp_len;
2732
2733                 priv->hw->desc->prepare_tso_tx_desc(desc, 0, buff_size,
2734                         0, 1,
2735                         (last_segment) && (tmp_len <= TSO_MAX_BUFF_SIZE),
2736                         0, 0);
2737
2738                 tmp_len -= TSO_MAX_BUFF_SIZE;
2739         }
2740 }
2741
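/*
 * Editor's worked example for the loop above: with TSO_MAX_BUFF_SIZE =
 * SZ_16K - 1 = 16383 bytes, a 40000-byte payload is spread over three
 * descriptors of 16383, 16383 and 7234 bytes; only the last one gets the
 * Last Segment bit when last_segment is set.
 */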
2742 /**
2743  *  stmmac_tso_xmit - Tx entry point of the driver for oversized frames (TSO)
2744  *  @skb : the socket buffer
2745  *  @dev : device pointer
2746  *  Description: this is the transmit function that is called on TSO frames
2747  *  (support available on GMAC4 and newer chips).
2748  *  The diagram below shows the ring programming in case of TSO frames:
2749  *
2750  *  First Descriptor
2751  *   --------
2752  *   | DES0 |---> buffer1 = L2/L3/L4 header
2753  *   | DES1 |---> TCP Payload (can continue on next descr...)
2754  *   | DES2 |---> buffer 1 and 2 len
2755  *   | DES3 |---> must set TSE, TCP hdr len-> [22:19]. TCP payload len [17:0]
2756  *   --------
2757  *      |
2758  *     ...
2759  *      |
2760  *   --------
2761  *   | DES0 | --| Split TCP Payload on Buffers 1 and 2
2762  *   | DES1 | --|
2763  *   | DES2 | --> buffer 1 and 2 len
2764  *   | DES3 |
2765  *   --------
2766  *
2767  * The MSS is fixed while TSO is enabled, so the TDES3 ctx field is only
2768  * reprogrammed when the MSS value changes.
2768  */
2769 static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
2770 {
2771         struct dma_desc *desc, *first, *mss_desc = NULL;
2772         struct stmmac_priv *priv = netdev_priv(dev);
2773         int nfrags = skb_shinfo(skb)->nr_frags;
2774         u32 queue = skb_get_queue_mapping(skb);
2775         unsigned int first_entry, des;
2776         struct stmmac_tx_queue *tx_q;
2777         int tmp_pay_len = 0;
2778         u32 pay_len, mss;
2779         u8 proto_hdr_len;
2780         int i;
2781
2782         tx_q = &priv->tx_queue[queue];
2783
2784         /* Compute header lengths */
2785         proto_hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
2786
        /* Desc availability based on this threshold should be safe enough */
2788         if (unlikely(stmmac_tx_avail(priv, queue) <
2789                 (((skb->len - proto_hdr_len) / TSO_MAX_BUFF_SIZE + 1)))) {
2790                 if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) {
2791                         netif_tx_stop_queue(netdev_get_tx_queue(priv->dev,
2792                                                                 queue));
2793                         /* This is a hard error, log it. */
2794                         netdev_err(priv->dev,
2795                                    "%s: Tx Ring full when queue awake\n",
2796                                    __func__);
2797                 }
2798                 return NETDEV_TX_BUSY;
2799         }
2800
2801         pay_len = skb_headlen(skb) - proto_hdr_len; /* no frags */
2802
2803         mss = skb_shinfo(skb)->gso_size;
2804
2805         /* set new MSS value if needed */
2806         if (mss != priv->mss) {
2807                 mss_desc = tx_q->dma_tx + tx_q->cur_tx;
2808                 priv->hw->desc->set_mss(mss_desc, mss);
2809                 priv->mss = mss;
2810                 tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, DMA_TX_SIZE);
2811         }
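        /* A change of MSS consumes one extra (context) descriptor; its OWN
         * bit is set only after the whole frame has been programmed, see
         * the mss_desc handling below.
         */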
2812
2813         if (netif_msg_tx_queued(priv)) {
2814                 pr_info("%s: tcphdrlen %d, hdr_len %d, pay_len %d, mss %d\n",
2815                         __func__, tcp_hdrlen(skb), proto_hdr_len, pay_len, mss);
2816                 pr_info("\tskb->len %d, skb->data_len %d\n", skb->len,
2817                         skb->data_len);
2818         }
2819
2820         first_entry = tx_q->cur_tx;
2821
2822         desc = tx_q->dma_tx + first_entry;
2823         first = desc;
2824
2825         /* first descriptor: fill Headers on Buf1 */
2826         des = dma_map_single(priv->device, skb->data, skb_headlen(skb),
2827                              DMA_TO_DEVICE);
2828         if (dma_mapping_error(priv->device, des))
2829                 goto dma_map_err;
2830
2831         tx_q->tx_skbuff_dma[first_entry].buf = des;
2832         tx_q->tx_skbuff_dma[first_entry].len = skb_headlen(skb);
2833
2834         first->des0 = cpu_to_le32(des);
2835
2836         /* Fill start of payload in buff2 of first descriptor */
2837         if (pay_len)
2838                 first->des1 = cpu_to_le32(des + proto_hdr_len);
2839
2840         /* If needed take extra descriptors to fill the remaining payload */
2841         tmp_pay_len = pay_len - TSO_MAX_BUFF_SIZE;
2842
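        /* Buffer 2 of the first descriptor accounts for up to
         * TSO_MAX_BUFF_SIZE bytes of payload; stmmac_tso_allocator() maps
         * whatever remains onto additional descriptors.
         */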
2843         stmmac_tso_allocator(priv, des, tmp_pay_len, (nfrags == 0), queue);
2844
2845         /* Prepare fragments */
2846         for (i = 0; i < nfrags; i++) {
2847                 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2848
2849                 des = skb_frag_dma_map(priv->device, frag, 0,
2850                                        skb_frag_size(frag),
2851                                        DMA_TO_DEVICE);
2852                 if (dma_mapping_error(priv->device, des))
2853                         goto dma_map_err;
2854
2855                 stmmac_tso_allocator(priv, des, skb_frag_size(frag),
2856                                      (i == nfrags - 1), queue);
2857
2858                 tx_q->tx_skbuff_dma[tx_q->cur_tx].buf = des;
2859                 tx_q->tx_skbuff_dma[tx_q->cur_tx].len = skb_frag_size(frag);
2860                 tx_q->tx_skbuff[tx_q->cur_tx] = NULL;
2861                 tx_q->tx_skbuff_dma[tx_q->cur_tx].map_as_page = true;
2862         }
2863
2864         tx_q->tx_skbuff_dma[tx_q->cur_tx].last_segment = true;
2865
2866         /* Only the last descriptor gets to point to the skb. */
2867         tx_q->tx_skbuff[tx_q->cur_tx] = skb;
2868
2869         /* We've used all descriptors we need for this skb, however,
2870          * advance cur_tx so that it references a fresh descriptor.
2871          * ndo_start_xmit will fill this descriptor the next time it's
2872          * called and stmmac_tx_clean may clean up to this descriptor.
2873          */
2874         tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, DMA_TX_SIZE);
2875
2876         if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) {
                netif_dbg(priv, hw, priv->dev, "%s: stopping transmit queue\n",
2878                           __func__);
2879                 netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
2880         }
2881
2882         dev->stats.tx_bytes += skb->len;
2883         priv->xstats.tx_tso_frames++;
2884         priv->xstats.tx_tso_nfrags += nfrags;
2885
        /* Manage tx mitigation: raise a completion interrupt only every
         * tx_coal_frames frames; in between, re-arm the coalescing timer so
         * the tx status still gets cleaned up.
         */
2887         priv->tx_count_frames += nfrags + 1;
2888         if (likely(priv->tx_coal_frames > priv->tx_count_frames)) {
2889                 mod_timer(&priv->txtimer,
2890                           STMMAC_COAL_TIMER(priv->tx_coal_timer));
2891         } else {
2892                 priv->tx_count_frames = 0;
2893                 priv->hw->desc->set_tx_ic(desc);
2894                 priv->xstats.tx_set_ic_bit++;
2895         }
2896
2897         skb_tx_timestamp(skb);
2898
2899         if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
2900                      priv->hwts_tx_en)) {
2901                 /* declare that device is doing timestamping */
2902                 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
2903                 priv->hw->desc->enable_tx_timestamp(first);
2904         }
2905
2906         /* Complete the first descriptor before granting the DMA */
2907         priv->hw->desc->prepare_tso_tx_desc(first, 1,
2908                         proto_hdr_len,
2909                         pay_len,
2910                         1, tx_q->tx_skbuff_dma[first_entry].last_segment,
2911                         tcp_hdrlen(skb) / 4, (skb->len - proto_hdr_len));
2912
2913         /* If context desc is used to change MSS */
2914         if (mss_desc)
2915                 priv->hw->desc->set_tx_owner(mss_desc);
2916
        /* The own bit must be the last setting done when preparing the
         * descriptor; a barrier is then needed to make sure everything
         * is coherent before granting control to the DMA engine.
         */
2921         dma_wmb();
2922
2923         if (netif_msg_pktdata(priv)) {
2924                 pr_info("%s: curr=%d dirty=%d f=%d, e=%d, f_p=%p, nfrags %d\n",
2925                         __func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry,
2926                         tx_q->cur_tx, first, nfrags);
2927
2928                 priv->hw->desc->display_ring((void *)tx_q->dma_tx, DMA_TX_SIZE,
2929                                              0);
2930
2931                 pr_info(">>> frame to be transmitted: ");
2932                 print_pkt(skb->data, skb_headlen(skb));
2933         }
2934
2935         netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
2936
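        /* Update the TX tail pointer so the DMA engine fetches the newly
         * programmed descriptors.
         */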
2937         priv->hw->dma->set_tx_tail_ptr(priv->ioaddr, tx_q->tx_tail_addr,
2938                                        queue);
2939
2940         return NETDEV_TX_OK;
2941
2942 dma_map_err:
2943         dev_err(priv->device, "Tx dma map failed\n");
2944         dev_kfree_skb(skb);
2945         priv->dev->stats.tx_dropped++;
2946         return NETDEV_TX_OK;
2947 }
2948
2949 /**
2950  *  stmmac_xmit - Tx entry point of the driver
2951  *  @skb : the socket buffer
2952  *  @dev : device pointer
 *  Description: this is the Tx entry point of the driver.
 *  It programs the descriptors, in chain or ring mode, and supports
 *  oversized frames and the SG feature.
2956  */
2957 static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
2958 {
2959         struct stmmac_priv *priv = netdev_priv(dev);
2960         unsigned int nopaged_len = skb_headlen(skb);
2961         int i, csum_insertion = 0, is_jumbo = 0;
2962         u32 queue = skb_get_queue_mapping(skb);
2963         int nfrags = skb_shinfo(skb)->nr_frags;
2964         int entry;
2965         unsigned int first_entry;
2966         struct dma_desc *desc, *first;
2967         struct stmmac_tx_queue *tx_q;
2968         unsigned int enh_desc;
2969         unsigned int des;
2970
2971         tx_q = &priv->tx_queue[queue];
2972
2973         /* Manage oversized TCP frames for GMAC4 device */
2974         if (skb_is_gso(skb) && priv->tso) {
2975                 if (skb_shinfo(skb)->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))
2976                         return stmmac_tso_xmit(skb, dev);
2977         }
2978
2979         if (unlikely(stmmac_tx_avail(priv, queue) < nfrags + 1)) {
2980                 if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) {
2981                         netif_tx_stop_queue(netdev_get_tx_queue(priv->dev,
2982                                                                 queue));
2983                         /* This is a hard error, log it. */
2984                         netdev_err(priv->dev,
2985                                    "%s: Tx Ring full when queue awake\n",
2986                                    __func__);
2987                 }
2988                 return NETDEV_TX_BUSY;
2989         }
2990
2991         if (priv->tx_path_in_lpi_mode)
2992                 stmmac_disable_eee_mode(priv);
2993
2994         entry = tx_q->cur_tx;
2995         first_entry = entry;
2996
2997         csum_insertion = (skb->ip_summed == CHECKSUM_PARTIAL);
2998
2999         if (likely(priv->extend_desc))
3000                 desc = (struct dma_desc *)(tx_q->dma_etx + entry);
3001         else
3002                 desc = tx_q->dma_tx + entry;
3003
3004         first = desc;
3005
3006         enh_desc = priv->plat->enh_desc;
3007         /* To program the descriptors according to the size of the frame */
3008         if (enh_desc)
3009                 is_jumbo = priv->hw->mode->is_jumbo_frm(skb->len, enh_desc);
3010
3011         if (unlikely(is_jumbo) && likely(priv->synopsys_id <
3012                                          DWMAC_CORE_4_00)) {
3013                 entry = priv->hw->mode->jumbo_frm(tx_q, skb, csum_insertion);
3014                 if (unlikely(entry < 0))
3015                         goto dma_map_err;
3016         }
3017
3018         for (i = 0; i < nfrags; i++) {
3019                 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
3020                 int len = skb_frag_size(frag);
3021                 bool last_segment = (i == (nfrags - 1));
3022
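                /* Move to the next ring entry (wraps at DMA_TX_SIZE) */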
3023                 entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE);
3024
3025                 if (likely(priv->extend_desc))
3026                         desc = (struct dma_desc *)(tx_q->dma_etx + entry);
3027                 else
3028                         desc = tx_q->dma_tx + entry;
3029
3030                 des = skb_frag_dma_map(priv->device, frag, 0, len,
3031                                        DMA_TO_DEVICE);
3032                 if (dma_mapping_error(priv->device, des))
3033                         goto dma_map_err; /* should reuse desc w/o issues */
3034
3035                 tx_q->tx_skbuff[entry] = NULL;
3036
3037                 tx_q->tx_skbuff_dma[entry].buf = des;
3038                 if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00))
3039                         desc->des0 = cpu_to_le32(des);
3040                 else
3041                         desc->des2 = cpu_to_le32(des);
3042
3043                 tx_q->tx_skbuff_dma[entry].map_as_page = true;
3044                 tx_q->tx_skbuff_dma[entry].len = len;
3045                 tx_q->tx_skbuff_dma[entry].last_segment = last_segment;
3046
3047                 /* Prepare the descriptor and set the own bit too */
3048                 priv->hw->desc->prepare_tx_desc(desc, 0, len, csum_insertion,
3049                                                 priv->mode, 1, last_segment,
3050                                                 skb->len);
3051         }
3052
3053         /* Only the last descriptor gets to point to the skb. */
3054         tx_q->tx_skbuff[entry] = skb;
3055
3056         /* We've used all descriptors we need for this skb, however,
3057          * advance cur_tx so that it references a fresh descriptor.
3058          * ndo_start_xmit will fill this descriptor the next time it's
3059          * called and stmmac_tx_clean may clean up to this descriptor.
3060          */
3061         entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE);
3062         tx_q->cur_tx = entry;
3063
3064         if (netif_msg_pktdata(priv)) {
3065                 void *tx_head;
3066
3067                 netdev_dbg(priv->dev,
3068                            "%s: curr=%d dirty=%d f=%d, e=%d, first=%p, nfrags=%d",
3069                            __func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry,
3070                            entry, first, nfrags);
3071
3072                 if (priv->extend_desc)
3073                         tx_head = (void *)tx_q->dma_etx;
3074                 else
3075                         tx_head = (void *)tx_q->dma_tx;
3076
3077                 priv->hw->desc->display_ring(tx_head, DMA_TX_SIZE, false);
3078
3079                 netdev_dbg(priv->dev, ">>> frame to be transmitted: ");
3080                 print_pkt(skb->data, skb->len);
3081         }
3082
3083         if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) {
                netif_dbg(priv, hw, priv->dev, "%s: stopping transmit queue\n",
3085                           __func__);
3086                 netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
3087         }
3088
3089         dev->stats.tx_bytes += skb->len;
3090
        /* According to the coalesce parameter, the IC bit for the latest
         * segment is reset and the timer is re-started to clean the tx
         * status. This approach takes care of the fragments: desc points
         * to the first element in the case of no SG.
         */
3096         priv->tx_count_frames += nfrags + 1;
3097         if (likely(priv->tx_coal_frames > priv->tx_count_frames)) {
3098                 mod_timer(&priv->txtimer,
3099                           STMMAC_COAL_TIMER(priv->tx_coal_timer));
3100         } else {
3101                 priv->tx_count_frames = 0;
3102                 priv->hw->desc->set_tx_ic(desc);
3103                 priv->xstats.tx_set_ic_bit++;
3104         }
3105
3106         skb_tx_timestamp(skb);
3107
3108         /* Ready to fill the first descriptor and set the OWN bit w/o any
3109          * problems because all the descriptors are actually ready to be
3110          * passed to the DMA engine.
3111          */
3112         if (likely(!is_jumbo)) {
3113                 bool last_segment = (nfrags == 0);
3114
3115                 des = dma_map_single(priv->device, skb->data,
3116                                      nopaged_len, DMA_TO_DEVICE);
3117                 if (dma_mapping_error(priv->device, des))
3118                         goto dma_map_err;
3119
3120                 tx_q->tx_skbuff_dma[first_entry].buf = des;
3121                 if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00))
3122                         first->des0 = cpu_to_le32(des);
3123                 else
3124                         first->des2 = cpu_to_le32(des);
3125
3126                 tx_q->tx_skbuff_dma[first_entry].len = nopaged_len;
3127                 tx_q->tx_skbuff_dma[first_entry].last_segment = last_segment;
3128
3129                 if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
3130                              priv->hwts_tx_en)) {
3131                         /* declare that device is doing timestamping */
3132                         skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
3133                         priv->hw->desc->enable_tx_timestamp(first);
3134                 }
3135
3136                 /* Prepare the first descriptor setting the OWN bit too */
3137                 priv->hw->desc->prepare_tx_desc(first, 1, nopaged_len,
3138                                                 csum_insertion, priv->mode, 1,
3139                                                 last_segment, skb->len);
3140
                /* The own bit must be the last setting done when preparing
                 * the descriptor; a barrier is then needed to make sure
                 * everything is coherent before granting control to the
                 * DMA engine.
                 */
3145                 dma_wmb();
3146         }
3147
3148         netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
3149
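        /* Kick the DMA: cores older than 4.00 use the transmit poll demand
         * mechanism, while GMAC4 and newer use the per-queue tail pointer.
         */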
3150         if (priv->synopsys_id < DWMAC_CORE_4_00)
3151                 priv->hw->dma->enable_dma_transmission(priv->ioaddr);
3152         else
3153                 priv->hw->dma->set_tx_tail_ptr(priv->ioaddr, tx_q->tx_tail_addr,
3154                                                queue);
3155
3156         return NETDEV_TX_OK;
3157
3158 dma_map_err:
3159         netdev_err(priv->dev, "Tx DMA map failed\n");
3160         dev_kfree_skb(skb);
3161         priv->dev->stats.tx_dropped++;
3162         return NETDEV_TX_OK;
3163 }
3164
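/**
 *  stmmac_rx_vlan - strip the VLAN tag from a received frame
 *  @dev: device pointer
 *  @skb: the socket buffer
 *  Description: if HW VLAN RX offloading is enabled and the frame carries
 *  a VLAN tag, pop the tag from the packet data and pass it to the stack
 *  via the hwaccel helper.
 */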
3165 static void stmmac_rx_vlan(struct net_device *dev, struct sk_buff *skb)
3166 {
3167         struct ethhdr *ehdr;
3168         u16 vlanid;
3169
3170         if ((dev->features & NETIF_F_HW_VLAN_CTAG_RX) ==
3171             NETIF_F_HW_VLAN_CTAG_RX &&
3172             !__vlan_get_tag(skb, &vlanid)) {
3173                 /* pop the vlan tag */
3174                 ehdr = (struct ethhdr *)skb->data;
3175                 memmove(skb->data + VLAN_HLEN, ehdr, ETH_ALEN * 2);
3176                 skb_pull(skb, VLAN_HLEN);
3177                 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlanid);
3178         }
3179 }
3180
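/* Return 1 when the zero-copy threshold has been reached: after an skb
 * allocation failure (see stmmac_rx_refill()) received frames are copied
 * for a while instead of being handed up zero-copy.
 */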
3182 static inline int stmmac_rx_threshold_count(struct stmmac_rx_queue *rx_q)
3183 {
3184         if (rx_q->rx_zeroc_thresh < STMMAC_RX_THRESH)
3185                 return 0;
3186
3187         return 1;
3188 }
3189
3190 /**
3191  * stmmac_rx_refill - refill used skb preallocated buffers
3192  * @priv: driver private structure
3193  * @queue: RX queue index
 * Description: this reallocates the skbs used by the zero-copy
 * reception process.
3196  */
3197 static inline void stmmac_rx_refill(struct stmmac_priv *priv, u32 queue)
3198 {
3199         struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
3200         int dirty = stmmac_rx_dirty(priv, queue);
3201         unsigned int entry = rx_q->dirty_rx;
3202
3203         int bfsize = priv->dma_buf_sz;
3204
3205         while (dirty-- > 0) {
3206                 struct dma_desc *p;
3207
3208                 if (priv->extend_desc)
3209                         p = (struct dma_desc *)(rx_q->dma_erx + entry);
3210                 else
3211                         p = rx_q->dma_rx + entry;
3212
3213                 if (likely(!rx_q->rx_skbuff[entry])) {
3214                         struct sk_buff *skb;
3215
3216                         skb = netdev_alloc_skb_ip_align(priv->dev, bfsize);
3217                         if (unlikely(!skb)) {
3218                                 /* so for a while no zero-copy! */
3219                                 rx_q->rx_zeroc_thresh = STMMAC_RX_THRESH;
3220                                 if (unlikely(net_ratelimit()))
3221                                         dev_err(priv->device,
3222                                                 "fail to alloc skb entry %d\n",
3223                                                 entry);
3224                                 break;
3225                         }
3226
3227                         rx_q->rx_skbuff[entry] = skb;
3228                         rx_q->rx_skbuff_dma[entry] =
3229                             dma_map_single(priv->device, skb->data, bfsize,
3230                                            DMA_FROM_DEVICE);
3231                         if (dma_mapping_error(priv->device,
3232                                               rx_q->rx_skbuff_dma[entry])) {
3233                                 netdev_err(priv->dev, "Rx DMA map failed\n");
3234                                 dev_kfree_skb(skb);
3235                                 break;
3236                         }
3237
3238                         if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00)) {
3239                                 p->des0 = cpu_to_le32(rx_q->rx_skbuff_dma[entry]);
3240                                 p->des1 = 0;
3241                         } else {
3242                                 p->des2 = cpu_to_le32(rx_q->rx_skbuff_dma[entry]);
3243                         }
3244                         if (priv->hw->mode->refill_desc3)
3245                                 priv->hw->mode->refill_desc3(rx_q, p);
3246
3247                         if (rx_q->rx_zeroc_thresh > 0)
3248                                 rx_q->rx_zeroc_thresh--;
3249
3250                         netif_dbg(priv, rx_status, priv->dev,
3251                                   "refill entry #%d\n", entry);
3252                 }
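                /* Ensure the buffer address writes above are visible to the
                 * device before ownership of the descriptor is given back.
                 */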
3253                 dma_wmb();
3254
3255                 if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00))
3256                         priv->hw->desc->init_rx_desc(p, priv->use_riwt, 0, 0);
3257                 else
3258                         priv->hw->desc->set_rx_owner(p);
3259
3260                 dma_wmb();
3261
3262                 entry = STMMAC_GET_ENTRY(entry, DMA_RX_SIZE);
3263         }
3264         rx_q->dirty_rx = entry;
3265 }
3266
3267 /**
3268  * stmmac_rx - manage the receive process
3269  * @priv: driver private structure
 * @limit: NAPI budget
3271  * @queue: RX queue index.
 * Description: this is the function called by the NAPI poll method.
3273  * It gets all the frames inside the ring.
3274  */
3275 static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
3276 {
3277         struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
3278         unsigned int entry = rx_q->cur_rx;
3279         int coe = priv->hw->rx_csum;
3280         unsigned int next_entry;
3281         unsigned int count = 0;
3282
3283         if (netif_msg_rx_status(priv)) {
3284                 void *rx_head;
3285
3286                 netdev_dbg(priv->dev, "%s: descriptor ring:\n", __func__);
3287                 if (priv->extend_desc)
3288                         rx_head = (void *)rx_q->dma_erx;
3289                 else
3290                         rx_head = (void *)rx_q->dma_rx;
3291
3292                 priv->hw->desc->display_ring(rx_head, DMA_RX_SIZE, true);
3293         }
3294         while (count < limit) {
3295                 int status;
3296                 struct dma_desc *p;
3297                 struct dma_desc *np;
3298
3299                 if (priv->extend_desc)
3300                         p = (struct dma_desc *)(rx_q->dma_erx + entry);
3301                 else
3302                         p = rx_q->dma_rx + entry;
3303
3304                 /* read the status of the incoming frame */
3305                 status = priv->hw->desc->rx_status(&priv->dev->stats,
3306                                                    &priv->xstats, p);
                /* if the descriptor is still owned by the DMA, stop here */
3308                 if (unlikely(status & dma_own))
3309                         break;
3310
3311                 count++;
3312
3313                 rx_q->cur_rx = STMMAC_GET_ENTRY(rx_q->cur_rx, DMA_RX_SIZE);
3314                 next_entry = rx_q->cur_rx;
3315
3316                 if (priv->extend_desc)
3317                         np = (struct dma_desc *)(rx_q->dma_erx + next_entry);
3318                 else
3319                         np = rx_q->dma_rx + next_entry;
3320
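                /* prefetch the descriptor that will be processed next */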
3321                 prefetch(np);
3322
3323                 if ((priv->extend_desc) && (priv->hw->desc->rx_extended_status))
3324                         priv->hw->desc->rx_extended_status(&priv->dev->stats,
3325                                                            &priv->xstats,
3326                                                            rx_q->dma_erx +
3327                                                            entry);
3328                 if (unlikely(status == discard_frame)) {
3329                         priv->dev->stats.rx_errors++;
3330                         if (priv->hwts_rx_en && !priv->extend_desc) {
3331                                 /* DESC2 & DESC3 will be overwritten by device
3332                                  * with timestamp value, hence reinitialize
3333                                  * them in stmmac_rx_refill() function so that
3334                                  * device can reuse it.
3335                                  */
3336                                 dev_kfree_skb_any(rx_q->rx_skbuff[entry]);
3337                                 rx_q->rx_skbuff[entry] = NULL;
3338                                 dma_unmap_single(priv->device,
3339                                                  rx_q->rx_skbuff_dma[entry],
3340                                                  priv->dma_buf_sz,
3341                                                  DMA_FROM_DEVICE);
3342                         }
3343                 } else {
3344                         struct sk_buff *skb;
3345                         int frame_len;
3346                         unsigned int des;
3347
3348                         if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00))
3349                                 des = le32_to_cpu(p->des0);
3350                         else
3351                                 des = le32_to_cpu(p->des2);
3352
3353                         frame_len = priv->hw->desc->get_rx_frame_len(p, coe);
3354
3355                         /*  If frame length is greater than skb buffer size
3356                          *  (preallocated during init) then the packet is
3357                          *  ignored
3358                          */
3359                         if (frame_len > priv->dma_buf_sz) {
3360                                 netdev_err(priv->dev,
3361                                            "len %d larger than size (%d)\n",
3362                                            frame_len, priv->dma_buf_sz);
3363                                 priv->dev->stats.rx_length_errors++;
3364                                 break;
3365                         }
3366
3367                         /* ACS is set; GMAC core strips PAD/FCS for IEEE 802.3
3368                          * Type frames (LLC/LLC-SNAP)
3369                          */
3370                         if (unlikely(status != llc_snap))
3371                                 frame_len -= ETH_FCS_LEN;
3372
3373                         if (netif_msg_rx_status(priv)) {
3374                                 netdev_dbg(priv->dev, "\tdesc: %p [entry %d] buff=0x%x\n",
3375                                            p, entry, des);
3376                                 if (frame_len > ETH_FRAME_LEN)
3377                                         netdev_dbg(priv->dev, "frame size %d, COE: %d\n",
3378                                                    frame_len, status);
3379                         }
3380
                        /* On GMAC4, zero-copy is always used, whatever the
                         * frame size, because the used descriptors must
                         * always be refilled.
                         */
3385                         if (unlikely(!priv->plat->has_gmac4 &&
3386                                      ((frame_len < priv->rx_copybreak) ||
3387                                      stmmac_rx_threshold_count(rx_q)))) {
3388                                 skb = netdev_alloc_skb_ip_align(priv->dev,
3389                                                                 frame_len);
3390                                 if (unlikely(!skb)) {
3391                                         if (net_ratelimit())
3392                                                 dev_warn(priv->device,
3393                                                          "packet dropped\n");
3394                                         priv->dev->stats.rx_dropped++;
3395                                         break;
3396                                 }
3397
3398                                 dma_sync_single_for_cpu(priv->device,
3399                                                         rx_q->rx_skbuff_dma
3400                                                         [entry], frame_len,
3401                                                         DMA_FROM_DEVICE);
3402                                 skb_copy_to_linear_data(skb,
3403                                                         rx_q->
3404                                                         rx_skbuff[entry]->data,
3405                                                         frame_len);
3406
3407                                 skb_put(skb, frame_len);
3408                                 dma_sync_single_for_device(priv->device,
3409                                                            rx_q->rx_skbuff_dma
3410                                                            [entry], frame_len,
3411                                                            DMA_FROM_DEVICE);
3412                         } else {
3413                                 skb = rx_q->rx_skbuff[entry];
3414                                 if (unlikely(!skb)) {
3415                                         netdev_err(priv->dev,
3416                                                    "%s: Inconsistent Rx chain\n",
3417                                                    priv->dev->name);
3418                                         priv->dev->stats.rx_dropped++;
3419                                         break;
3420                                 }
3421                                 prefetch(skb->data - NET_IP_ALIGN);
3422                                 rx_q->rx_skbuff[entry] = NULL;
3423                                 rx_q->rx_zeroc_thresh++;
3424
3425                                 skb_put(skb, frame_len);
3426                                 dma_unmap_single(priv->device,
3427                                                  rx_q->rx_skbuff_dma[entry],
3428                                                  priv->dma_buf_sz,
3429                                                  DMA_FROM_DEVICE);
3430                         }
3431
3432                         if (netif_msg_pktdata(priv)) {
3433                                 netdev_dbg(priv->dev, "frame received (%dbytes)",
3434                                            frame_len);
3435                                 print_pkt(skb->data, frame_len);
3436                         }
3437
3438                         stmmac_get_rx_hwtstamp(priv, p, np, skb);
3439
3440                         stmmac_rx_vlan(priv->dev, skb);
3441
3442                         skb->protocol = eth_type_trans(skb, priv->dev);
3443
3444                         if (unlikely(!coe))
3445                                 skb_checksum_none_assert(skb);
3446                         else
3447                                 skb->ip_summed = CHECKSUM_UNNECESSARY;
3448
3449                         napi_gro_receive(&rx_q->napi, skb);
3450
3451                         priv->dev->stats.rx_packets++;
3452                         priv->dev->stats.rx_bytes += frame_len;
3453                 }
3454                 entry = next_entry;
3455         }
3456
3457         stmmac_rx_refill(priv, queue);
3458
3459         priv->xstats.rx_pkt_n += count;
3460
3461         return count;
3462 }
3463
3464 /**
3465  *  stmmac_poll - stmmac poll method (NAPI)
3466  *  @napi : pointer to the napi structure.
3467  *  @budget : maximum number of packets that the current CPU can receive from
3468  *            all interfaces.
 *  Description:
 *  Process the incoming frames and clean up the Tx resources.
3471  */
3472 static int stmmac_poll(struct napi_struct *napi, int budget)
3473 {
3474         struct stmmac_rx_queue *rx_q =
3475                 container_of(napi, struct stmmac_rx_queue, napi);
3476         struct stmmac_priv *priv = rx_q->priv_data;
3477         u32 tx_count = priv->plat->tx_queues_to_use;
3478         u32 chan = rx_q->queue_index;
3479         int work_done = 0;
3480         u32 queue;
3481
3482         priv->xstats.napi_poll++;
3483
        /* Clean the Tx path for all the queues */
3485         for (queue = 0; queue < tx_count; queue++)
3486                 stmmac_tx_clean(priv, queue);
3487
3488         work_done = stmmac_rx(priv, budget, rx_q->queue_index);
3489         if (work_done < budget) {
3490                 napi_complete_done(napi, work_done);
3491                 stmmac_enable_dma_irq(priv, chan);
3492         }
3493         return work_done;
3494 }
3495
3496 /**
3497  *  stmmac_tx_timeout
3498  *  @dev : Pointer to net device structure
3499  *  Description: this function is called when a packet transmission fails to
3500  *   complete within a reasonable time. The driver will mark the error in the
3501  *   netdev structure and arrange for the device to be reset to a sane state
3502  *   in order to transmit a new packet.
3503  */
3504 static void stmmac_tx_timeout(struct net_device *dev)
3505 {
3506         struct stmmac_priv *priv = netdev_priv(dev);
3507         u32 tx_count = priv->plat->tx_queues_to_use;
3508         u32 chan;
3509
3510         /* Clear Tx resources and restart transmitting again */
3511         for (chan = 0; chan < tx_count; chan++)
3512                 stmmac_tx_err(priv, chan);
3513 }
3514
3515 /**
3516  *  stmmac_set_rx_mode - entry point for multicast addressing
3517  *  @dev : pointer to the device structure
3518  *  Description:
3519  *  This function is a driver entry point which gets called by the kernel
3520  *  whenever multicast addresses must be enabled/disabled.
3521  *  Return value:
3522  *  void.
3523  */
3524 static void stmmac_set_rx_mode(struct net_device *dev)
3525 {
3526         struct stmmac_priv *priv = netdev_priv(dev);
3527
3528         priv->hw->mac->set_filter(priv->hw, dev);
3529 }
3530
3531 /**
3532  *  stmmac_change_mtu - entry point to change MTU size for the device.
3533  *  @dev : device pointer.
3534  *  @new_mtu : the new MTU size for the device.
3535  *  Description: the Maximum Transfer Unit (MTU) is used by the network layer
3536  *  to drive packet transmission. Ethernet has an MTU of 1500 octets
3537  *  (ETH_DATA_LEN). This value can be changed with ifconfig.
3538  *  Return value:
3539  *  0 on success and an appropriate (-)ve integer as defined in errno.h
3540  *  file on failure.
3541  */
3542 static int stmmac_change_mtu(struct net_device *dev, int new_mtu)
3543 {
3544         struct stmmac_priv *priv = netdev_priv(dev);
3545
3546         if (netif_running(dev)) {
3547                 netdev_err(priv->dev, "must be stopped to change its MTU\n");
3548                 return -EBUSY;
3549         }
3550
3551         dev->mtu = new_mtu;
3552
3553         netdev_update_features(dev);
3554
3555         return 0;
3556 }
3557
3558 static netdev_features_t stmmac_fix_features(struct net_device *dev,
3559                                              netdev_features_t features)
3560 {
3561         struct stmmac_priv *priv = netdev_priv(dev);
3562
3563         if (priv->plat->rx_coe == STMMAC_RX_COE_NONE)
3564                 features &= ~NETIF_F_RXCSUM;
3565
3566         if (!priv->plat->tx_coe)
3567                 features &= ~NETIF_F_CSUM_MASK;
3568
        /* Some GMAC devices have buggy Jumbo frame support that
         * needs to have the Tx COE disabled for oversized frames
         * (due to limited buffer sizes). In this case we disable
         * the TX csum insertion in the TDES and do not use SF.
         */
3574         if (priv->plat->bugged_jumbo && (dev->mtu > ETH_DATA_LEN))
3575                 features &= ~NETIF_F_CSUM_MASK;
3576
3577         /* Disable tso if asked by ethtool */
3578         if ((priv->plat->tso_en) && (priv->dma_cap.tsoen)) {
3579                 if (features & NETIF_F_TSO)
3580                         priv->tso = true;
3581                 else
3582                         priv->tso = false;
3583         }
3584
3585         return features;
3586 }
3587
3588 static int stmmac_set_features(struct net_device *netdev,
3589                                netdev_features_t features)
3590 {
3591         struct stmmac_priv *priv = netdev_priv(netdev);
3592
        /* Keep the COE Type if RX checksum offload is supported */
3594         if (features & NETIF_F_RXCSUM)
3595                 priv->hw->rx_csum = priv->plat->rx_coe;
3596         else
3597                 priv->hw->rx_csum = 0;
        /* No check is needed because rx_coe has been set earlier and will
         * have been fixed in case of any issue.
         */
3601         priv->hw->mac->rx_ipc(priv->hw);
3602
3603         return 0;
3604 }
3605
3606 /**
3607  *  stmmac_interrupt - main ISR
3608  *  @irq: interrupt number.
3609  *  @dev_id: to pass the net device pointer.
3610  *  Description: this is the main driver interrupt service routine.
3611  *  It can call:
3612  *  o DMA service routine (to manage incoming frame reception and transmission
3613  *    status)
3614  *  o Core interrupts to manage: remote wake-up, management counter, LPI
3615  *    interrupts.
3616  */
3617 static irqreturn_t stmmac_interrupt(int irq, void *dev_id)
3618 {
        struct net_device *dev = (struct net_device *)dev_id;
        struct stmmac_priv *priv;
        u32 rx_cnt, tx_cnt;
        u32 queues_count;
        u32 queue;

        /* Check the dev pointer before dereferencing it via netdev_priv() */
        if (unlikely(!dev)) {
                pr_err("%s: invalid dev pointer\n", __func__);
                return IRQ_NONE;
        }

        priv = netdev_priv(dev);
        rx_cnt = priv->plat->rx_queues_to_use;
        tx_cnt = priv->plat->tx_queues_to_use;
        queues_count = (rx_cnt > tx_cnt) ? rx_cnt : tx_cnt;

        if (priv->irq_wake)
                pm_wakeup_event(priv->device, 0);
3635
3636         /* To handle GMAC own interrupts */
3637         if ((priv->plat->has_gmac) || (priv->plat->has_gmac4)) {
3638                 int status = priv->hw->mac->host_irq_status(priv->hw,
3639                                                             &priv->xstats);
3640
3641                 if (unlikely(status)) {
3642                         /* For LPI we need to save the tx status */
3643                         if (status & CORE_IRQ_TX_PATH_IN_LPI_MODE)
3644                                 priv->tx_path_in_lpi_mode = true;
3645                         if (status & CORE_IRQ_TX_PATH_EXIT_LPI_MODE)
3646                                 priv->tx_path_in_lpi_mode = false;
3647                 }
3648
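                /* GMAC4 and newer: check the per-queue MTL interrupt status;
                 * on an RX FIFO overflow, re-write the RX tail pointer so
                 * that the DMA restarts filling the queue.
                 */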
3649                 if (priv->synopsys_id >= DWMAC_CORE_4_00) {
3650                         for (queue = 0; queue < queues_count; queue++) {
3651                                 struct stmmac_rx_queue *rx_q =
3652                                 &priv->rx_queue[queue];
3653
3654                                 status |=
3655                                 priv->hw->mac->host_mtl_irq_status(priv->hw,
3656                                                                    queue);
3657
3658                                 if (status & CORE_IRQ_MTL_RX_OVERFLOW &&
3659                                     priv->hw->dma->set_rx_tail_ptr)
3660                                         priv->hw->dma->set_rx_tail_ptr(priv->ioaddr,
3661                                                                 rx_q->rx_tail_addr,
3662                                                                 queue);
3663                         }
3664                 }
3665
3666                 /* PCS link status */
3667                 if (priv->hw->pcs) {
3668                         if (priv->xstats.pcs_link)
3669                                 netif_carrier_on(dev);
3670                         else
3671                                 netif_carrier_off(dev);
3672                 }
3673         }
3674
3675         /* To handle DMA interrupts */
3676         stmmac_dma_interrupt(priv);
3677
3678         return IRQ_HANDLED;
3679 }
3680
3681 #ifdef CONFIG_NET_POLL_CONTROLLER
3682 /* Polling receive - used by NETCONSOLE and other diagnostic tools
3683  * to allow network I/O with interrupts disabled.
3684  */
3685 static void stmmac_poll_controller(struct net_device *dev)
3686 {
3687         disable_irq(dev->irq);
3688         stmmac_interrupt(dev->irq, dev);
3689         enable_irq(dev->irq);
3690 }
3691 #endif
3692
3693 /**
3694  *  stmmac_ioctl - Entry point for the Ioctl
3695  *  @dev: Device pointer.
 *  @rq: An IOCTL-specific structure that can contain a pointer to
3697  *  a proprietary structure used to pass information to the driver.
3698  *  @cmd: IOCTL command
3699  *  Description:
3700  *  Currently it supports the phy_mii_ioctl(...) and HW time stamping.
3701  */
3702 static int stmmac_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
3703 {
3704         int ret = -EOPNOTSUPP;
3705
3706         if (!netif_running(dev))
3707                 return -EINVAL;
3708
3709         switch (cmd) {
3710         case SIOCGMIIPHY:
3711         case SIOCGMIIREG:
3712         case SIOCSMIIREG:
3713                 if (!dev->phydev)
3714                         return -EINVAL;
3715                 ret = phy_mii_ioctl(dev->phydev, rq, cmd);
3716                 break;
3717         case SIOCSHWTSTAMP:
3718                 ret = stmmac_hwtstamp_ioctl(dev, rq);
3719                 break;
3720         default:
3721                 break;
3722         }
3723
3724         return ret;
3725 }
3726
3727 #ifdef CONFIG_DEBUG_FS
3728 static struct dentry *stmmac_fs_dir;
3729
3730 static void sysfs_display_ring(void *head, int size, int extend_desc,
3731                                struct seq_file *seq)
3732 {
3733         int i;
3734         struct dma_extended_desc *ep = (struct dma_extended_desc *)head;
3735         struct dma_desc *p = (struct dma_desc *)head;
3736
3737         for (i = 0; i < size; i++) {
3738                 if (extend_desc) {
3739                         seq_printf(seq, "%d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n",
3740                                    i, (unsigned int)virt_to_phys(ep),
3741                                    le32_to_cpu(ep->basic.des0),
3742                                    le32_to_cpu(ep->basic.des1),
3743                                    le32_to_cpu(ep->basic.des2),
3744                                    le32_to_cpu(ep->basic.des3));
3745                         ep++;
3746                 } else {
3747                         seq_printf(seq, "%d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n",
3748                                    i, (unsigned int)virt_to_phys(p),
3749                                    le32_to_cpu(p->des0), le32_to_cpu(p->des1),
3750                                    le32_to_cpu(p->des2), le32_to_cpu(p->des3));
3751                         p++;
3752                 }
3753                 seq_printf(seq, "\n");
3754         }
3755 }
3756
3757 static int stmmac_sysfs_ring_read(struct seq_file *seq, void *v)
3758 {
3759         struct net_device *dev = seq->private;
3760         struct stmmac_priv *priv = netdev_priv(dev);
3761         u32 rx_count = priv->plat->rx_queues_to_use;
3762         u32 tx_count = priv->plat->tx_queues_to_use;
3763         u32 queue;
3764
3765         for (queue = 0; queue < rx_count; queue++) {
3766                 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
3767
3768                 seq_printf(seq, "RX Queue %d:\n", queue);
3769
3770                 if (priv->extend_desc) {
3771                         seq_printf(seq, "Extended descriptor ring:\n");
3772                         sysfs_display_ring((void *)rx_q->dma_erx,
3773                                            DMA_RX_SIZE, 1, seq);
3774                 } else {
3775                         seq_printf(seq, "Descriptor ring:\n");
3776                         sysfs_display_ring((void *)rx_q->dma_rx,
3777                                            DMA_RX_SIZE, 0, seq);
3778                 }
3779         }
3780
3781         for (queue = 0; queue < tx_count; queue++) {
3782                 struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
3783
3784                 seq_printf(seq, "TX Queue %d:\n", queue);
3785
3786                 if (priv->extend_desc) {
3787                         seq_printf(seq, "Extended descriptor ring:\n");
3788                         sysfs_display_ring((void *)tx_q->dma_etx,
3789                                            DMA_TX_SIZE, 1, seq);
3790                 } else {
3791                         seq_printf(seq, "Descriptor ring:\n");
3792                         sysfs_display_ring((void *)tx_q->dma_tx,
3793                                            DMA_TX_SIZE, 0, seq);
3794                 }
3795         }
3796
3797         return 0;
3798 }
3799
3800 static int stmmac_sysfs_ring_open(struct inode *inode, struct file *file)
3801 {
3802         return single_open(file, stmmac_sysfs_ring_read, inode->i_private);
3803 }
3804
3805 /* Debugfs files, should appear in /sys/kernel/debug/stmmaceth/eth0 */
3806
3807 static const struct file_operations stmmac_rings_status_fops = {
3808         .owner = THIS_MODULE,
3809         .open = stmmac_sysfs_ring_open,
3810         .read = seq_read,
3811         .llseek = seq_lseek,
3812         .release = single_release,
3813 };
3814
3815 static int stmmac_sysfs_dma_cap_read(struct seq_file *seq, void *v)
3816 {
3817         struct net_device *dev = seq->private;
3818         struct stmmac_priv *priv = netdev_priv(dev);
3819
3820         if (!priv->hw_cap_support) {
3821                 seq_printf(seq, "DMA HW features not supported\n");
3822                 return 0;
3823         }
3824
3825         seq_printf(seq, "==============================\n");
3826         seq_printf(seq, "\tDMA HW features\n");
3827         seq_printf(seq, "==============================\n");
3828
3829         seq_printf(seq, "\t10/100 Mbps: %s\n",
3830                    (priv->dma_cap.mbps_10_100) ? "Y" : "N");
3831         seq_printf(seq, "\t1000 Mbps: %s\n",
3832                    (priv->dma_cap.mbps_1000) ? "Y" : "N");
3833         seq_printf(seq, "\tHalf duplex: %s\n",
3834                    (priv->dma_cap.half_duplex) ? "Y" : "N");
3835         seq_printf(seq, "\tHash Filter: %s\n",
3836                    (priv->dma_cap.hash_filter) ? "Y" : "N");
3837         seq_printf(seq, "\tMultiple MAC address registers: %s\n",
3838                    (priv->dma_cap.multi_addr) ? "Y" : "N");
3839         seq_printf(seq, "\tPCS (TBI/SGMII/RTBI PHY interfaces): %s\n",
3840                    (priv->dma_cap.pcs) ? "Y" : "N");
3841         seq_printf(seq, "\tSMA (MDIO) Interface: %s\n",
3842                    (priv->dma_cap.sma_mdio) ? "Y" : "N");
3843         seq_printf(seq, "\tPMT Remote wake up: %s\n",
3844                    (priv->dma_cap.pmt_remote_wake_up) ? "Y" : "N");
3845         seq_printf(seq, "\tPMT Magic Frame: %s\n",
3846                    (priv->dma_cap.pmt_magic_frame) ? "Y" : "N");
3847         seq_printf(seq, "\tRMON module: %s\n",
3848                    (priv->dma_cap.rmon) ? "Y" : "N");
3849         seq_printf(seq, "\tIEEE 1588-2002 Time Stamp: %s\n",
3850                    (priv->dma_cap.time_stamp) ? "Y" : "N");
3851         seq_printf(seq, "\tIEEE 1588-2008 Advanced Time Stamp: %s\n",
3852                    (priv->dma_cap.atime_stamp) ? "Y" : "N");
3853         seq_printf(seq, "\t802.3az - Energy-Efficient Ethernet (EEE): %s\n",
3854                    (priv->dma_cap.eee) ? "Y" : "N");
3855         seq_printf(seq, "\tAV features: %s\n", (priv->dma_cap.av) ? "Y" : "N");
3856         seq_printf(seq, "\tChecksum Offload in TX: %s\n",
3857                    (priv->dma_cap.tx_coe) ? "Y" : "N");
3858         if (priv->synopsys_id >= DWMAC_CORE_4_00) {
3859                 seq_printf(seq, "\tIP Checksum Offload in RX: %s\n",
3860                            (priv->dma_cap.rx_coe) ? "Y" : "N");
3861         } else {
3862                 seq_printf(seq, "\tIP Checksum Offload (type1) in RX: %s\n",
3863                            (priv->dma_cap.rx_coe_type1) ? "Y" : "N");
3864                 seq_printf(seq, "\tIP Checksum Offload (type2) in RX: %s\n",
3865                            (priv->dma_cap.rx_coe_type2) ? "Y" : "N");
3866         }
3867         seq_printf(seq, "\tRXFIFO > 2048bytes: %s\n",
3868                    (priv->dma_cap.rxfifo_over_2048) ? "Y" : "N");
3869         seq_printf(seq, "\tNumber of Additional RX channel: %d\n",
3870                    priv->dma_cap.number_rx_channel);
3871         seq_printf(seq, "\tNumber of Additional TX channel: %d\n",
3872                    priv->dma_cap.number_tx_channel);
3873         seq_printf(seq, "\tEnhanced descriptors: %s\n",
3874                    (priv->dma_cap.enh_desc) ? "Y" : "N");
3875
3876         return 0;
3877 }
3878
3879 static int stmmac_sysfs_dma_cap_open(struct inode *inode, struct file *file)
3880 {
3881         return single_open(file, stmmac_sysfs_dma_cap_read, inode->i_private);
3882 }
3883
3884 static const struct file_operations stmmac_dma_cap_fops = {
3885         .owner = THIS_MODULE,
3886         .open = stmmac_sysfs_dma_cap_open,
3887         .read = seq_read,
3888         .llseek = seq_lseek,
3889         .release = single_release,
3890 };
3891
3892 static int stmmac_init_fs(struct net_device *dev)
3893 {
3894         struct stmmac_priv *priv = netdev_priv(dev);
3895
3896         /* Create per netdev entries */
3897         priv->dbgfs_dir = debugfs_create_dir(dev->name, stmmac_fs_dir);
3898
3899         if (!priv->dbgfs_dir || IS_ERR(priv->dbgfs_dir)) {
3900                 netdev_err(priv->dev, "ERROR failed to create debugfs directory\n");
3901
3902                 return -ENOMEM;
3903         }
3904
3905         /* Entry to report DMA RX/TX rings */
3906         priv->dbgfs_rings_status =
3907                 debugfs_create_file("descriptors_status", S_IRUGO,
3908                                     priv->dbgfs_dir, dev,
3909                                     &stmmac_rings_status_fops);
3910
3911         if (!priv->dbgfs_rings_status || IS_ERR(priv->dbgfs_rings_status)) {
3912                 netdev_err(priv->dev, "ERROR creating stmmac ring debugfs file\n");
3913                 debugfs_remove_recursive(priv->dbgfs_dir);
3914
3915                 return -ENOMEM;
3916         }
3917
3918         /* Entry to report the DMA HW features */
3919         priv->dbgfs_dma_cap = debugfs_create_file("dma_cap", S_IRUGO,
3920                                             priv->dbgfs_dir,
3921                                             dev, &stmmac_dma_cap_fops);
3922
3923         if (!priv->dbgfs_dma_cap || IS_ERR(priv->dbgfs_dma_cap)) {
3924                 netdev_err(priv->dev, "ERROR creating stmmac MMC debugfs file\n");
3925                 debugfs_remove_recursive(priv->dbgfs_dir);
3926
3927                 return -ENOMEM;
3928         }
3929
3930         return 0;
3931 }
3932
3933 static void stmmac_exit_fs(struct net_device *dev)
3934 {
3935         struct stmmac_priv *priv = netdev_priv(dev);
3936
3937         debugfs_remove_recursive(priv->dbgfs_dir);
3938 }
3939 #endif /* CONFIG_DEBUG_FS */
3940
3941 static const struct net_device_ops stmmac_netdev_ops = {
3942         .ndo_open = stmmac_open,
3943         .ndo_start_xmit = stmmac_xmit,
3944         .ndo_stop = stmmac_release,
3945         .ndo_change_mtu = stmmac_change_mtu,
3946         .ndo_fix_features = stmmac_fix_features,
3947         .ndo_set_features = stmmac_set_features,
3948         .ndo_set_rx_mode = stmmac_set_rx_mode,
3949         .ndo_tx_timeout = stmmac_tx_timeout,
3950         .ndo_do_ioctl = stmmac_ioctl,
3951 #ifdef CONFIG_NET_POLL_CONTROLLER
3952         .ndo_poll_controller = stmmac_poll_controller,
3953 #endif
3954         .ndo_set_mac_address = eth_mac_addr,
3955 };
3956
3957 /**
3958  *  stmmac_hw_init - Init the MAC device
3959  *  @priv: driver private structure
3960  *  Description: this function is to configure the MAC device according to
3961  *  some platform parameters or the HW capability register. It prepares the
3962  *  driver to use either ring or chain modes and to setup either enhanced or
3963  *  normal descriptors.
3964  */
3965 static int stmmac_hw_init(struct stmmac_priv *priv)
3966 {
3967         struct mac_device_info *mac;
3968
3969         /* Identify the MAC HW device */
3970         if (priv->plat->setup) {
3971                 mac = priv->plat->setup(priv);
3972         } else if (priv->plat->has_gmac) {
3973                 priv->dev->priv_flags |= IFF_UNICAST_FLT;
3974                 mac = dwmac1000_setup(priv->ioaddr,
3975                                       priv->plat->multicast_filter_bins,
3976                                       priv->plat->unicast_filter_entries,
3977                                       &priv->synopsys_id);
3978         } else if (priv->plat->has_gmac4) {
3979                 priv->dev->priv_flags |= IFF_UNICAST_FLT;
3980                 mac = dwmac4_setup(priv->ioaddr,
3981                                    priv->plat->multicast_filter_bins,
3982                                    priv->plat->unicast_filter_entries,
3983                                    &priv->synopsys_id);
3984         } else {
3985                 mac = dwmac100_setup(priv->ioaddr, &priv->synopsys_id);
3986         }
3987         if (!mac)
3988                 return -ENOMEM;
3989
3990         priv->hw = mac;
3991
        /* dwmac-sun8i only works in chain mode */
3993         if (priv->plat->has_sun8i)
3994                 chain_mode = 1;
3995
3996         /* To use the chained or ring mode */
3997         if (priv->synopsys_id >= DWMAC_CORE_4_00) {
3998                 priv->hw->mode = &dwmac4_ring_mode_ops;
3999         } else {
4000                 if (chain_mode) {
4001                         priv->hw->mode = &chain_mode_ops;
4002                         dev_info(priv->device, "Chain mode enabled\n");
4003                         priv->mode = STMMAC_CHAIN_MODE;
4004                 } else {
4005                         priv->hw->mode = &ring_mode_ops;
4006                         dev_info(priv->device, "Ring mode enabled\n");
4007                         priv->mode = STMMAC_RING_MODE;
4008                 }
4009         }
4010
        /* Get the HW capability (GMAC cores newer than 3.50a) */
4012         priv->hw_cap_support = stmmac_get_hw_features(priv);
4013         if (priv->hw_cap_support) {
4014                 dev_info(priv->device, "DMA HW capability register supported\n");
4015
                /* We can override some gmac/dma configuration fields
                 * (e.g. enh_desc, tx_coe) that are passed through the
                 * platform with the values from the HW capability
                 * register (if supported).
                 */
4021                 priv->plat->enh_desc = priv->dma_cap.enh_desc;
4022                 priv->plat->pmt = priv->dma_cap.pmt_remote_wake_up;
4023                 priv->hw->pmt = priv->plat->pmt;
4024
4025                 /* TXCOE doesn't work in thresh DMA mode */
4026                 if (priv->plat->force_thresh_dma_mode)
4027                         priv->plat->tx_coe = 0;
4028                 else
4029                         priv->plat->tx_coe = priv->dma_cap.tx_coe;
4030
4031                 /* In case of GMAC4 rx_coe is from HW cap register. */
4032                 priv->plat->rx_coe = priv->dma_cap.rx_coe;
4033
4034                 if (priv->dma_cap.rx_coe_type2)
4035                         priv->plat->rx_coe = STMMAC_RX_COE_TYPE2;
4036                 else if (priv->dma_cap.rx_coe_type1)
4037                         priv->plat->rx_coe = STMMAC_RX_COE_TYPE1;
4038
4039         } else {
4040                 dev_info(priv->device, "No HW DMA feature register supported\n");
4041         }
4042
4043         /* To use alternate (extended), normal or GMAC4 descriptor structures */
4044         if (priv->synopsys_id >= DWMAC_CORE_4_00)
4045                 priv->hw->desc = &dwmac4_desc_ops;
4046         else
4047                 stmmac_selec_desc_mode(priv);
4048
4049         if (priv->plat->rx_coe) {
4050                 priv->hw->rx_csum = priv->plat->rx_coe;
4051                 dev_info(priv->device, "RX Checksum Offload Engine supported\n");
4052                 if (priv->synopsys_id < DWMAC_CORE_4_00)
4053                         dev_info(priv->device, "COE Type %d\n", priv->hw->rx_csum);
4054         }
4055         if (priv->plat->tx_coe)
4056                 dev_info(priv->device, "TX Checksum insertion supported\n");
4057
4058         if (priv->plat->pmt) {
4059                 dev_info(priv->device, "Wake-Up On LAN supported\n");
4060                 device_set_wakeup_capable(priv->device, 1);
4061         }
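        /* Editorial note: with wakeup capability advertised here, WoL is
         * enabled from userspace later, e.g. "ethtool -s eth0 wol g",
         * which stores WAKE_MAGIC in priv->wolopts; stmmac_suspend()
         * below then hands that value to the mac->pmt() callback.
         */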
4062
4063         if (priv->dma_cap.tsoen)
4064                 dev_info(priv->device, "TSO supported\n");
4065
4066         return 0;
4067 }
4068
4069 /**
4070  * stmmac_dvr_probe
4071  * @device: device pointer
4072  * @plat_dat: platform data pointer
4073  * @res: stmmac resource pointer
4074  * Description: this is the main probe function, used to
4075  * call alloc_etherdev and allocate the private structure.
4076  * Return:
4077  * 0 on success, otherwise a negative errno.
4078  */
4079 int stmmac_dvr_probe(struct device *device,
4080                      struct plat_stmmacenet_data *plat_dat,
4081                      struct stmmac_resources *res)
4082 {
4083         struct net_device *ndev = NULL;
4084         struct stmmac_priv *priv;
4085         int ret = 0;
4086         u32 queue;
4087
4088         ndev = alloc_etherdev_mqs(sizeof(struct stmmac_priv),
4089                                   MTL_MAX_TX_QUEUES,
4090                                   MTL_MAX_RX_QUEUES);
4091         if (!ndev)
4092                 return -ENOMEM;
4093
4094         SET_NETDEV_DEV(ndev, device);
4095
4096         priv = netdev_priv(ndev);
4097         priv->device = device;
4098         priv->dev = ndev;
4099
4100         stmmac_set_ethtool_ops(ndev);
4101         priv->pause = pause;
4102         priv->plat = plat_dat;
4103         priv->ioaddr = res->addr;
4104         priv->dev->base_addr = (unsigned long)res->addr;
4105
4106         priv->dev->irq = res->irq;
4107         priv->wol_irq = res->wol_irq;
4108         priv->lpi_irq = res->lpi_irq;
4109
4110         if (res->mac)
4111                 memcpy(priv->dev->dev_addr, res->mac, ETH_ALEN);
4112
4113         dev_set_drvdata(device, priv->dev);
4114
4115         /* Verify driver arguments */
4116         stmmac_verify_args();
4117
4118         /* Override with kernel parameters if supplied (XXX: this
4119          * still needs to support multiple instances)
4120          */
4121         if ((phyaddr >= 0) && (phyaddr <= 31))
4122                 priv->plat->phy_addr = phyaddr;
4123
4124         if (priv->plat->stmmac_rst) {
4125                 ret = reset_control_assert(priv->plat->stmmac_rst);
4126                 reset_control_deassert(priv->plat->stmmac_rst);
4127                 /* Some reset controllers have only a reset callback
4128                  * instead of an assert + deassert callback pair.
4129                  */
4130                 if (ret == -ENOTSUPP)
4131                         reset_control_reset(priv->plat->stmmac_rst);
4132         }
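        /* Editorial sketch: the fallback above is the usual pattern for
         * the reset API.  Controllers lacking assert/deassert return
         * -ENOTSUPP from reset_control_assert(), in which case a pulsed
         * reset_control_reset() is issued instead:
         *
         *      if (reset_control_assert(rst) == -ENOTSUPP)
         *              reset_control_reset(rst);
         *      else
         *              reset_control_deassert(rst);
         */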
4133
4134         /* Init MAC and get the capabilities */
4135         ret = stmmac_hw_init(priv);
4136         if (ret)
4137                 goto error_hw_init;
4138
4139         /* Configure real RX and TX queues */
4140         netif_set_real_num_rx_queues(ndev, priv->plat->rx_queues_to_use);
4141         netif_set_real_num_tx_queues(ndev, priv->plat->tx_queues_to_use);
4142
4143         ndev->netdev_ops = &stmmac_netdev_ops;
4144
4145         ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
4146                             NETIF_F_RXCSUM;
4147
4148         if ((priv->plat->tso_en) && (priv->dma_cap.tsoen)) {
4149                 ndev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6;
4150                 priv->tso = true;
4151                 dev_info(priv->device, "TSO feature enabled\n");
4152         }
4153         ndev->features |= ndev->hw_features | NETIF_F_HIGHDMA;
4154         ndev->watchdog_timeo = msecs_to_jiffies(watchdog);
4155 #ifdef STMMAC_VLAN_TAG_USED
4156         /* Both mac100 and gmac support receive VLAN tag detection */
4157         ndev->features |= NETIF_F_HW_VLAN_CTAG_RX;
4158 #endif
4159         priv->msg_enable = netif_msg_init(debug, default_msg_level);
4160
4161         /* MTU range: 46 - hw-specific max */
4162         ndev->min_mtu = ETH_ZLEN - ETH_HLEN;
4163         if ((priv->plat->enh_desc) || (priv->synopsys_id >= DWMAC_CORE_4_00))
4164                 ndev->max_mtu = JUMBO_LEN;
4165         else
4166                 ndev->max_mtu = SKB_MAX_HEAD(NET_SKB_PAD + NET_IP_ALIGN);
4167         /* Do not overwrite ndev->max_mtu if plat->maxmtu > ndev->max_mtu,
4168          * nor accept plat->maxmtu < ndev->min_mtu, which is an invalid range.
4169          */
4170         if ((priv->plat->maxmtu < ndev->max_mtu) &&
4171             (priv->plat->maxmtu >= ndev->min_mtu))
4172                 ndev->max_mtu = priv->plat->maxmtu;
4173         else if (priv->plat->maxmtu < ndev->min_mtu)
4174                 dev_warn(priv->device,
4175                          "%s: warning: maxmtu has an invalid value (%d)\n",
4176                          __func__, priv->plat->maxmtu);
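        /* Editorial worked example: with min_mtu = 46 and max_mtu =
         * JUMBO_LEN (9000) on an enh_desc core, a platform maxmtu of
         * 1500 lowers max_mtu to 1500; maxmtu = 9000 leaves max_mtu
         * untouched (not strictly lower); maxmtu = 20 only triggers
         * the warning above.
         */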
4177
4178         if (flow_ctrl)
4179                 priv->flow_ctrl = FLOW_AUTO;    /* RX/TX pause on */
4180
4181         /* Rx Watchdog is available in cores newer than 3.40.
4182          * In some cases, for example on buggy HW, this feature
4183          * has to be disabled, which can be done by passing the
4184          * riwt_off field from the platform.
4185          */
4186         if ((priv->synopsys_id >= DWMAC_CORE_3_50) && (!priv->plat->riwt_off)) {
4187                 priv->use_riwt = 1;
4188                 dev_info(priv->device,
4189                          "Enable RX Mitigation via HW Watchdog Timer\n");
4190         }
4191
4192         for (queue = 0; queue < priv->plat->rx_queues_to_use; queue++) {
4193                 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
4194
4195                 netif_napi_add(ndev, &rx_q->napi, stmmac_poll,
4196                                (8 * priv->plat->rx_queues_to_use));
4197         }
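        /* Editorial sketch of the poll contract assumed by
         * netif_napi_add() above: the handler reaps at most "budget"
         * packets and completes NAPI (re-enabling interrupts) only
         * when it stays under budget:
         *
         *      static int example_poll(struct napi_struct *napi, int budget)
         *      {
         *              int work_done = 0;
         *
         *              ... reap up to budget RX frames into work_done ...
         *              if (work_done < budget)
         *                      napi_complete_done(napi, work_done);
         *              return work_done;
         *      }
         */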
4198
4199         spin_lock_init(&priv->lock);
4200
4201         /* If a specific clk_csr value is passed from the platform
4202          * this means that the CSR Clock Range selection cannot be
4203          * changed at run-time and is fixed. Otherwise the driver will try to
4204          * set the MDC clock dynamically according to the actual csr
4205          * clock input.
4206          */
4207         if (!priv->plat->clk_csr)
4208                 stmmac_clk_csr_set(priv);
4209         else
4210                 priv->clk_csr = priv->plat->clk_csr;
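        /* Editorial sketch: stmmac_clk_csr_set(), earlier in this file,
         * derives the MDC divider from the CSR clock rate, conceptually:
         *
         *      clk_rate = clk_get_rate(priv->plat->stmmac_clk);
         *      if (clk_rate < CSR_F_35M)
         *              priv->clk_csr = STMMAC_CSR_20_35M;
         *      else if (clk_rate < CSR_F_60M)
         *              priv->clk_csr = STMMAC_CSR_35_60M;
         *      ... and so on up the defined frequency ranges ...
         */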
4211
4212         stmmac_check_pcs_mode(priv);
4213
4214         if (priv->hw->pcs != STMMAC_PCS_RGMII  &&
4215             priv->hw->pcs != STMMAC_PCS_TBI &&
4216             priv->hw->pcs != STMMAC_PCS_RTBI) {
4217                 /* MDIO bus Registration */
4218                 ret = stmmac_mdio_register(ndev);
4219                 if (ret < 0) {
4220                         dev_err(priv->device,
4221                                 "%s: MDIO bus (id: %d) registration failed\n",
4222                                 __func__, priv->plat->bus_id);
4223                         goto error_mdio_register;
4224                 }
4225         }
4226
4227         ret = register_netdev(ndev);
4228         if (ret) {
4229                 dev_err(priv->device, "%s: ERROR %i registering the device\n",
4230                         __func__, ret);
4231                 goto error_netdev_register;
4232         }
4233
4234         return ret;
4235
4236 error_netdev_register:
4237         if (priv->hw->pcs != STMMAC_PCS_RGMII &&
4238             priv->hw->pcs != STMMAC_PCS_TBI &&
4239             priv->hw->pcs != STMMAC_PCS_RTBI)
4240                 stmmac_mdio_unregister(ndev);
4241 error_mdio_register:
4242         for (queue = 0; queue < priv->plat->rx_queues_to_use; queue++) {
4243                 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
4244
4245                 netif_napi_del(&rx_q->napi);
4246         }
4247 error_hw_init:
4248         free_netdev(ndev);
4249
4250         return ret;
4251 }
4252 EXPORT_SYMBOL_GPL(stmmac_dvr_probe);
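/* Editorial usage sketch: a platform glue driver typically gathers its
 * resources and then calls stmmac_dvr_probe().  The helpers referenced
 * below live in stmmac_platform.c (not visible from this file), and the
 * function name example_pltfr_probe is illustrative:
 *
 *	static int example_pltfr_probe(struct platform_device *pdev)
 *	{
 *		struct plat_stmmacenet_data *plat_dat;
 *		struct stmmac_resources stmmac_res;
 *		int ret;
 *
 *		ret = stmmac_get_platform_resources(pdev, &stmmac_res);
 *		if (ret)
 *			return ret;
 *
 *		plat_dat = stmmac_probe_config_dt(pdev, &stmmac_res.mac);
 *		if (IS_ERR(plat_dat))
 *			return PTR_ERR(plat_dat);
 *
 *		return stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
 *	}
 */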
4253
4254 /**
4255  * stmmac_dvr_remove
4256  * @dev: device pointer
4257  * Description: this function resets the TX/RX processes, disables the MAC RX/TX,
4258  * changes the link status, and releases the DMA descriptor rings.
4259  */
4260 int stmmac_dvr_remove(struct device *dev)
4261 {
4262         struct net_device *ndev = dev_get_drvdata(dev);
4263         struct stmmac_priv *priv = netdev_priv(ndev);
4264
4265         netdev_info(priv->dev, "%s: removing driver\n", __func__);
4266
4267         stmmac_stop_all_dma(priv);
4268
4269         priv->hw->mac->set_mac(priv->ioaddr, false);
4270         netif_carrier_off(ndev);
4271         unregister_netdev(ndev);
4272         if (priv->plat->stmmac_rst)
4273                 reset_control_assert(priv->plat->stmmac_rst);
4274         clk_disable_unprepare(priv->plat->pclk);
4275         clk_disable_unprepare(priv->plat->stmmac_clk);
4276         if (priv->hw->pcs != STMMAC_PCS_RGMII &&
4277             priv->hw->pcs != STMMAC_PCS_TBI &&
4278             priv->hw->pcs != STMMAC_PCS_RTBI)
4279                 stmmac_mdio_unregister(ndev);
4280         free_netdev(ndev);
4281
4282         return 0;
4283 }
4284 EXPORT_SYMBOL_GPL(stmmac_dvr_remove);
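/* Editorial usage sketch (example_pltfr_remove is an illustrative
 * name): bus glue normally just forwards its .remove callback here:
 *
 *	static int example_pltfr_remove(struct platform_device *pdev)
 *	{
 *		return stmmac_dvr_remove(&pdev->dev);
 *	}
 */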
4285
4286 /**
4287  * stmmac_suspend - suspend callback
4288  * @dev: device pointer
4289  * Description: this function suspends the device; it is called
4290  * by the platform driver to stop the network queue, release the
4291  * resources, program the PMT register (for WoL) and clean up driver state.
4292  */
4293 int stmmac_suspend(struct device *dev)
4294 {
4295         struct net_device *ndev = dev_get_drvdata(dev);
4296         struct stmmac_priv *priv = netdev_priv(ndev);
4297         unsigned long flags;
4298
4299         if (!ndev || !netif_running(ndev))
4300                 return 0;
4301
4302         if (ndev->phydev)
4303                 phy_stop(ndev->phydev);
4304
4305         spin_lock_irqsave(&priv->lock, flags);
4306
4307         netif_device_detach(ndev);
4308         stmmac_stop_all_queues(priv);
4309
4310         stmmac_disable_all_queues(priv);
4311
4312         /* Stop TX/RX DMA */
4313         stmmac_stop_all_dma(priv);
4314
4315         /* Enable Power down mode by programming the PMT regs */
4316         if (device_may_wakeup(priv->device)) {
4317                 priv->hw->mac->pmt(priv->hw, priv->wolopts);
4318                 priv->irq_wake = 1;
4319         } else {
4320                 priv->hw->mac->set_mac(priv->ioaddr, false);
4321                 pinctrl_pm_select_sleep_state(priv->device);
4322                 /* Disable clocks in case PMT wake-up is off */
4323                 clk_disable(priv->plat->pclk);
4324                 clk_disable(priv->plat->stmmac_clk);
4325         }
4326         spin_unlock_irqrestore(&priv->lock, flags);
4327
4328         priv->oldlink = false;
4329         priv->speed = SPEED_UNKNOWN;
4330         priv->oldduplex = DUPLEX_UNKNOWN;
4331         return 0;
4332 }
4333 EXPORT_SYMBOL_GPL(stmmac_suspend);
4334
4335 /**
4336  * stmmac_reset_queues_param - reset queue parameters
4337  * @priv: driver private structure
4338  */
4339 static void stmmac_reset_queues_param(struct stmmac_priv *priv)
4340 {
4341         u32 rx_cnt = priv->plat->rx_queues_to_use;
4342         u32 tx_cnt = priv->plat->tx_queues_to_use;
4343         u32 queue;
4344
4345         for (queue = 0; queue < rx_cnt; queue++) {
4346                 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
4347
4348                 rx_q->cur_rx = 0;
4349                 rx_q->dirty_rx = 0;
4350         }
4351
4352         for (queue = 0; queue < tx_cnt; queue++) {
4353                 struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
4354
4355                 tx_q->cur_tx = 0;
4356                 tx_q->dirty_tx = 0;
4357         }
4358 }
4359
4360 /**
4361  * stmmac_resume - resume callback
4362  * @dev: device pointer
4363  * Description: on resume, this function is invoked to set up the DMA and CORE
4364  * in a usable state.
4365  */
4366 int stmmac_resume(struct device *dev)
4367 {
4368         struct net_device *ndev = dev_get_drvdata(dev);
4369         struct stmmac_priv *priv = netdev_priv(ndev);
4370         unsigned long flags;
4371
4372         if (!netif_running(ndev))
4373                 return 0;
4374
4375         /* The Power Down bit in the PMT register is cleared
4376          * automatically as soon as a magic packet or a Wake-up frame
4377          * is received. Anyway, it's better to clear this bit
4378          * manually because it can cause problems while resuming
4379          * from other devices (e.g. a serial console).
4380          */
4381         if (device_may_wakeup(priv->device)) {
4382                 spin_lock_irqsave(&priv->lock, flags);
4383                 priv->hw->mac->pmt(priv->hw, 0);
4384                 spin_unlock_irqrestore(&priv->lock, flags);
4385                 priv->irq_wake = 0;
4386         } else {
4387                 pinctrl_pm_select_default_state(priv->device);
4388                 /* enable the clocks previously disabled */
4389                 clk_enable(priv->plat->stmmac_clk);
4390                 clk_enable(priv->plat->pclk);
4391                 /* reset the phy so that it's ready */
4392                 if (priv->mii)
4393                         stmmac_mdio_reset(priv->mii);
4394         }
4395
4396         netif_device_attach(ndev);
4397
4398         spin_lock_irqsave(&priv->lock, flags);
4399
4400         stmmac_reset_queues_param(priv);
4401
4402         /* Reset the private mss value to force an mss context setting at
4403          * the next TSO xmit (only used by GMAC4).
4404          */
4405         priv->mss = 0;
4406
4407         stmmac_clear_descriptors(priv);
4408
4409         stmmac_hw_setup(ndev, false);
4410         stmmac_init_tx_coalesce(priv);
4411         stmmac_set_rx_mode(ndev);
4412
4413         stmmac_enable_all_queues(priv);
4414
4415         stmmac_start_all_queues(priv);
4416
4417         spin_unlock_irqrestore(&priv->lock, flags);
4418
4419         if (ndev->phydev)
4420                 phy_start(ndev->phydev);
4421
4422         return 0;
4423 }
4424 EXPORT_SYMBOL_GPL(stmmac_resume);
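/* Editorial sketch: glue drivers commonly wire the two callbacks above
 * into a dev_pm_ops using the standard kernel macro; example_pm_ops is
 * an illustrative name:
 *
 *	static SIMPLE_DEV_PM_OPS(example_pm_ops, stmmac_suspend,
 *				 stmmac_resume);
 */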
4425
4426 #ifndef MODULE
4427 static int __init stmmac_cmdline_opt(char *str)
4428 {
4429         char *opt;
4430
4431         if (!str || !*str)
4432                 return -EINVAL;
4433         while ((opt = strsep(&str, ",")) != NULL) {
4434                 if (!strncmp(opt, "debug:", 6)) {
4435                         if (kstrtoint(opt + 6, 0, &debug))
4436                                 goto err;
4437                 } else if (!strncmp(opt, "phyaddr:", 8)) {
4438                         if (kstrtoint(opt + 8, 0, &phyaddr))
4439                                 goto err;
4440                 } else if (!strncmp(opt, "buf_sz:", 7)) {
4441                         if (kstrtoint(opt + 7, 0, &buf_sz))
4442                                 goto err;
4443                 } else if (!strncmp(opt, "tc:", 3)) {
4444                         if (kstrtoint(opt + 3, 0, &tc))
4445                                 goto err;
4446                 } else if (!strncmp(opt, "watchdog:", 9)) {
4447                         if (kstrtoint(opt + 9, 0, &watchdog))
4448                                 goto err;
4449                 } else if (!strncmp(opt, "flow_ctrl:", 10)) {
4450                         if (kstrtoint(opt + 10, 0, &flow_ctrl))
4451                                 goto err;
4452                 } else if (!strncmp(opt, "pause:", 6)) {
4453                         if (kstrtoint(opt + 6, 0, &pause))
4454                                 goto err;
4455                 } else if (!strncmp(opt, "eee_timer:", 10)) {
4456                         if (kstrtoint(opt + 10, 0, &eee_timer))
4457                                 goto err;
4458                 } else if (!strncmp(opt, "chain_mode:", 11)) {
4459                         if (kstrtoint(opt + 11, 0, &chain_mode))
4460                                 goto err;
4461                 }
4462         }
4463         return 0;
4464
4465 err:
4466         pr_err("%s: ERROR broken module parameter conversion\n", __func__);
4467         return -EINVAL;
4468 }
4469
4470 __setup("stmmaceth=", stmmac_cmdline_opt);
4471 #endif /* MODULE */
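/* Editorial example: a built-in boot line for the parser above could be
 * (all values illustrative):
 *
 *	stmmaceth=debug:16,phyaddr:1,watchdog:4000,chain_mode:1
 */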
4472
4473 static int __init stmmac_init(void)
4474 {
4475 #ifdef CONFIG_DEBUG_FS
4476         /* Create debugfs main directory if it doesn't exist yet */
4477         if (!stmmac_fs_dir) {
4478                 stmmac_fs_dir = debugfs_create_dir(STMMAC_RESOURCE_NAME, NULL);
4479
4480                 if (!stmmac_fs_dir || IS_ERR(stmmac_fs_dir)) {
4481                         pr_err("ERROR %s, debugfs create directory failed\n",
4482                                STMMAC_RESOURCE_NAME);
4483
4484                         return -ENOMEM;
4485                 }
4486         }
4487 #endif
4488
4489         return 0;
4490 }
4491
4492 static void __exit stmmac_exit(void)
4493 {
4494 #ifdef CONFIG_DEBUG_FS
4495         debugfs_remove_recursive(stmmac_fs_dir);
4496 #endif
4497 }
4498
4499 module_init(stmmac_init)
4500 module_exit(stmmac_exit)
4501
4502 MODULE_DESCRIPTION("STMMAC 10/100/1000 Ethernet device driver");
4503 MODULE_AUTHOR("Giuseppe Cavallaro <peppe.cavallaro@st.com>");
4504 MODULE_LICENSE("GPL");