1 /*******************************************************************************
2   This is the driver for the ST MAC 10/100/1000 on-chip Ethernet controllers.
3   ST Ethernet IPs are built around a Synopsys IP Core.
4
5         Copyright(C) 2007-2011 STMicroelectronics Ltd
6
7   This program is free software; you can redistribute it and/or modify it
8   under the terms and conditions of the GNU General Public License,
9   version 2, as published by the Free Software Foundation.
10
11   This program is distributed in the hope it will be useful, but WITHOUT
12   ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13   FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
14   more details.
15
16   The full GNU General Public License is included in this distribution in
17   the file called "COPYING".
18
19   Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
20
21   Documentation available at:
22         http://www.stlinux.com
23   Support available at:
24         https://bugzilla.stlinux.com/
25 *******************************************************************************/
26
27 #include <linux/clk.h>
28 #include <linux/kernel.h>
29 #include <linux/interrupt.h>
30 #include <linux/ip.h>
31 #include <linux/tcp.h>
32 #include <linux/skbuff.h>
33 #include <linux/ethtool.h>
34 #include <linux/if_ether.h>
35 #include <linux/crc32.h>
36 #include <linux/mii.h>
37 #include <linux/if.h>
38 #include <linux/if_vlan.h>
39 #include <linux/dma-mapping.h>
40 #include <linux/slab.h>
41 #include <linux/prefetch.h>
42 #include <linux/pinctrl/consumer.h>
43 #ifdef CONFIG_DEBUG_FS
44 #include <linux/debugfs.h>
45 #include <linux/seq_file.h>
46 #endif /* CONFIG_DEBUG_FS */
47 #include <linux/net_tstamp.h>
48 #include "stmmac_ptp.h"
49 #include "stmmac.h"
50 #include <linux/reset.h>
51 #include <linux/of_mdio.h>
52 #include "dwmac1000.h"
53
54 #define STMMAC_ALIGN(x) L1_CACHE_ALIGN(x)
55 #define TSO_MAX_BUFF_SIZE       (SZ_16K - 1)
56
57 /* Module parameters */
58 #define TX_TIMEO        5000
59 static int watchdog = TX_TIMEO;
60 module_param(watchdog, int, S_IRUGO | S_IWUSR);
61 MODULE_PARM_DESC(watchdog, "Transmit timeout in milliseconds (default 5s)");
62
63 static int debug = -1;
64 module_param(debug, int, S_IRUGO | S_IWUSR);
65 MODULE_PARM_DESC(debug, "Message Level (-1: default, 0: no output, 16: all)");
66
67 static int phyaddr = -1;
68 module_param(phyaddr, int, S_IRUGO);
69 MODULE_PARM_DESC(phyaddr, "Physical device address");
70
71 #define STMMAC_TX_THRESH        (DMA_TX_SIZE / 4)
72 #define STMMAC_RX_THRESH        (DMA_RX_SIZE / 4)
73
74 static int flow_ctrl = FLOW_OFF;
75 module_param(flow_ctrl, int, S_IRUGO | S_IWUSR);
76 MODULE_PARM_DESC(flow_ctrl, "Flow control ability [on/off]");
77
78 static int pause = PAUSE_TIME;
79 module_param(pause, int, S_IRUGO | S_IWUSR);
80 MODULE_PARM_DESC(pause, "Flow Control Pause Time");
81
82 #define TC_DEFAULT 64
83 static int tc = TC_DEFAULT;
84 module_param(tc, int, S_IRUGO | S_IWUSR);
85 MODULE_PARM_DESC(tc, "DMA threshold control value");
86
87 #define DEFAULT_BUFSIZE 1536
88 static int buf_sz = DEFAULT_BUFSIZE;
89 module_param(buf_sz, int, S_IRUGO | S_IWUSR);
90 MODULE_PARM_DESC(buf_sz, "DMA buffer size");
91
92 #define STMMAC_RX_COPYBREAK     256
93
94 static const u32 default_msg_level = (NETIF_MSG_DRV | NETIF_MSG_PROBE |
95                                       NETIF_MSG_LINK | NETIF_MSG_IFUP |
96                                       NETIF_MSG_IFDOWN | NETIF_MSG_TIMER);
97
98 #define STMMAC_DEFAULT_LPI_TIMER        1000
99 static int eee_timer = STMMAC_DEFAULT_LPI_TIMER;
100 module_param(eee_timer, int, S_IRUGO | S_IWUSR);
101 MODULE_PARM_DESC(eee_timer, "LPI tx expiration time in msec");
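/* Turn the LPI timeout (in ms) into an absolute jiffies deadline for the
 * EEE control SW timer.
 */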
102 #define STMMAC_LPI_T(x) (jiffies + msecs_to_jiffies(x))
103
104 /* By default the driver will use the ring mode to manage tx and rx descriptors,
105  * but the user can force the use of the chain instead of the ring
106  */
107 static unsigned int chain_mode;
108 module_param(chain_mode, int, S_IRUGO);
109 MODULE_PARM_DESC(chain_mode, "To use chain instead of ring mode");
110
111 static irqreturn_t stmmac_interrupt(int irq, void *dev_id);
112
113 #ifdef CONFIG_DEBUG_FS
114 static int stmmac_init_fs(struct net_device *dev);
115 static void stmmac_exit_fs(struct net_device *dev);
116 #endif
117
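/* Turn a coalescing timer value (in us) into an absolute jiffies deadline. */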
118 #define STMMAC_COAL_TIMER(x) (jiffies + usecs_to_jiffies(x))
119
120 /**
121  * stmmac_verify_args - verify the driver parameters.
122  * Description: it checks the driver parameters and sets a default in case of
123  * errors.
124  */
125 static void stmmac_verify_args(void)
126 {
127         if (unlikely(watchdog < 0))
128                 watchdog = TX_TIMEO;
129         if (unlikely((buf_sz < DEFAULT_BUFSIZE) || (buf_sz > BUF_SIZE_16KiB)))
130                 buf_sz = DEFAULT_BUFSIZE;
131         if (unlikely(flow_ctrl > 1))
132                 flow_ctrl = FLOW_AUTO;
133         else if (likely(flow_ctrl < 0))
134                 flow_ctrl = FLOW_OFF;
135         if (unlikely((pause < 0) || (pause > 0xffff)))
136                 pause = PAUSE_TIME;
137         if (eee_timer < 0)
138                 eee_timer = STMMAC_DEFAULT_LPI_TIMER;
139 }
140
141 /**
142  * stmmac_disable_all_queues - Disable all queues
143  * @priv: driver private structure
144  */
145 static void stmmac_disable_all_queues(struct stmmac_priv *priv)
146 {
147         u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
148         u32 queue;
149
150         for (queue = 0; queue < rx_queues_cnt; queue++) {
151                 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
152
153                 napi_disable(&rx_q->napi);
154         }
155 }
156
157 /**
158  * stmmac_enable_all_queues - Enable all queues
159  * @priv: driver private structure
160  */
161 static void stmmac_enable_all_queues(struct stmmac_priv *priv)
162 {
163         u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
164         u32 queue;
165
166         for (queue = 0; queue < rx_queues_cnt; queue++) {
167                 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
168
169                 napi_enable(&rx_q->napi);
170         }
171 }
172
173 /**
174  * stmmac_stop_all_queues - Stop all queues
175  * @priv: driver private structure
176  */
177 static void stmmac_stop_all_queues(struct stmmac_priv *priv)
178 {
179         u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
180         u32 queue;
181
182         for (queue = 0; queue < tx_queues_cnt; queue++)
183                 netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
184 }
185
186 /**
187  * stmmac_start_all_queues - Start all queues
188  * @priv: driver private structure
189  */
190 static void stmmac_start_all_queues(struct stmmac_priv *priv)
191 {
192         u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
193         u32 queue;
194
195         for (queue = 0; queue < tx_queues_cnt; queue++)
196                 netif_tx_start_queue(netdev_get_tx_queue(priv->dev, queue));
197 }
198
199 /**
200  * stmmac_clk_csr_set - dynamically set the MDC clock
201  * @priv: driver private structure
202  * Description: this is to dynamically set the MDC clock according to the csr
203  * clock input.
204  * Note:
205  *      If a specific clk_csr value is passed from the platform
206  *      this means that the CSR Clock Range selection cannot be
207  *      changed at run-time and it is fixed (as reported in the driver
208  * documentation). Otherwise, the driver will try to set the MDC
209  *      clock dynamically according to the actual clock input.
210  */
211 static void stmmac_clk_csr_set(struct stmmac_priv *priv)
212 {
213         u32 clk_rate;
214
215         clk_rate = clk_get_rate(priv->plat->stmmac_clk);
216
217         /* A platform-provided default clk_csr is assumed valid in all
218          * cases except the ones handled below.
219          * For values higher than the IEEE 802.3 specified frequency
220          * we cannot estimate the proper divider, because the frequency
221          * of clk_csr_i is not known. So the default divider is left
222          * unchanged.
223          */
224         if (!(priv->clk_csr & MAC_CSR_H_FRQ_MASK)) {
225                 if (clk_rate < CSR_F_35M)
226                         priv->clk_csr = STMMAC_CSR_20_35M;
227                 else if ((clk_rate >= CSR_F_35M) && (clk_rate < CSR_F_60M))
228                         priv->clk_csr = STMMAC_CSR_35_60M;
229                 else if ((clk_rate >= CSR_F_60M) && (clk_rate < CSR_F_100M))
230                         priv->clk_csr = STMMAC_CSR_60_100M;
231                 else if ((clk_rate >= CSR_F_100M) && (clk_rate < CSR_F_150M))
232                         priv->clk_csr = STMMAC_CSR_100_150M;
233                 else if ((clk_rate >= CSR_F_150M) && (clk_rate < CSR_F_250M))
234                         priv->clk_csr = STMMAC_CSR_150_250M;
235                 else if ((clk_rate >= CSR_F_250M) && (clk_rate < CSR_F_300M))
236                         priv->clk_csr = STMMAC_CSR_250_300M;
237         }
238
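        /* dwmac-sun8i has its own CSR/MDC divider encoding (0x0..0x3): map
         * the bus clock rate onto it, with higher rates selecting a larger
         * divider.
         */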
239         if (priv->plat->has_sun8i) {
240                 if (clk_rate > 160000000)
241                         priv->clk_csr = 0x03;
242                 else if (clk_rate > 80000000)
243                         priv->clk_csr = 0x02;
244                 else if (clk_rate > 40000000)
245                         priv->clk_csr = 0x01;
246                 else
247                         priv->clk_csr = 0;
248         }
249 }
250
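/* Debug helper: print the buffer length/address followed by a hex dump of
 * the packet payload.
 */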
251 static void print_pkt(unsigned char *buf, int len)
252 {
253         pr_debug("len = %d byte, buf addr: 0x%p\n", len, buf);
254         print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, buf, len);
255 }
256
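/**
 * stmmac_tx_avail - get the number of free TX descriptors
 * @priv: driver private structure
 * @queue: TX queue index
 * Description: computes the free slots between the producer (cur_tx) and
 * consumer (dirty_tx) indexes of the ring, keeping one slot unused so a
 * full ring can be distinguished from an empty one (e.g. when
 * cur_tx == dirty_tx the ring is empty and DMA_TX_SIZE - 1 slots are free).
 */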
257 static inline u32 stmmac_tx_avail(struct stmmac_priv *priv, u32 queue)
258 {
259         struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
260         u32 avail;
261
262         if (tx_q->dirty_tx > tx_q->cur_tx)
263                 avail = tx_q->dirty_tx - tx_q->cur_tx - 1;
264         else
265                 avail = DMA_TX_SIZE - tx_q->cur_tx + tx_q->dirty_tx - 1;
266
267         return avail;
268 }
269
270 /**
271  * stmmac_rx_dirty - Get RX queue dirty
272  * @priv: driver private structure
273  * @queue: RX queue index
274  */
275 static inline u32 stmmac_rx_dirty(struct stmmac_priv *priv, u32 queue)
276 {
277         struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
278         u32 dirty;
279
280         if (rx_q->dirty_rx <= rx_q->cur_rx)
281                 dirty = rx_q->cur_rx - rx_q->dirty_rx;
282         else
283                 dirty = DMA_RX_SIZE - rx_q->dirty_rx + rx_q->cur_rx;
284
285         return dirty;
286 }
287
288 /**
289  * stmmac_hw_fix_mac_speed - callback for speed selection
290  * @priv: driver private structure
291  * Description: on some platforms (e.g. ST), some HW system configuration
292  * registers have to be set according to the link speed negotiated.
293  */
294 static inline void stmmac_hw_fix_mac_speed(struct stmmac_priv *priv)
295 {
296         struct net_device *ndev = priv->dev;
297         struct phy_device *phydev = ndev->phydev;
298
299         if (likely(priv->plat->fix_mac_speed))
300                 priv->plat->fix_mac_speed(priv->plat->bsp_priv, phydev->speed);
301 }
302
303 /**
304  * stmmac_enable_eee_mode - check and enter LPI mode
305  * @priv: driver private structure
306  * Description: this function verifies that all TX work has finished and,
307  * if so, enters LPI mode when EEE is in use.
308  */
309 static void stmmac_enable_eee_mode(struct stmmac_priv *priv)
310 {
311         u32 tx_cnt = priv->plat->tx_queues_to_use;
312         u32 queue;
313
314         /* check if all TX queues have the work finished */
315         for (queue = 0; queue < tx_cnt; queue++) {
316                 struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
317
318                 if (tx_q->dirty_tx != tx_q->cur_tx)
319                         return; /* still unfinished work */
320         }
321
322         /* Check and enter in LPI mode */
323         if (!priv->tx_path_in_lpi_mode)
324                 priv->hw->mac->set_eee_mode(priv->hw,
325                                             priv->plat->en_tx_lpi_clockgating);
326 }
327
328 /**
329  * stmmac_disable_eee_mode - disable and exit from LPI mode
330  * @priv: driver private structure
331  * Description: this function exits LPI mode and disables EEE when the LPI
332  * state is active. It is called from the xmit path.
333  */
334 void stmmac_disable_eee_mode(struct stmmac_priv *priv)
335 {
336         priv->hw->mac->reset_eee_mode(priv->hw);
337         del_timer_sync(&priv->eee_ctrl_timer);
338         priv->tx_path_in_lpi_mode = false;
339 }
340
341 /**
342  * stmmac_eee_ctrl_timer - EEE TX SW timer.
343  * @t: pointer to the expired timer_list
344  * Description:
345  *  if there is no data transfer and we are not already in the LPI state,
346  *  then the MAC transmitter can be moved to the LPI state.
347  */
348 static void stmmac_eee_ctrl_timer(struct timer_list *t)
349 {
350         struct stmmac_priv *priv = from_timer(priv, t, eee_ctrl_timer);
351
352         stmmac_enable_eee_mode(priv);
353         mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(eee_timer));
354 }
355
356 /**
357  * stmmac_eee_init - init EEE
358  * @priv: driver private structure
359  * Description:
360  *  if the GMAC supports the EEE (from the HW cap reg) and the phy device
361  *  can also manage EEE, this function enables the LPI state and starts the
362  *  related timer.
363  */
364 bool stmmac_eee_init(struct stmmac_priv *priv)
365 {
366         struct net_device *ndev = priv->dev;
367         unsigned long flags;
368         bool ret = false;
369
370         /* When using PCS we cannot deal with the PHY registers at this
371          * stage, so extra features like EEE are not supported.
372          */
373         if ((priv->hw->pcs == STMMAC_PCS_RGMII) ||
374             (priv->hw->pcs == STMMAC_PCS_TBI) ||
375             (priv->hw->pcs == STMMAC_PCS_RTBI))
376                 goto out;
377
378         /* MAC core supports the EEE feature. */
379         if (priv->dma_cap.eee) {
380                 int tx_lpi_timer = priv->tx_lpi_timer;
381
382                 /* Check if the PHY supports EEE */
383                 if (phy_init_eee(ndev->phydev, 1)) {
384                         /* Handle the case where EEE can no longer be
385                          * supported at run-time (for example because the
386                          * link partner caps have changed).
387                          * In that case the driver disables its own timers.
388                          */
389                         spin_lock_irqsave(&priv->lock, flags);
390                         if (priv->eee_active) {
391                                 netdev_dbg(priv->dev, "disable EEE\n");
392                                 del_timer_sync(&priv->eee_ctrl_timer);
393                                 priv->hw->mac->set_eee_timer(priv->hw, 0,
394                                                              tx_lpi_timer);
395                         }
396                         priv->eee_active = 0;
397                         spin_unlock_irqrestore(&priv->lock, flags);
398                         goto out;
399                 }
400                 /* Activate the EEE and start timers */
401                 spin_lock_irqsave(&priv->lock, flags);
402                 if (!priv->eee_active) {
403                         priv->eee_active = 1;
404                         timer_setup(&priv->eee_ctrl_timer,
405                                     stmmac_eee_ctrl_timer, 0);
406                         mod_timer(&priv->eee_ctrl_timer,
407                                   STMMAC_LPI_T(eee_timer));
408
409                         priv->hw->mac->set_eee_timer(priv->hw,
410                                                      STMMAC_DEFAULT_LIT_LS,
411                                                      tx_lpi_timer);
412                 }
413                 /* Set HW EEE according to the speed */
414                 priv->hw->mac->set_eee_pls(priv->hw, ndev->phydev->link);
415
416                 ret = true;
417                 spin_unlock_irqrestore(&priv->lock, flags);
418
419                 netdev_dbg(priv->dev, "Energy-Efficient Ethernet initialized\n");
420         }
421 out:
422         return ret;
423 }
424
425 /* stmmac_get_tx_hwtstamp - get HW TX timestamps
426  * @priv: driver private structure
427  * @p : descriptor pointer
428  * @skb : the socket buffer
429  * Description :
430  * This function reads the timestamp from the descriptor, passes it to the
431  * stack and also performs some sanity checks.
432  */
433 static void stmmac_get_tx_hwtstamp(struct stmmac_priv *priv,
434                                    struct dma_desc *p, struct sk_buff *skb)
435 {
436         struct skb_shared_hwtstamps shhwtstamp;
437         u64 ns;
438
439         if (!priv->hwts_tx_en)
440                 return;
441
442         /* exit if skb doesn't support hw tstamp */
443         if (likely(!skb || !(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)))
444                 return;
445
446         /* check tx tstamp status */
447         if (priv->hw->desc->get_tx_timestamp_status(p)) {
448                 /* get the valid tstamp */
449                 ns = priv->hw->desc->get_timestamp(p, priv->adv_ts);
450
451                 memset(&shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
452                 shhwtstamp.hwtstamp = ns_to_ktime(ns);
453
454                 netdev_dbg(priv->dev, "get valid TX hw timestamp %llu\n", ns);
455                 /* pass tstamp to stack */
456                 skb_tstamp_tx(skb, &shhwtstamp);
457         }
458
459         return;
460 }
461
462 /* stmmac_get_rx_hwtstamp - get HW RX timestamps
463  * @priv: driver private structure
464  * @p : descriptor pointer
465  * @np : next descriptor pointer
466  * @skb : the socket buffer
467  * Description :
468  * This function reads the received packet's timestamp from the descriptor
469  * and passes it to the stack. It also performs some sanity checks.
470  */
471 static void stmmac_get_rx_hwtstamp(struct stmmac_priv *priv, struct dma_desc *p,
472                                    struct dma_desc *np, struct sk_buff *skb)
473 {
474         struct skb_shared_hwtstamps *shhwtstamp = NULL;
475         struct dma_desc *desc = p;
476         u64 ns;
477
478         if (!priv->hwts_rx_en)
479                 return;
480         /* For GMAC4, the valid timestamp is from CTX next desc. */
481         if (priv->plat->has_gmac4)
482                 desc = np;
483
484         /* Check if timestamp is available */
485         if (priv->hw->desc->get_rx_timestamp_status(p, np, priv->adv_ts)) {
486                 ns = priv->hw->desc->get_timestamp(desc, priv->adv_ts);
487                 netdev_dbg(priv->dev, "get valid RX hw timestamp %llu\n", ns);
488                 shhwtstamp = skb_hwtstamps(skb);
489                 memset(shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
490                 shhwtstamp->hwtstamp = ns_to_ktime(ns);
491         } else  {
492                 netdev_dbg(priv->dev, "cannot get RX hw timestamp\n");
493         }
494 }
495
496 /**
497  *  stmmac_hwtstamp_ioctl - control hardware timestamping.
498  *  @dev: device pointer.
499  *  @ifr: An IOCTL specific structure, that can contain a pointer to
500  *  a proprietary structure used to pass information to the driver.
501  *  Description:
502  *  This function configures the MAC to enable/disable both outgoing(TX)
503  *  and incoming(RX) packets time stamping based on user input.
504  *  Return Value:
505  *  0 on success and an appropriate -ve integer on failure.
506  */
507 static int stmmac_hwtstamp_ioctl(struct net_device *dev, struct ifreq *ifr)
508 {
509         struct stmmac_priv *priv = netdev_priv(dev);
510         struct hwtstamp_config config;
511         struct timespec64 now;
512         u64 temp = 0;
513         u32 ptp_v2 = 0;
514         u32 tstamp_all = 0;
515         u32 ptp_over_ipv4_udp = 0;
516         u32 ptp_over_ipv6_udp = 0;
517         u32 ptp_over_ethernet = 0;
518         u32 snap_type_sel = 0;
519         u32 ts_master_en = 0;
520         u32 ts_event_en = 0;
521         u32 value = 0;
522         u32 sec_inc;
523
524         if (!(priv->dma_cap.time_stamp || priv->adv_ts)) {
525                 netdev_alert(priv->dev, "No support for HW time stamping\n");
526                 priv->hwts_tx_en = 0;
527                 priv->hwts_rx_en = 0;
528
529                 return -EOPNOTSUPP;
530         }
531
532         if (copy_from_user(&config, ifr->ifr_data,
533                            sizeof(struct hwtstamp_config)))
534                 return -EFAULT;
535
536         netdev_dbg(priv->dev, "%s config flags:0x%x, tx_type:0x%x, rx_filter:0x%x\n",
537                    __func__, config.flags, config.tx_type, config.rx_filter);
538
539         /* reserved for future extensions */
540         if (config.flags)
541                 return -EINVAL;
542
543         if (config.tx_type != HWTSTAMP_TX_OFF &&
544             config.tx_type != HWTSTAMP_TX_ON)
545                 return -ERANGE;
546
547         if (priv->adv_ts) {
548                 switch (config.rx_filter) {
549                 case HWTSTAMP_FILTER_NONE:
550                         /* time stamp no incoming packet at all */
551                         config.rx_filter = HWTSTAMP_FILTER_NONE;
552                         break;
553
554                 case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
555                         /* PTP v1, UDP, any kind of event packet */
556                         config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
557                         /* take time stamp for all event messages */
558                         if (priv->plat->has_gmac4)
559                                 snap_type_sel = PTP_GMAC4_TCR_SNAPTYPSEL_1;
560                         else
561                                 snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
562
563                         ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
564                         ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
565                         break;
566
567                 case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
568                         /* PTP v1, UDP, Sync packet */
569                         config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_SYNC;
570                         /* take time stamp for SYNC messages only */
571                         ts_event_en = PTP_TCR_TSEVNTENA;
572
573                         ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
574                         ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
575                         break;
576
577                 case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
578                         /* PTP v1, UDP, Delay_req packet */
579                         config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ;
580                         /* take time stamp for Delay_Req messages only */
581                         ts_master_en = PTP_TCR_TSMSTRENA;
582                         ts_event_en = PTP_TCR_TSEVNTENA;
583
584                         ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
585                         ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
586                         break;
587
588                 case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
589                         /* PTP v2, UDP, any kind of event packet */
590                         config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
591                         ptp_v2 = PTP_TCR_TSVER2ENA;
592                         /* take time stamp for all event messages */
593                         if (priv->plat->has_gmac4)
594                                 snap_type_sel = PTP_GMAC4_TCR_SNAPTYPSEL_1;
595                         else
596                                 snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
597
598                         ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
599                         ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
600                         break;
601
602                 case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
603                         /* PTP v2, UDP, Sync packet */
604                         config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_SYNC;
605                         ptp_v2 = PTP_TCR_TSVER2ENA;
606                         /* take time stamp for SYNC messages only */
607                         ts_event_en = PTP_TCR_TSEVNTENA;
608
609                         ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
610                         ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
611                         break;
612
613                 case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
614                         /* PTP v2, UDP, Delay_req packet */
615                         config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ;
616                         ptp_v2 = PTP_TCR_TSVER2ENA;
617                         /* take time stamp for Delay_Req messages only */
618                         ts_master_en = PTP_TCR_TSMSTRENA;
619                         ts_event_en = PTP_TCR_TSEVNTENA;
620
621                         ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
622                         ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
623                         break;
624
625                 case HWTSTAMP_FILTER_PTP_V2_EVENT:
626                         /* PTP v2/802.1AS, any layer, any kind of event packet */
627                         config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
628                         ptp_v2 = PTP_TCR_TSVER2ENA;
629                         /* take time stamp for all event messages */
630                         if (priv->plat->has_gmac4)
631                                 snap_type_sel = PTP_GMAC4_TCR_SNAPTYPSEL_1;
632                         else
633                                 snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
634
635                         ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
636                         ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
637                         ptp_over_ethernet = PTP_TCR_TSIPENA;
638                         break;
639
640                 case HWTSTAMP_FILTER_PTP_V2_SYNC:
641                         /* PTP v2/802.1AS, any layer, Sync packet */
642                         config.rx_filter = HWTSTAMP_FILTER_PTP_V2_SYNC;
643                         ptp_v2 = PTP_TCR_TSVER2ENA;
644                         /* take time stamp for SYNC messages only */
645                         ts_event_en = PTP_TCR_TSEVNTENA;
646
647                         ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
648                         ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
649                         ptp_over_ethernet = PTP_TCR_TSIPENA;
650                         break;
651
652                 case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
653                         /* PTP v2/802.1AS, any layer, Delay_req packet */
654                         config.rx_filter = HWTSTAMP_FILTER_PTP_V2_DELAY_REQ;
655                         ptp_v2 = PTP_TCR_TSVER2ENA;
656                         /* take time stamp for Delay_Req messages only */
657                         ts_master_en = PTP_TCR_TSMSTRENA;
658                         ts_event_en = PTP_TCR_TSEVNTENA;
659
660                         ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
661                         ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
662                         ptp_over_ethernet = PTP_TCR_TSIPENA;
663                         break;
664
665                 case HWTSTAMP_FILTER_NTP_ALL:
666                 case HWTSTAMP_FILTER_ALL:
667                         /* time stamp any incoming packet */
668                         config.rx_filter = HWTSTAMP_FILTER_ALL;
669                         tstamp_all = PTP_TCR_TSENALL;
670                         break;
671
672                 default:
673                         return -ERANGE;
674                 }
675         } else {
676                 switch (config.rx_filter) {
677                 case HWTSTAMP_FILTER_NONE:
678                         config.rx_filter = HWTSTAMP_FILTER_NONE;
679                         break;
680                 default:
681                         /* PTP v1, UDP, any kind of event packet */
682                         config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
683                         break;
684                 }
685         }
686         priv->hwts_rx_en = ((config.rx_filter == HWTSTAMP_FILTER_NONE) ? 0 : 1);
687         priv->hwts_tx_en = config.tx_type == HWTSTAMP_TX_ON;
688
689         if (!priv->hwts_tx_en && !priv->hwts_rx_en)
690                 priv->hw->ptp->config_hw_tstamping(priv->ptpaddr, 0);
691         else {
692                 value = (PTP_TCR_TSENA | PTP_TCR_TSCFUPDT | PTP_TCR_TSCTRLSSR |
693                          tstamp_all | ptp_v2 | ptp_over_ethernet |
694                          ptp_over_ipv6_udp | ptp_over_ipv4_udp | ts_event_en |
695                          ts_master_en | snap_type_sel);
696                 priv->hw->ptp->config_hw_tstamping(priv->ptpaddr, value);
697
698                 /* program Sub Second Increment reg */
699                 sec_inc = priv->hw->ptp->config_sub_second_increment(
700                         priv->ptpaddr, priv->plat->clk_ptp_rate,
701                         priv->plat->has_gmac4);
702                 temp = div_u64(1000000000ULL, sec_inc);
703
704                 /* Calculate the default addend value so that the timestamp
705                  * counter advances at the nominal rate:
706                  * addend = 2^32 * (1e9 / sec_inc) / clk_ptp_rate
707                  * where sec_inc is the sub-second increment in ns.
708                  */
709                 temp = (u64)(temp << 32);
710                 priv->default_addend = div_u64(temp, priv->plat->clk_ptp_rate);
711                 priv->hw->ptp->config_addend(priv->ptpaddr,
712                                              priv->default_addend);
713
714                 /* initialize system time */
715                 ktime_get_real_ts64(&now);
716
717                 /* lower 32 bits of tv_sec are safe until y2106 */
718                 priv->hw->ptp->init_systime(priv->ptpaddr, (u32)now.tv_sec,
719                                             now.tv_nsec);
720         }
721
722         return copy_to_user(ifr->ifr_data, &config,
723                             sizeof(struct hwtstamp_config)) ? -EFAULT : 0;
724 }
725
726 /**
727  * stmmac_init_ptp - init PTP
728  * @priv: driver private structure
729  * Description: this is to verify if the HW supports the PTPv1 or PTPv2.
730  * This is done by looking at the HW cap. register.
731  * This function also registers the ptp driver.
732  */
733 static int stmmac_init_ptp(struct stmmac_priv *priv)
734 {
735         if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp))
736                 return -EOPNOTSUPP;
737
738         priv->adv_ts = 0;
739         /* Check if adv_ts can be enabled for dwmac 4.x core */
740         if (priv->plat->has_gmac4 && priv->dma_cap.atime_stamp)
741                 priv->adv_ts = 1;
742         /* Dwmac 3.x core with extend_desc can support adv_ts */
743         else if (priv->extend_desc && priv->dma_cap.atime_stamp)
744                 priv->adv_ts = 1;
745
746         if (priv->dma_cap.time_stamp)
747                 netdev_info(priv->dev, "IEEE 1588-2002 Timestamp supported\n");
748
749         if (priv->adv_ts)
750                 netdev_info(priv->dev,
751                             "IEEE 1588-2008 Advanced Timestamp supported\n");
752
753         priv->hw->ptp = &stmmac_ptp;
754         priv->hwts_tx_en = 0;
755         priv->hwts_rx_en = 0;
756
757         stmmac_ptp_register(priv);
758
759         return 0;
760 }
761
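/**
 * stmmac_release_ptp - release PTP resources
 * @priv: driver private structure
 * Description: disables the PTP reference clock, if one was provided by the
 * platform, and unregisters the PTP clock driver.
 */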
762 static void stmmac_release_ptp(struct stmmac_priv *priv)
763 {
764         if (priv->plat->clk_ptp_ref)
765                 clk_disable_unprepare(priv->plat->clk_ptp_ref);
766         stmmac_ptp_unregister(priv);
767 }
768
769 /**
770  *  stmmac_mac_flow_ctrl - Configure flow control in all queues
771  *  @priv: driver private structure
 *  @duplex: duplex mode negotiated on the link
772  *  Description: It is used for configuring the flow control in all queues
773  */
774 static void stmmac_mac_flow_ctrl(struct stmmac_priv *priv, u32 duplex)
775 {
776         u32 tx_cnt = priv->plat->tx_queues_to_use;
777
778         priv->hw->mac->flow_ctrl(priv->hw, duplex, priv->flow_ctrl,
779                                  priv->pause, tx_cnt);
780 }
781
782 /**
783  * stmmac_adjust_link - adjusts the link parameters
784  * @dev: net device structure
785  * Description: this is the helper called by the physical abstraction layer
786  * drivers to communicate the phy link status. According to the speed and
787  * duplex this driver can invoke registered glue-logic as well.
788  * It also invokes the EEE initialization because it could happen when
789  * switching between different (EEE-capable) networks.
790  */
791 static void stmmac_adjust_link(struct net_device *dev)
792 {
793         struct stmmac_priv *priv = netdev_priv(dev);
794         struct phy_device *phydev = dev->phydev;
795         unsigned long flags;
796         bool new_state = false;
797
798         if (!phydev)
799                 return;
800
801         spin_lock_irqsave(&priv->lock, flags);
802
803         if (phydev->link) {
804                 u32 ctrl = readl(priv->ioaddr + MAC_CTRL_REG);
805
806                 /* Now we make sure that we can be in full duplex mode.
807                  * If not, we operate in half-duplex mode. */
808                 if (phydev->duplex != priv->oldduplex) {
809                         new_state = true;
810                         if (!phydev->duplex)
811                                 ctrl &= ~priv->hw->link.duplex;
812                         else
813                                 ctrl |= priv->hw->link.duplex;
814                         priv->oldduplex = phydev->duplex;
815                 }
816                 /* Flow Control operation */
817                 if (phydev->pause)
818                         stmmac_mac_flow_ctrl(priv, phydev->duplex);
819
820                 if (phydev->speed != priv->speed) {
821                         new_state = true;
822                         ctrl &= ~priv->hw->link.speed_mask;
823                         switch (phydev->speed) {
824                         case SPEED_1000:
825                                 ctrl |= priv->hw->link.speed1000;
826                                 break;
827                         case SPEED_100:
828                                 ctrl |= priv->hw->link.speed100;
829                                 break;
830                         case SPEED_10:
831                                 ctrl |= priv->hw->link.speed10;
832                                 break;
833                         default:
834                                 netif_warn(priv, link, priv->dev,
835                                            "broken speed: %d\n", phydev->speed);
836                                 phydev->speed = SPEED_UNKNOWN;
837                                 break;
838                         }
839                         if (phydev->speed != SPEED_UNKNOWN)
840                                 stmmac_hw_fix_mac_speed(priv);
841                         priv->speed = phydev->speed;
842                 }
843
844                 writel(ctrl, priv->ioaddr + MAC_CTRL_REG);
845
846                 if (!priv->oldlink) {
847                         new_state = true;
848                         priv->oldlink = true;
849                 }
850         } else if (priv->oldlink) {
851                 new_state = true;
852                 priv->oldlink = false;
853                 priv->speed = SPEED_UNKNOWN;
854                 priv->oldduplex = DUPLEX_UNKNOWN;
855         }
856
857         if (new_state && netif_msg_link(priv))
858                 phy_print_status(phydev);
859
860         spin_unlock_irqrestore(&priv->lock, flags);
861
862         if (phydev->is_pseudo_fixed_link)
863                 /* Stop the PHY layer from calling the link adjust hook in
864                  * case a switch is attached to the stmmac driver.
865                  */
866                 phydev->irq = PHY_IGNORE_INTERRUPT;
867         else
868                 /* At this stage, init the EEE if supported.
869                  * Never called in case of fixed_link.
870                  */
871                 priv->eee_enabled = stmmac_eee_init(priv);
872 }
873
874 /**
875  * stmmac_check_pcs_mode - verify if RGMII/SGMII is supported
876  * @priv: driver private structure
877  * Description: this verifies if the HW supports the Physical Coding
878  * Sublayer (PCS), an interface that can be used when the MAC is
879  * configured for the TBI, RTBI, or SGMII PHY interface.
880  */
881 static void stmmac_check_pcs_mode(struct stmmac_priv *priv)
882 {
883         int interface = priv->plat->interface;
884
885         if (priv->dma_cap.pcs) {
886                 if ((interface == PHY_INTERFACE_MODE_RGMII) ||
887                     (interface == PHY_INTERFACE_MODE_RGMII_ID) ||
888                     (interface == PHY_INTERFACE_MODE_RGMII_RXID) ||
889                     (interface == PHY_INTERFACE_MODE_RGMII_TXID)) {
890                         netdev_dbg(priv->dev, "PCS RGMII support enabled\n");
891                         priv->hw->pcs = STMMAC_PCS_RGMII;
892                 } else if (interface == PHY_INTERFACE_MODE_SGMII) {
893                         netdev_dbg(priv->dev, "PCS SGMII support enabled\n");
894                         priv->hw->pcs = STMMAC_PCS_SGMII;
895                 }
896         }
897 }
898
899 /**
900  * stmmac_init_phy - PHY initialization
901  * @dev: net device structure
902  * Description: it initializes the driver's PHY state, and attaches the PHY
903  * to the mac driver.
904  *  Return value:
905  *  0 on success
906  */
907 static int stmmac_init_phy(struct net_device *dev)
908 {
909         struct stmmac_priv *priv = netdev_priv(dev);
910         struct phy_device *phydev;
911         char phy_id_fmt[MII_BUS_ID_SIZE + 3];
912         char bus_id[MII_BUS_ID_SIZE];
913         int interface = priv->plat->interface;
914         int max_speed = priv->plat->max_speed;
915         priv->oldlink = false;
916         priv->speed = SPEED_UNKNOWN;
917         priv->oldduplex = DUPLEX_UNKNOWN;
918
919         if (priv->plat->phy_node) {
920                 phydev = of_phy_connect(dev, priv->plat->phy_node,
921                                         &stmmac_adjust_link, 0, interface);
922         } else {
923                 snprintf(bus_id, MII_BUS_ID_SIZE, "stmmac-%x",
924                          priv->plat->bus_id);
925
926                 snprintf(phy_id_fmt, MII_BUS_ID_SIZE + 3, PHY_ID_FMT, bus_id,
927                          priv->plat->phy_addr);
928                 netdev_dbg(priv->dev, "%s: trying to attach to %s\n", __func__,
929                            phy_id_fmt);
930
931                 phydev = phy_connect(dev, phy_id_fmt, &stmmac_adjust_link,
932                                      interface);
933         }
934
935         if (IS_ERR_OR_NULL(phydev)) {
936                 netdev_err(priv->dev, "Could not attach to PHY\n");
937                 if (!phydev)
938                         return -ENODEV;
939
940                 return PTR_ERR(phydev);
941         }
942
943         /* Stop Advertising 1000BASE Capability if interface is not GMII */
944         if ((interface == PHY_INTERFACE_MODE_MII) ||
945             (interface == PHY_INTERFACE_MODE_RMII) ||
946                 (max_speed < 1000 && max_speed > 0))
947                 phydev->advertising &= ~(SUPPORTED_1000baseT_Half |
948                                          SUPPORTED_1000baseT_Full);
949
950         /*
951          * Broken HW is sometimes missing the pull-up resistor on the
952          * MDIO line, which results in reads to non-existent devices returning
953          * 0 rather than 0xffff. Catch this here and treat 0 as a non-existent
954          * device as well.
955          * Note: phydev->phy_id is the result of reading the UID PHY registers.
956          */
957         if (!priv->plat->phy_node && phydev->phy_id == 0) {
958                 phy_disconnect(phydev);
959                 return -ENODEV;
960         }
961
962         /* stmmac_adjust_link will change this to PHY_IGNORE_INTERRUPT to avoid
963          * subsequent PHY polling; make sure we force a link transition if
964          * we have an UP/DOWN/UP transition
965          */
966         if (phydev->is_pseudo_fixed_link)
967                 phydev->irq = PHY_POLL;
968
969         phy_attached_info(phydev);
970         return 0;
971 }
972
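/**
 * stmmac_display_rx_rings - dump the content of the RX descriptor rings
 * @priv: driver private structure
 * Description: debug helper that walks all RX queues and prints either the
 * extended or the basic descriptor ring.
 */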
973 static void stmmac_display_rx_rings(struct stmmac_priv *priv)
974 {
975         u32 rx_cnt = priv->plat->rx_queues_to_use;
976         void *head_rx;
977         u32 queue;
978
979         /* Display RX rings */
980         for (queue = 0; queue < rx_cnt; queue++) {
981                 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
982
983                 pr_info("\tRX Queue %u rings\n", queue);
984
985                 if (priv->extend_desc)
986                         head_rx = (void *)rx_q->dma_erx;
987                 else
988                         head_rx = (void *)rx_q->dma_rx;
989
990                 /* Display RX ring */
991                 priv->hw->desc->display_ring(head_rx, DMA_RX_SIZE, true);
992         }
993 }
994
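/**
 * stmmac_display_tx_rings - dump the content of the TX descriptor rings
 * @priv: driver private structure
 * Description: debug helper that walks all TX queues and prints either the
 * extended or the basic descriptor ring.
 */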
995 static void stmmac_display_tx_rings(struct stmmac_priv *priv)
996 {
997         u32 tx_cnt = priv->plat->tx_queues_to_use;
998         void *head_tx;
999         u32 queue;
1000
1001         /* Display TX rings */
1002         for (queue = 0; queue < tx_cnt; queue++) {
1003                 struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1004
1005                 pr_info("\tTX Queue %d rings\n", queue);
1006
1007                 if (priv->extend_desc)
1008                         head_tx = (void *)tx_q->dma_etx;
1009                 else
1010                         head_tx = (void *)tx_q->dma_tx;
1011
1012                 priv->hw->desc->display_ring(head_tx, DMA_TX_SIZE, false);
1013         }
1014 }
1015
1016 static void stmmac_display_rings(struct stmmac_priv *priv)
1017 {
1018         /* Display RX ring */
1019         stmmac_display_rx_rings(priv);
1020
1021         /* Display TX ring */
1022         stmmac_display_tx_rings(priv);
1023 }
1024
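/**
 * stmmac_set_bfsize - select the DMA buffer size for a given MTU
 * @mtu: interface MTU
 * @bufsize: current buffer size
 * Description: rounds the MTU up to the next supported DMA buffer size:
 * 8KiB for MTUs of 4KiB and above, then 4KiB, then 2KiB, falling back to
 * DEFAULT_BUFSIZE for standard MTUs.
 */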
1025 static int stmmac_set_bfsize(int mtu, int bufsize)
1026 {
1027         int ret = bufsize;
1028
1029         if (mtu >= BUF_SIZE_4KiB)
1030                 ret = BUF_SIZE_8KiB;
1031         else if (mtu >= BUF_SIZE_2KiB)
1032                 ret = BUF_SIZE_4KiB;
1033         else if (mtu > DEFAULT_BUFSIZE)
1034                 ret = BUF_SIZE_2KiB;
1035         else
1036                 ret = DEFAULT_BUFSIZE;
1037
1038         return ret;
1039 }
1040
1041 /**
1042  * stmmac_clear_rx_descriptors - clear RX descriptors
1043  * @priv: driver private structure
1044  * @queue: RX queue index
1045  * Description: this function is called to clear the RX descriptors
1046  * whether basic or extended descriptors are used.
1047  */
1048 static void stmmac_clear_rx_descriptors(struct stmmac_priv *priv, u32 queue)
1049 {
1050         struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1051         int i;
1052
1053         /* Clear the RX descriptors */
1054         for (i = 0; i < DMA_RX_SIZE; i++)
1055                 if (priv->extend_desc)
1056                         priv->hw->desc->init_rx_desc(&rx_q->dma_erx[i].basic,
1057                                                      priv->use_riwt, priv->mode,
1058                                                      (i == DMA_RX_SIZE - 1));
1059                 else
1060                         priv->hw->desc->init_rx_desc(&rx_q->dma_rx[i],
1061                                                      priv->use_riwt, priv->mode,
1062                                                      (i == DMA_RX_SIZE - 1));
1063 }
1064
1065 /**
1066  * stmmac_clear_tx_descriptors - clear tx descriptors
1067  * @priv: driver private structure
1068  * @queue: TX queue index.
1069  * Description: this function is called to clear the TX descriptors
1070  * whether basic or extended descriptors are used.
1071  */
1072 static void stmmac_clear_tx_descriptors(struct stmmac_priv *priv, u32 queue)
1073 {
1074         struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1075         int i;
1076
1077         /* Clear the TX descriptors */
1078         for (i = 0; i < DMA_TX_SIZE; i++)
1079                 if (priv->extend_desc)
1080                         priv->hw->desc->init_tx_desc(&tx_q->dma_etx[i].basic,
1081                                                      priv->mode,
1082                                                      (i == DMA_TX_SIZE - 1));
1083                 else
1084                         priv->hw->desc->init_tx_desc(&tx_q->dma_tx[i],
1085                                                      priv->mode,
1086                                                      (i == DMA_TX_SIZE - 1));
1087 }
1088
1089 /**
1090  * stmmac_clear_descriptors - clear descriptors
1091  * @priv: driver private structure
1092  * Description: this function is called to clear the TX and RX descriptors
1093  * whether basic or extended descriptors are used.
1094  */
1095 static void stmmac_clear_descriptors(struct stmmac_priv *priv)
1096 {
1097         u32 rx_queue_cnt = priv->plat->rx_queues_to_use;
1098         u32 tx_queue_cnt = priv->plat->tx_queues_to_use;
1099         u32 queue;
1100
1101         /* Clear the RX descriptors */
1102         for (queue = 0; queue < rx_queue_cnt; queue++)
1103                 stmmac_clear_rx_descriptors(priv, queue);
1104
1105         /* Clear the TX descriptors */
1106         for (queue = 0; queue < tx_queue_cnt; queue++)
1107                 stmmac_clear_tx_descriptors(priv, queue);
1108 }
1109
1110 /**
1111  * stmmac_init_rx_buffers - init the RX descriptor buffer.
1112  * @priv: driver private structure
1113  * @p: descriptor pointer
1114  * @i: descriptor index
1115  * @flags: gfp flag
1116  * @queue: RX queue index
1117  * Description: this function is called to allocate a receive buffer, perform
1118  * the DMA mapping and init the descriptor.
1119  */
1120 static int stmmac_init_rx_buffers(struct stmmac_priv *priv, struct dma_desc *p,
1121                                   int i, gfp_t flags, u32 queue)
1122 {
1123         struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1124         struct sk_buff *skb;
1125
1126         skb = __netdev_alloc_skb_ip_align(priv->dev, priv->dma_buf_sz, flags);
1127         if (!skb) {
1128                 netdev_err(priv->dev,
1129                            "%s: Rx init fails; skb is NULL\n", __func__);
1130                 return -ENOMEM;
1131         }
1132         rx_q->rx_skbuff[i] = skb;
1133         rx_q->rx_skbuff_dma[i] = dma_map_single(priv->device, skb->data,
1134                                                 priv->dma_buf_sz,
1135                                                 DMA_FROM_DEVICE);
1136         if (dma_mapping_error(priv->device, rx_q->rx_skbuff_dma[i])) {
1137                 netdev_err(priv->dev, "%s: DMA mapping error\n", __func__);
1138                 dev_kfree_skb_any(skb);
1139                 return -EINVAL;
1140         }
1141
1142         if (priv->synopsys_id >= DWMAC_CORE_4_00)
1143                 p->des0 = cpu_to_le32(rx_q->rx_skbuff_dma[i]);
1144         else
1145                 p->des2 = cpu_to_le32(rx_q->rx_skbuff_dma[i]);
1146
1147         if ((priv->hw->mode->init_desc3) &&
1148             (priv->dma_buf_sz == BUF_SIZE_16KiB))
1149                 priv->hw->mode->init_desc3(p);
1150
1151         return 0;
1152 }
1153
1154 /**
1155  * stmmac_free_rx_buffer - free RX dma buffers
1156  * @priv: private structure
1157  * @queue: RX queue index
1158  * @i: buffer index.
1159  */
1160 static void stmmac_free_rx_buffer(struct stmmac_priv *priv, u32 queue, int i)
1161 {
1162         struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1163
1164         if (rx_q->rx_skbuff[i]) {
1165                 dma_unmap_single(priv->device, rx_q->rx_skbuff_dma[i],
1166                                  priv->dma_buf_sz, DMA_FROM_DEVICE);
1167                 dev_kfree_skb_any(rx_q->rx_skbuff[i]);
1168         }
1169         rx_q->rx_skbuff[i] = NULL;
1170 }
1171
1172 /**
1173  * stmmac_free_tx_buffer - free TX dma buffers
1174  * @priv: private structure
1175  * @queue: TX queue index
1176  * @i: buffer index.
1177  */
1178 static void stmmac_free_tx_buffer(struct stmmac_priv *priv, u32 queue, int i)
1179 {
1180         struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1181
1182         if (tx_q->tx_skbuff_dma[i].buf) {
1183                 if (tx_q->tx_skbuff_dma[i].map_as_page)
1184                         dma_unmap_page(priv->device,
1185                                        tx_q->tx_skbuff_dma[i].buf,
1186                                        tx_q->tx_skbuff_dma[i].len,
1187                                        DMA_TO_DEVICE);
1188                 else
1189                         dma_unmap_single(priv->device,
1190                                          tx_q->tx_skbuff_dma[i].buf,
1191                                          tx_q->tx_skbuff_dma[i].len,
1192                                          DMA_TO_DEVICE);
1193         }
1194
1195         if (tx_q->tx_skbuff[i]) {
1196                 dev_kfree_skb_any(tx_q->tx_skbuff[i]);
1197                 tx_q->tx_skbuff[i] = NULL;
1198                 tx_q->tx_skbuff_dma[i].buf = 0;
1199                 tx_q->tx_skbuff_dma[i].map_as_page = false;
1200         }
1201 }
1202
1203 /**
1204  * init_dma_rx_desc_rings - init the RX descriptor rings
1205  * @dev: net device structure
1206  * @flags: gfp flag.
1207  * Description: this function initializes the DMA RX descriptors
1208  * and allocates the socket buffers. It supports the chained and ring
1209  * modes.
1210  */
1211 static int init_dma_rx_desc_rings(struct net_device *dev, gfp_t flags)
1212 {
1213         struct stmmac_priv *priv = netdev_priv(dev);
1214         u32 rx_count = priv->plat->rx_queues_to_use;
1215         unsigned int bfsize = 0;
1216         int ret = -ENOMEM;
1217         int queue;
1218         int i;
1219
1220         if (priv->hw->mode->set_16kib_bfsize)
1221                 bfsize = priv->hw->mode->set_16kib_bfsize(dev->mtu);
1222
1223         if (bfsize < BUF_SIZE_16KiB)
1224                 bfsize = stmmac_set_bfsize(dev->mtu, priv->dma_buf_sz);
1225
1226         priv->dma_buf_sz = bfsize;
1227
1228         /* RX INITIALIZATION */
1229         netif_dbg(priv, probe, priv->dev,
1230                   "SKB addresses:\nskb\t\tskb data\tdma data\n");
1231
1232         for (queue = 0; queue < rx_count; queue++) {
1233                 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1234
1235                 netif_dbg(priv, probe, priv->dev,
1236                           "(%s) dma_rx_phy=0x%08x\n", __func__,
1237                           (u32)rx_q->dma_rx_phy);
1238
1239                 for (i = 0; i < DMA_RX_SIZE; i++) {
1240                         struct dma_desc *p;
1241
1242                         if (priv->extend_desc)
1243                                 p = &((rx_q->dma_erx + i)->basic);
1244                         else
1245                                 p = rx_q->dma_rx + i;
1246
1247                         ret = stmmac_init_rx_buffers(priv, p, i, flags,
1248                                                      queue);
1249                         if (ret)
1250                                 goto err_init_rx_buffers;
1251
1252                         netif_dbg(priv, probe, priv->dev, "[%p]\t[%p]\t[%x]\n",
1253                                   rx_q->rx_skbuff[i], rx_q->rx_skbuff[i]->data,
1254                                   (unsigned int)rx_q->rx_skbuff_dma[i]);
1255                 }
1256
1257                 rx_q->cur_rx = 0;
1258                 rx_q->dirty_rx = (unsigned int)(i - DMA_RX_SIZE);
1259
1260                 stmmac_clear_rx_descriptors(priv, queue);
1261
1262                 /* Setup the chained descriptor addresses */
1263                 if (priv->mode == STMMAC_CHAIN_MODE) {
1264                         if (priv->extend_desc)
1265                                 priv->hw->mode->init(rx_q->dma_erx,
1266                                                      rx_q->dma_rx_phy,
1267                                                      DMA_RX_SIZE, 1);
1268                         else
1269                                 priv->hw->mode->init(rx_q->dma_rx,
1270                                                      rx_q->dma_rx_phy,
1271                                                      DMA_RX_SIZE, 0);
1272                 }
1273         }
1274
1275         buf_sz = bfsize;
1276
1277         return 0;
1278
1279 err_init_rx_buffers:
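        /* Unwind: free the buffers allocated so far, starting from the
         * partially initialized queue and then all the previous ones.
         */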
1280         while (queue >= 0) {
1281                 while (--i >= 0)
1282                         stmmac_free_rx_buffer(priv, queue, i);
1283
1284                 if (queue == 0)
1285                         break;
1286
1287                 i = DMA_RX_SIZE;
1288                 queue--;
1289         }
1290
1291         return ret;
1292 }
1293
1294 /**
1295  * init_dma_tx_desc_rings - init the TX descriptor rings
1296  * @dev: net device structure.
1297  * Description: this function initializes the DMA TX descriptors
1298  * and allocates the socket buffers. It supports the chained and ring
1299  * modes.
1300  */
1301 static int init_dma_tx_desc_rings(struct net_device *dev)
1302 {
1303         struct stmmac_priv *priv = netdev_priv(dev);
1304         u32 tx_queue_cnt = priv->plat->tx_queues_to_use;
1305         u32 queue;
1306         int i;
1307
1308         for (queue = 0; queue < tx_queue_cnt; queue++) {
1309                 struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1310
1311                 netif_dbg(priv, probe, priv->dev,
1312                           "(%s) dma_tx_phy=0x%08x\n", __func__,
1313                          (u32)tx_q->dma_tx_phy);
1314
1315                 /* Setup the chained descriptor addresses */
1316                 if (priv->mode == STMMAC_CHAIN_MODE) {
1317                         if (priv->extend_desc)
1318                                 priv->hw->mode->init(tx_q->dma_etx,
1319                                                      tx_q->dma_tx_phy,
1320                                                      DMA_TX_SIZE, 1);
1321                         else
1322                                 priv->hw->mode->init(tx_q->dma_tx,
1323                                                      tx_q->dma_tx_phy,
1324                                                      DMA_TX_SIZE, 0);
1325                 }
1326
1327                 for (i = 0; i < DMA_TX_SIZE; i++) {
1328                         struct dma_desc *p;
1329                         if (priv->extend_desc)
1330                                 p = &((tx_q->dma_etx + i)->basic);
1331                         else
1332                                 p = tx_q->dma_tx + i;
1333
1334                         if (priv->synopsys_id >= DWMAC_CORE_4_00) {
1335                                 p->des0 = 0;
1336                                 p->des1 = 0;
1337                                 p->des2 = 0;
1338                                 p->des3 = 0;
1339                         } else {
1340                                 p->des2 = 0;
1341                         }
1342
1343                         tx_q->tx_skbuff_dma[i].buf = 0;
1344                         tx_q->tx_skbuff_dma[i].map_as_page = false;
1345                         tx_q->tx_skbuff_dma[i].len = 0;
1346                         tx_q->tx_skbuff_dma[i].last_segment = false;
1347                         tx_q->tx_skbuff[i] = NULL;
1348                 }
1349
1350                 tx_q->dirty_tx = 0;
1351                 tx_q->cur_tx = 0;
1352
1353                 netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, queue));
1354         }
1355
1356         return 0;
1357 }
1358
1359 /**
1360  * init_dma_desc_rings - init the RX/TX descriptor rings
1361  * @dev: net device structure
1362  * @flags: gfp flag.
1363  * Description: this function initializes the DMA RX/TX descriptors
1364  * and allocates the socket buffers. It supports the chained and ring
1365  * modes.
1366  */
1367 static int init_dma_desc_rings(struct net_device *dev, gfp_t flags)
1368 {
1369         struct stmmac_priv *priv = netdev_priv(dev);
1370         int ret;
1371
1372         ret = init_dma_rx_desc_rings(dev, flags);
1373         if (ret)
1374                 return ret;
1375
1376         ret = init_dma_tx_desc_rings(dev);
1377
1378         stmmac_clear_descriptors(priv);
1379
1380         if (netif_msg_hw(priv))
1381                 stmmac_display_rings(priv);
1382
1383         return ret;
1384 }
1385
1386 /**
1387  * dma_free_rx_skbufs - free RX dma buffers
1388  * @priv: private structure
1389  * @queue: RX queue index
1390  */
1391 static void dma_free_rx_skbufs(struct stmmac_priv *priv, u32 queue)
1392 {
1393         int i;
1394
1395         for (i = 0; i < DMA_RX_SIZE; i++)
1396                 stmmac_free_rx_buffer(priv, queue, i);
1397 }
1398
1399 /**
1400  * dma_free_tx_skbufs - free TX dma buffers
1401  * @priv: private structure
1402  * @queue: TX queue index
1403  */
1404 static void dma_free_tx_skbufs(struct stmmac_priv *priv, u32 queue)
1405 {
1406         int i;
1407
1408         for (i = 0; i < DMA_TX_SIZE; i++)
1409                 stmmac_free_tx_buffer(priv, queue, i);
1410 }
1411
1412 /**
1413  * free_dma_rx_desc_resources - free RX dma desc resources
1414  * @priv: private structure
1415  */
1416 static void free_dma_rx_desc_resources(struct stmmac_priv *priv)
1417 {
1418         u32 rx_count = priv->plat->rx_queues_to_use;
1419         u32 queue;
1420
1421         /* Free RX queue resources */
1422         for (queue = 0; queue < rx_count; queue++) {
1423                 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1424
1425                 /* Release the DMA RX socket buffers */
1426                 dma_free_rx_skbufs(priv, queue);
1427
1428                 /* Free DMA regions of consistent memory previously allocated */
1429                 if (!priv->extend_desc)
1430                         dma_free_coherent(priv->device,
1431                                           DMA_RX_SIZE * sizeof(struct dma_desc),
1432                                           rx_q->dma_rx, rx_q->dma_rx_phy);
1433                 else
1434                         dma_free_coherent(priv->device, DMA_RX_SIZE *
1435                                           sizeof(struct dma_extended_desc),
1436                                           rx_q->dma_erx, rx_q->dma_rx_phy);
1437
1438                 kfree(rx_q->rx_skbuff_dma);
1439                 kfree(rx_q->rx_skbuff);
1440         }
1441 }
1442
1443 /**
1444  * free_dma_tx_desc_resources - free TX dma desc resources
1445  * @priv: private structure
1446  */
1447 static void free_dma_tx_desc_resources(struct stmmac_priv *priv)
1448 {
1449         u32 tx_count = priv->plat->tx_queues_to_use;
1450         u32 queue;
1451
1452         /* Free TX queue resources */
1453         for (queue = 0; queue < tx_count; queue++) {
1454                 struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1455
1456                 /* Release the DMA TX socket buffers */
1457                 dma_free_tx_skbufs(priv, queue);
1458
1459                 /* Free DMA regions of consistent memory previously allocated */
1460                 if (!priv->extend_desc)
1461                         dma_free_coherent(priv->device,
1462                                           DMA_TX_SIZE * sizeof(struct dma_desc),
1463                                           tx_q->dma_tx, tx_q->dma_tx_phy);
1464                 else
1465                         dma_free_coherent(priv->device, DMA_TX_SIZE *
1466                                           sizeof(struct dma_extended_desc),
1467                                           tx_q->dma_etx, tx_q->dma_tx_phy);
1468
1469                 kfree(tx_q->tx_skbuff_dma);
1470                 kfree(tx_q->tx_skbuff);
1471         }
1472 }
1473
1474 /**
1475  * alloc_dma_rx_desc_resources - alloc RX resources.
1476  * @priv: private structure
1477  * Description: according to which descriptor can be used (extended or basic)
1478  * this function allocates the resources for the RX path: the descriptor
1479  * rings (coherent DMA memory) and the per-entry skbuff/DMA bookkeeping
1480  * arrays for each RX queue.
1481  */
1482 static int alloc_dma_rx_desc_resources(struct stmmac_priv *priv)
1483 {
1484         u32 rx_count = priv->plat->rx_queues_to_use;
1485         int ret = -ENOMEM;
1486         u32 queue;
1487
1488         /* RX queues buffers and DMA */
1489         for (queue = 0; queue < rx_count; queue++) {
1490                 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1491
1492                 rx_q->queue_index = queue;
1493                 rx_q->priv_data = priv;
1494
1495                 rx_q->rx_skbuff_dma = kmalloc_array(DMA_RX_SIZE,
1496                                                     sizeof(dma_addr_t),
1497                                                     GFP_KERNEL);
1498                 if (!rx_q->rx_skbuff_dma)
1499                         goto err_dma;
1500
1501                 rx_q->rx_skbuff = kmalloc_array(DMA_RX_SIZE,
1502                                                 sizeof(struct sk_buff *),
1503                                                 GFP_KERNEL);
1504                 if (!rx_q->rx_skbuff)
1505                         goto err_dma;
1506
1507                 if (priv->extend_desc) {
1508                         rx_q->dma_erx = dma_zalloc_coherent(priv->device,
1509                                                             DMA_RX_SIZE *
1510                                                             sizeof(struct
1511                                                             dma_extended_desc),
1512                                                             &rx_q->dma_rx_phy,
1513                                                             GFP_KERNEL);
1514                         if (!rx_q->dma_erx)
1515                                 goto err_dma;
1516
1517                 } else {
1518                         rx_q->dma_rx = dma_zalloc_coherent(priv->device,
1519                                                            DMA_RX_SIZE *
1520                                                            sizeof(struct
1521                                                            dma_desc),
1522                                                            &rx_q->dma_rx_phy,
1523                                                            GFP_KERNEL);
1524                         if (!rx_q->dma_rx)
1525                                 goto err_dma;
1526                 }
1527         }
1528
1529         return 0;
1530
1531 err_dma:
1532         free_dma_rx_desc_resources(priv);
1533
1534         return ret;
1535 }
1536
1537 /**
1538  * alloc_dma_tx_desc_resources - alloc TX resources.
1539  * @priv: private structure
1540  * Description: according to which descriptor can be used (extended or basic)
1541  * this function allocates the resources for the TX path: the descriptor
1542  * rings (coherent DMA memory) and the per-entry skbuff/DMA bookkeeping
1543  * arrays for each TX queue.
1544  */
1545 static int alloc_dma_tx_desc_resources(struct stmmac_priv *priv)
1546 {
1547         u32 tx_count = priv->plat->tx_queues_to_use;
1548         int ret = -ENOMEM;
1549         u32 queue;
1550
1551         /* TX queues buffers and DMA */
1552         for (queue = 0; queue < tx_count; queue++) {
1553                 struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1554
1555                 tx_q->queue_index = queue;
1556                 tx_q->priv_data = priv;
1557
1558                 tx_q->tx_skbuff_dma = kmalloc_array(DMA_TX_SIZE,
1559                                                     sizeof(*tx_q->tx_skbuff_dma),
1560                                                     GFP_KERNEL);
1561                 if (!tx_q->tx_skbuff_dma)
1562                         goto err_dma;
1563
1564                 tx_q->tx_skbuff = kmalloc_array(DMA_TX_SIZE,
1565                                                 sizeof(struct sk_buff *),
1566                                                 GFP_KERNEL);
1567                 if (!tx_q->tx_skbuff)
1568                         goto err_dma;
1569
1570                 if (priv->extend_desc) {
1571                         tx_q->dma_etx = dma_zalloc_coherent(priv->device,
1572                                                             DMA_TX_SIZE *
1573                                                             sizeof(struct
1574                                                             dma_extended_desc),
1575                                                             &tx_q->dma_tx_phy,
1576                                                             GFP_KERNEL);
1577                         if (!tx_q->dma_etx)
1578                                 goto err_dma;
1579                 } else {
1580                         tx_q->dma_tx = dma_zalloc_coherent(priv->device,
1581                                                            DMA_TX_SIZE *
1582                                                            sizeof(struct
1583                                                                   dma_desc),
1584                                                            &tx_q->dma_tx_phy,
1585                                                            GFP_KERNEL);
1586                         if (!tx_q->dma_tx)
1587                                 goto err_dma;
1588                 }
1589         }
1590
1591         return 0;
1592
1593 err_dma:
1594         free_dma_tx_desc_resources(priv);
1595
1596         return ret;
1597 }
1598
1599 /**
1600  * alloc_dma_desc_resources - alloc TX/RX resources.
1601  * @priv: private structure
1602  * Description: according to which descriptor can be used (extended or basic)
1603  * this function allocates the resources for the TX and RX paths. In case of
1604  * reception, for example, it pre-allocates the RX socket buffers in order to
1605  * allow the zero-copy mechanism.
1606  */
1607 static int alloc_dma_desc_resources(struct stmmac_priv *priv)
1608 {
1609         /* RX Allocation */
1610         int ret = alloc_dma_rx_desc_resources(priv);
1611
1612         if (ret)
1613                 return ret;
1614
1615         ret = alloc_dma_tx_desc_resources(priv);
1616
1617         return ret;
1618 }
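
/*
 * Rough usage sketch (editorial, mirrors the calls made later in
 * stmmac_open() and stmmac_release(); shown only to illustrate the
 * expected pairing, it is not additional driver logic):
 *
 *	ret = alloc_dma_desc_resources(priv);
 *	if (!ret)
 *		ret = init_dma_desc_rings(dev, GFP_KERNEL);
 *	...
 *	free_dma_desc_resources(priv);
 */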
1619
1620 /**
1621  * free_dma_desc_resources - free dma desc resources
1622  * @priv: private structure
1623  */
1624 static void free_dma_desc_resources(struct stmmac_priv *priv)
1625 {
1626         /* Release the DMA RX socket buffers */
1627         free_dma_rx_desc_resources(priv);
1628
1629         /* Release the DMA TX socket buffers */
1630         free_dma_tx_desc_resources(priv);
1631 }
1632
1633 /**
1634  *  stmmac_mac_enable_rx_queues - Enable MAC rx queues
1635  *  @priv: driver private structure
1636  *  Description: It is used for enabling the rx queues in the MAC
1637  */
1638 static void stmmac_mac_enable_rx_queues(struct stmmac_priv *priv)
1639 {
1640         u32 rx_queues_count = priv->plat->rx_queues_to_use;
1641         int queue;
1642         u8 mode;
1643
1644         for (queue = 0; queue < rx_queues_count; queue++) {
1645                 mode = priv->plat->rx_queues_cfg[queue].mode_to_use;
1646                 priv->hw->mac->rx_queue_enable(priv->hw, mode, queue);
1647         }
1648 }
1649
1650 /**
1651  * stmmac_start_rx_dma - start RX DMA channel
1652  * @priv: driver private structure
1653  * @chan: RX channel index
1654  * Description:
1655  * This starts a RX DMA channel
1656  */
1657 static void stmmac_start_rx_dma(struct stmmac_priv *priv, u32 chan)
1658 {
1659         netdev_dbg(priv->dev, "DMA RX processes started in channel %d\n", chan);
1660         priv->hw->dma->start_rx(priv->ioaddr, chan);
1661 }
1662
1663 /**
1664  * stmmac_start_tx_dma - start TX DMA channel
1665  * @priv: driver private structure
1666  * @chan: TX channel index
1667  * Description:
1668  * This starts a TX DMA channel
1669  */
1670 static void stmmac_start_tx_dma(struct stmmac_priv *priv, u32 chan)
1671 {
1672         netdev_dbg(priv->dev, "DMA TX processes started in channel %d\n", chan);
1673         priv->hw->dma->start_tx(priv->ioaddr, chan);
1674 }
1675
1676 /**
1677  * stmmac_stop_rx_dma - stop RX DMA channel
1678  * @priv: driver private structure
1679  * @chan: RX channel index
1680  * Description:
1681  * This stops a RX DMA channel
1682  */
1683 static void stmmac_stop_rx_dma(struct stmmac_priv *priv, u32 chan)
1684 {
1685         netdev_dbg(priv->dev, "DMA RX processes stopped in channel %d\n", chan);
1686         priv->hw->dma->stop_rx(priv->ioaddr, chan);
1687 }
1688
1689 /**
1690  * stmmac_stop_tx_dma - stop TX DMA channel
1691  * @priv: driver private structure
1692  * @chan: TX channel index
1693  * Description:
1694  * This stops a TX DMA channel
1695  */
1696 static void stmmac_stop_tx_dma(struct stmmac_priv *priv, u32 chan)
1697 {
1698         netdev_dbg(priv->dev, "DMA TX processes stopped in channel %d\n", chan);
1699         priv->hw->dma->stop_tx(priv->ioaddr, chan);
1700 }
1701
1702 /**
1703  * stmmac_start_all_dma - start all RX and TX DMA channels
1704  * @priv: driver private structure
1705  * Description:
1706  * This starts all the RX and TX DMA channels
1707  */
1708 static void stmmac_start_all_dma(struct stmmac_priv *priv)
1709 {
1710         u32 rx_channels_count = priv->plat->rx_queues_to_use;
1711         u32 tx_channels_count = priv->plat->tx_queues_to_use;
1712         u32 chan = 0;
1713
1714         for (chan = 0; chan < rx_channels_count; chan++)
1715                 stmmac_start_rx_dma(priv, chan);
1716
1717         for (chan = 0; chan < tx_channels_count; chan++)
1718                 stmmac_start_tx_dma(priv, chan);
1719 }
1720
1721 /**
1722  * stmmac_stop_all_dma - stop all RX and TX DMA channels
1723  * @priv: driver private structure
1724  * Description:
1725  * This stops the RX and TX DMA channels
1726  */
1727 static void stmmac_stop_all_dma(struct stmmac_priv *priv)
1728 {
1729         u32 rx_channels_count = priv->plat->rx_queues_to_use;
1730         u32 tx_channels_count = priv->plat->tx_queues_to_use;
1731         u32 chan = 0;
1732
1733         for (chan = 0; chan < rx_channels_count; chan++)
1734                 stmmac_stop_rx_dma(priv, chan);
1735
1736         for (chan = 0; chan < tx_channels_count; chan++)
1737                 stmmac_stop_tx_dma(priv, chan);
1738 }
1739
1740 /**
1741  *  stmmac_dma_operation_mode - HW DMA operation mode
1742  *  @priv: driver private structure
1743  *  Description: it is used for configuring the DMA operation mode register in
1744  *  order to program the tx/rx DMA thresholds or Store-And-Forward mode.
1745  */
1746 static void stmmac_dma_operation_mode(struct stmmac_priv *priv)
1747 {
1748         u32 rx_channels_count = priv->plat->rx_queues_to_use;
1749         u32 tx_channels_count = priv->plat->tx_queues_to_use;
1750         int rxfifosz = priv->plat->rx_fifo_size;
1751         int txfifosz = priv->plat->tx_fifo_size;
1752         u32 txmode = 0;
1753         u32 rxmode = 0;
1754         u32 chan = 0;
1755         u8 qmode = 0;
1756
1757         if (rxfifosz == 0)
1758                 rxfifosz = priv->dma_cap.rx_fifo_size;
1759         if (txfifosz == 0)
1760                 txfifosz = priv->dma_cap.tx_fifo_size;
1761
1762         /* Adjust for real per queue fifo size */
1763         rxfifosz /= rx_channels_count;
1764         txfifosz /= tx_channels_count;
1765
1766         if (priv->plat->force_thresh_dma_mode) {
1767                 txmode = tc;
1768                 rxmode = tc;
1769         } else if (priv->plat->force_sf_dma_mode || priv->plat->tx_coe) {
1770                 /*
1771                  * In case of GMAC, SF mode can be enabled
1772                  * to perform the TX COE in HW. This depends on:
1773                  * 1) the TX COE being actually supported, and
1774                  * 2) there being no buggy Jumbo frame support
1775                  *    that requires not inserting the csum in the TDES.
1776                  */
1777                 txmode = SF_DMA_MODE;
1778                 rxmode = SF_DMA_MODE;
1779                 priv->xstats.threshold = SF_DMA_MODE;
1780         } else {
1781                 txmode = tc;
1782                 rxmode = SF_DMA_MODE;
1783         }
1784
1785         /* configure all channels */
1786         if (priv->synopsys_id >= DWMAC_CORE_4_00) {
1787                 for (chan = 0; chan < rx_channels_count; chan++) {
1788                         qmode = priv->plat->rx_queues_cfg[chan].mode_to_use;
1789
1790                         priv->hw->dma->dma_rx_mode(priv->ioaddr, rxmode, chan,
1791                                                    rxfifosz, qmode);
1792                 }
1793
1794                 for (chan = 0; chan < tx_channels_count; chan++) {
1795                         qmode = priv->plat->tx_queues_cfg[chan].mode_to_use;
1796
1797                         priv->hw->dma->dma_tx_mode(priv->ioaddr, txmode, chan,
1798                                                    txfifosz, qmode);
1799                 }
1800         } else {
1801                 priv->hw->dma->dma_mode(priv->ioaddr, txmode, rxmode,
1802                                         rxfifosz);
1803         }
1804 }
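
/*
 * Illustrative example: the FIFO sizes above are split evenly between the
 * queues in use, so an 8 KiB RX FIFO shared by two RX queues gives 4096
 * bytes per queue, which is the value handed to the per-channel
 * dma_rx_mode() callback on GMAC4 and newer cores.
 */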
1805
1806 /**
1807  * stmmac_tx_clean - to manage the transmission completion
1808  * @priv: driver private structure
1809  * @queue: TX queue index
1810  * Description: it reclaims the transmit resources after transmission completes.
1811  */
1812 static void stmmac_tx_clean(struct stmmac_priv *priv, u32 queue)
1813 {
1814         struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1815         unsigned int bytes_compl = 0, pkts_compl = 0;
1816         unsigned int entry;
1817
1818         netif_tx_lock(priv->dev);
1819
1820         priv->xstats.tx_clean++;
1821
1822         entry = tx_q->dirty_tx;
1823         while (entry != tx_q->cur_tx) {
1824                 struct sk_buff *skb = tx_q->tx_skbuff[entry];
1825                 struct dma_desc *p;
1826                 int status;
1827
1828                 if (priv->extend_desc)
1829                         p = (struct dma_desc *)(tx_q->dma_etx + entry);
1830                 else
1831                         p = tx_q->dma_tx + entry;
1832
1833                 status = priv->hw->desc->tx_status(&priv->dev->stats,
1834                                                       &priv->xstats, p,
1835                                                       priv->ioaddr);
1836                 /* Check if the descriptor is owned by the DMA */
1837                 if (unlikely(status & tx_dma_own))
1838                         break;
1839
1840                 /* Just consider the last segment and ...*/
1841                 if (likely(!(status & tx_not_ls))) {
1842                         /* ... verify the status error condition */
1843                         if (unlikely(status & tx_err)) {
1844                                 priv->dev->stats.tx_errors++;
1845                         } else {
1846                                 priv->dev->stats.tx_packets++;
1847                                 priv->xstats.tx_pkt_n++;
1848                         }
1849                         stmmac_get_tx_hwtstamp(priv, p, skb);
1850                 }
1851
1852                 if (likely(tx_q->tx_skbuff_dma[entry].buf)) {
1853                         if (tx_q->tx_skbuff_dma[entry].map_as_page)
1854                                 dma_unmap_page(priv->device,
1855                                                tx_q->tx_skbuff_dma[entry].buf,
1856                                                tx_q->tx_skbuff_dma[entry].len,
1857                                                DMA_TO_DEVICE);
1858                         else
1859                                 dma_unmap_single(priv->device,
1860                                                  tx_q->tx_skbuff_dma[entry].buf,
1861                                                  tx_q->tx_skbuff_dma[entry].len,
1862                                                  DMA_TO_DEVICE);
1863                         tx_q->tx_skbuff_dma[entry].buf = 0;
1864                         tx_q->tx_skbuff_dma[entry].len = 0;
1865                         tx_q->tx_skbuff_dma[entry].map_as_page = false;
1866                 }
1867
1868                 if (priv->hw->mode->clean_desc3)
1869                         priv->hw->mode->clean_desc3(tx_q, p);
1870
1871                 tx_q->tx_skbuff_dma[entry].last_segment = false;
1872                 tx_q->tx_skbuff_dma[entry].is_jumbo = false;
1873
1874                 if (likely(skb != NULL)) {
1875                         pkts_compl++;
1876                         bytes_compl += skb->len;
1877                         dev_consume_skb_any(skb);
1878                         tx_q->tx_skbuff[entry] = NULL;
1879                 }
1880
1881                 priv->hw->desc->release_tx_desc(p, priv->mode);
1882
1883                 entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE);
1884         }
1885         tx_q->dirty_tx = entry;
1886
1887         netdev_tx_completed_queue(netdev_get_tx_queue(priv->dev, queue),
1888                                   pkts_compl, bytes_compl);
1889
1890         if (unlikely(netif_tx_queue_stopped(netdev_get_tx_queue(priv->dev,
1891                                                                 queue))) &&
1892             stmmac_tx_avail(priv, queue) > STMMAC_TX_THRESH) {
1893
1894                 netif_dbg(priv, tx_done, priv->dev,
1895                           "%s: restart transmit\n", __func__);
1896                 netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, queue));
1897         }
1898
1899         if ((priv->eee_enabled) && (!priv->tx_path_in_lpi_mode)) {
1900                 stmmac_enable_eee_mode(priv);
1901                 mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(eee_timer));
1902         }
1903         netif_tx_unlock(priv->dev);
1904 }
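
/*
 * Ring accounting note (editorial): dirty_tx chases cur_tx around the ring;
 * an entry is reclaimed only once the descriptor is no longer owned by the
 * DMA (tx_dma_own clear), and the netdev TX queue is only woken again when
 * more than STMMAC_TX_THRESH descriptors have become free.
 */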
1905
1906 static inline void stmmac_enable_dma_irq(struct stmmac_priv *priv, u32 chan)
1907 {
1908         priv->hw->dma->enable_dma_irq(priv->ioaddr, chan);
1909 }
1910
1911 static inline void stmmac_disable_dma_irq(struct stmmac_priv *priv, u32 chan)
1912 {
1913         priv->hw->dma->disable_dma_irq(priv->ioaddr, chan);
1914 }
1915
1916 /**
1917  * stmmac_tx_err - to manage the tx error
1918  * @priv: driver private structure
1919  * @chan: channel index
1920  * Description: it cleans the descriptors and restarts the transmission
1921  * in case of transmission errors.
1922  */
1923 static void stmmac_tx_err(struct stmmac_priv *priv, u32 chan)
1924 {
1925         struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan];
1926         int i;
1927
1928         netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, chan));
1929
1930         stmmac_stop_tx_dma(priv, chan);
1931         dma_free_tx_skbufs(priv, chan);
1932         for (i = 0; i < DMA_TX_SIZE; i++)
1933                 if (priv->extend_desc)
1934                         priv->hw->desc->init_tx_desc(&tx_q->dma_etx[i].basic,
1935                                                      priv->mode,
1936                                                      (i == DMA_TX_SIZE - 1));
1937                 else
1938                         priv->hw->desc->init_tx_desc(&tx_q->dma_tx[i],
1939                                                      priv->mode,
1940                                                      (i == DMA_TX_SIZE - 1));
1941         tx_q->dirty_tx = 0;
1942         tx_q->cur_tx = 0;
1943         netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, chan));
1944         stmmac_start_tx_dma(priv, chan);
1945
1946         priv->dev->stats.tx_errors++;
1947         netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, chan));
1948 }
1949
1950 /**
1951  *  stmmac_set_dma_operation_mode - Set DMA operation mode by channel
1952  *  @priv: driver private structure
1953  *  @txmode: TX operating mode
1954  *  @rxmode: RX operating mode
1955  *  @chan: channel index
1956  *  Description: it is used for configuring the DMA operation mode at
1957  *  runtime in order to program the tx/rx DMA thresholds or Store-And-Forward
1958  *  mode.
1959  */
1960 static void stmmac_set_dma_operation_mode(struct stmmac_priv *priv, u32 txmode,
1961                                           u32 rxmode, u32 chan)
1962 {
1963         u8 rxqmode = priv->plat->rx_queues_cfg[chan].mode_to_use;
1964         u8 txqmode = priv->plat->tx_queues_cfg[chan].mode_to_use;
1965         u32 rx_channels_count = priv->plat->rx_queues_to_use;
1966         u32 tx_channels_count = priv->plat->tx_queues_to_use;
1967         int rxfifosz = priv->plat->rx_fifo_size;
1968         int txfifosz = priv->plat->tx_fifo_size;
1969
1970         if (rxfifosz == 0)
1971                 rxfifosz = priv->dma_cap.rx_fifo_size;
1972         if (txfifosz == 0)
1973                 txfifosz = priv->dma_cap.tx_fifo_size;
1974
1975         /* Adjust for real per queue fifo size */
1976         rxfifosz /= rx_channels_count;
1977         txfifosz /= tx_channels_count;
1978
1979         if (priv->synopsys_id >= DWMAC_CORE_4_00) {
1980                 priv->hw->dma->dma_rx_mode(priv->ioaddr, rxmode, chan,
1981                                            rxfifosz, rxqmode);
1982                 priv->hw->dma->dma_tx_mode(priv->ioaddr, txmode, chan,
1983                                            txfifosz, txqmode);
1984         } else {
1985                 priv->hw->dma->dma_mode(priv->ioaddr, txmode, rxmode,
1986                                         rxfifosz);
1987         }
1988 }
1989
1990 /**
1991  * stmmac_dma_interrupt - DMA ISR
1992  * @priv: driver private structure
1993  * Description: this is the DMA ISR. It is called by the main ISR.
1994  * It calls the dwmac dma routine and schedules the poll method when there
1995  * is work to be done.
1996  */
1997 static void stmmac_dma_interrupt(struct stmmac_priv *priv)
1998 {
1999         u32 tx_channel_count = priv->plat->tx_queues_to_use;
2000         int status;
2001         u32 chan;
2002
2003         for (chan = 0; chan < tx_channel_count; chan++) {
2004                 struct stmmac_rx_queue *rx_q = &priv->rx_queue[chan];
2005
2006                 status = priv->hw->dma->dma_interrupt(priv->ioaddr,
2007                                                       &priv->xstats, chan);
2008                 if (likely((status & handle_rx)) || (status & handle_tx)) {
2009                         if (likely(napi_schedule_prep(&rx_q->napi))) {
2010                                 stmmac_disable_dma_irq(priv, chan);
2011                                 __napi_schedule(&rx_q->napi);
2012                         }
2013                 }
2014
2015                 if (unlikely(status & tx_hard_error_bump_tc)) {
2016                         /* Try to bump up the dma threshold on this failure */
2017                         if (unlikely(priv->xstats.threshold != SF_DMA_MODE) &&
2018                             (tc <= 256)) {
2019                                 tc += 64;
2020                                 if (priv->plat->force_thresh_dma_mode)
2021                                         stmmac_set_dma_operation_mode(priv,
2022                                                                       tc,
2023                                                                       tc,
2024                                                                       chan);
2025                                 else
2026                                         stmmac_set_dma_operation_mode(priv,
2027                                                                     tc,
2028                                                                     SF_DMA_MODE,
2029                                                                     chan);
2030                                 priv->xstats.threshold = tc;
2031                         }
2032                 } else if (unlikely(status == tx_hard_error)) {
2033                         stmmac_tx_err(priv, chan);
2034                 }
2035         }
2036 }
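
/*
 * Editorial note on the threshold bump above: every tx_hard_error_bump_tc
 * event raises the "tc" threshold by 64 units for as long as its current
 * value is at most 256, and the new value is mirrored into
 * priv->xstats.threshold so the extra statistics reflect it.
 */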
2037
2038 /**
2039  * stmmac_mmc_setup - setup the MAC Management Counters (MMC)
2040  * @priv: driver private structure
2041  * Description: this masks the MMC irq; in fact, the counters are managed in SW.
2042  */
2043 static void stmmac_mmc_setup(struct stmmac_priv *priv)
2044 {
2045         unsigned int mode = MMC_CNTRL_RESET_ON_READ | MMC_CNTRL_COUNTER_RESET |
2046                             MMC_CNTRL_PRESET | MMC_CNTRL_FULL_HALF_PRESET;
2047
2048         if (priv->synopsys_id >= DWMAC_CORE_4_00) {
2049                 priv->ptpaddr = priv->ioaddr + PTP_GMAC4_OFFSET;
2050                 priv->mmcaddr = priv->ioaddr + MMC_GMAC4_OFFSET;
2051         } else {
2052                 priv->ptpaddr = priv->ioaddr + PTP_GMAC3_X_OFFSET;
2053                 priv->mmcaddr = priv->ioaddr + MMC_GMAC3_X_OFFSET;
2054         }
2055
2056         dwmac_mmc_intr_all_mask(priv->mmcaddr);
2057
2058         if (priv->dma_cap.rmon) {
2059                 dwmac_mmc_ctrl(priv->mmcaddr, mode);
2060                 memset(&priv->mmc, 0, sizeof(struct stmmac_counters));
2061         } else
2062                 netdev_info(priv->dev, "No MAC Management Counters available\n");
2063 }
2064
2065 /**
2066  * stmmac_selec_desc_mode - to select among: normal/alternate/extended descriptors
2067  * @priv: driver private structure
2068  * Description: select the Enhanced/Alternate or Normal descriptors.
2069  * In case of Enhanced/Alternate, it checks if the extended descriptors are
2070  * supported by the HW capability register.
2071  */
2072 static void stmmac_selec_desc_mode(struct stmmac_priv *priv)
2073 {
2074         if (priv->plat->enh_desc) {
2075                 dev_info(priv->device, "Enhanced/Alternate descriptors\n");
2076
2077                 /* GMAC older than 3.50 has no extended descriptors */
2078                 if (priv->synopsys_id >= DWMAC_CORE_3_50) {
2079                         dev_info(priv->device, "Enabled extended descriptors\n");
2080                         priv->extend_desc = 1;
2081                 } else
2082                         dev_warn(priv->device, "Extended descriptors not supported\n");
2083
2084                 priv->hw->desc = &enh_desc_ops;
2085         } else {
2086                 dev_info(priv->device, "Normal descriptors\n");
2087                 priv->hw->desc = &ndesc_ops;
2088         }
2089 }
2090
2091 /**
2092  * stmmac_get_hw_features - get MAC capabilities from the HW cap. register.
2093  * @priv: driver private structure
2094  * Description:
2095  *  newer GMAC chip generations have a dedicated register to indicate the
2096  *  presence of the optional features/functions.
2097  *  This can also be used to override the values passed through the
2098  *  platform code, which remain necessary for old MAC10/100 and GMAC chips.
2099  */
2100 static int stmmac_get_hw_features(struct stmmac_priv *priv)
2101 {
2102         u32 ret = 0;
2103
2104         if (priv->hw->dma->get_hw_feature) {
2105                 priv->hw->dma->get_hw_feature(priv->ioaddr,
2106                                               &priv->dma_cap);
2107                 ret = 1;
2108         }
2109
2110         return ret;
2111 }
2112
2113 /**
2114  * stmmac_check_ether_addr - check if the MAC addr is valid
2115  * @priv: driver private structure
2116  * Description:
2117  * it verifies whether the MAC address is valid; in case of failure it
2118  * generates a random MAC address
2119  */
2120 static void stmmac_check_ether_addr(struct stmmac_priv *priv)
2121 {
2122         if (!is_valid_ether_addr(priv->dev->dev_addr)) {
2123                 priv->hw->mac->get_umac_addr(priv->hw,
2124                                              priv->dev->dev_addr, 0);
2125                 if (!is_valid_ether_addr(priv->dev->dev_addr))
2126                         eth_hw_addr_random(priv->dev);
2127                 netdev_info(priv->dev, "device MAC address %pM\n",
2128                             priv->dev->dev_addr);
2129         }
2130 }
2131
2132 /**
2133  * stmmac_init_dma_engine - DMA init.
2134  * @priv: driver private structure
2135  * Description:
2136  * It initializes the DMA by invoking the specific MAC/GMAC callback.
2137  * Some DMA parameters can be passed from the platform;
2138  * if they are not passed, a default is kept for the MAC or GMAC.
2139  */
2140 static int stmmac_init_dma_engine(struct stmmac_priv *priv)
2141 {
2142         u32 rx_channels_count = priv->plat->rx_queues_to_use;
2143         u32 tx_channels_count = priv->plat->tx_queues_to_use;
2144         struct stmmac_rx_queue *rx_q;
2145         struct stmmac_tx_queue *tx_q;
2146         u32 dummy_dma_rx_phy = 0;
2147         u32 dummy_dma_tx_phy = 0;
2148         u32 chan = 0;
2149         int atds = 0;
2150         int ret = 0;
2151
2152         if (!priv->plat->dma_cfg || !priv->plat->dma_cfg->pbl) {
2153                 dev_err(priv->device, "Invalid DMA configuration\n");
2154                 return -EINVAL;
2155         }
2156
2157         if (priv->extend_desc && (priv->mode == STMMAC_RING_MODE))
2158                 atds = 1;
2159
2160         ret = priv->hw->dma->reset(priv->ioaddr);
2161         if (ret) {
2162                 dev_err(priv->device, "Failed to reset the dma\n");
2163                 return ret;
2164         }
2165
2166         if (priv->synopsys_id >= DWMAC_CORE_4_00) {
2167                 /* DMA Configuration */
2168                 priv->hw->dma->init(priv->ioaddr, priv->plat->dma_cfg,
2169                                     dummy_dma_tx_phy, dummy_dma_rx_phy, atds);
2170
2171                 /* DMA RX Channel Configuration */
2172                 for (chan = 0; chan < rx_channels_count; chan++) {
2173                         rx_q = &priv->rx_queue[chan];
2174
2175                         priv->hw->dma->init_rx_chan(priv->ioaddr,
2176                                                     priv->plat->dma_cfg,
2177                                                     rx_q->dma_rx_phy, chan);
2178
2179                         rx_q->rx_tail_addr = rx_q->dma_rx_phy +
2180                                     (DMA_RX_SIZE * sizeof(struct dma_desc));
2181                         priv->hw->dma->set_rx_tail_ptr(priv->ioaddr,
2182                                                        rx_q->rx_tail_addr,
2183                                                        chan);
2184                 }
2185
2186                 /* DMA TX Channel Configuration */
2187                 for (chan = 0; chan < tx_channels_count; chan++) {
2188                         tx_q = &priv->tx_queue[chan];
2189
2190                         priv->hw->dma->init_chan(priv->ioaddr,
2191                                                  priv->plat->dma_cfg,
2192                                                  chan);
2193
2194                         priv->hw->dma->init_tx_chan(priv->ioaddr,
2195                                                     priv->plat->dma_cfg,
2196                                                     tx_q->dma_tx_phy, chan);
2197
2198                         tx_q->tx_tail_addr = tx_q->dma_tx_phy +
2199                                     (DMA_TX_SIZE * sizeof(struct dma_desc));
2200                         priv->hw->dma->set_tx_tail_ptr(priv->ioaddr,
2201                                                        tx_q->tx_tail_addr,
2202                                                        chan);
2203                 }
2204         } else {
2205                 rx_q = &priv->rx_queue[chan];
2206                 tx_q = &priv->tx_queue[chan];
2207                 priv->hw->dma->init(priv->ioaddr, priv->plat->dma_cfg,
2208                                     tx_q->dma_tx_phy, rx_q->dma_rx_phy, atds);
2209         }
2210
2211         if (priv->plat->axi && priv->hw->dma->axi)
2212                 priv->hw->dma->axi(priv->ioaddr, priv->plat->axi);
2213
2214         return ret;
2215 }
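
/*
 * Editorial note: for GMAC4 and newer cores the rx_tail_addr/tx_tail_addr
 * values above are initialized to point just past the last descriptor of
 * each ring, i.e. dma_{rx,tx}_phy + DMA_{RX,TX}_SIZE * sizeof(struct
 * dma_desc).
 */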
2216
2217 /**
2218  * stmmac_tx_timer - mitigation sw timer for tx.
2219  * @t: pointer to the txtimer embedded in the driver private structure
2220  * Description:
2221  * This is the timer handler used to directly invoke stmmac_tx_clean().
2222  */
2223 static void stmmac_tx_timer(struct timer_list *t)
2224 {
2225         struct stmmac_priv *priv = from_timer(priv, t, txtimer);
2226         u32 tx_queues_count = priv->plat->tx_queues_to_use;
2227         u32 queue;
2228
2229         /* let's scan all the tx queues */
2230         for (queue = 0; queue < tx_queues_count; queue++)
2231                 stmmac_tx_clean(priv, queue);
2232 }
2233
2234 /**
2235  * stmmac_init_tx_coalesce - init tx mitigation options.
2236  * @priv: driver private structure
2237  * Description:
2238  * This initializes the transmit coalesce parameters: i.e. timer rate,
2239  * timer handler and default threshold used for enabling the
2240  * interrupt on completion bit.
2241  */
2242 static void stmmac_init_tx_coalesce(struct stmmac_priv *priv)
2243 {
2244         priv->tx_coal_frames = STMMAC_TX_FRAMES;
2245         priv->tx_coal_timer = STMMAC_COAL_TX_TIMER;
2246         timer_setup(&priv->txtimer, stmmac_tx_timer, 0);
2247         priv->txtimer.expires = STMMAC_COAL_TIMER(priv->tx_coal_timer);
2248         add_timer(&priv->txtimer);
2249 }
2250
2251 static void stmmac_set_rings_length(struct stmmac_priv *priv)
2252 {
2253         u32 rx_channels_count = priv->plat->rx_queues_to_use;
2254         u32 tx_channels_count = priv->plat->tx_queues_to_use;
2255         u32 chan;
2256
2257         /* set TX ring length */
2258         if (priv->hw->dma->set_tx_ring_len) {
2259                 for (chan = 0; chan < tx_channels_count; chan++)
2260                         priv->hw->dma->set_tx_ring_len(priv->ioaddr,
2261                                                        (DMA_TX_SIZE - 1), chan);
2262         }
2263
2264         /* set RX ring length */
2265         if (priv->hw->dma->set_rx_ring_len) {
2266                 for (chan = 0; chan < rx_channels_count; chan++)
2267                         priv->hw->dma->set_rx_ring_len(priv->ioaddr,
2268                                                        (DMA_RX_SIZE - 1), chan);
2269         }
2270 }
2271
2272 /**
2273  *  stmmac_set_tx_queue_weight - Set TX queue weight
2274  *  @priv: driver private structure
2275  *  Description: It is used for setting TX queues weight
2276  */
2277 static void stmmac_set_tx_queue_weight(struct stmmac_priv *priv)
2278 {
2279         u32 tx_queues_count = priv->plat->tx_queues_to_use;
2280         u32 weight;
2281         u32 queue;
2282
2283         for (queue = 0; queue < tx_queues_count; queue++) {
2284                 weight = priv->plat->tx_queues_cfg[queue].weight;
2285                 priv->hw->mac->set_mtl_tx_queue_weight(priv->hw, weight, queue);
2286         }
2287 }
2288
2289 /**
2290  *  stmmac_configure_cbs - Configure CBS in TX queue
2291  *  @priv: driver private structure
2292  *  Description: It is used for configuring CBS in AVB TX queues
2293  */
2294 static void stmmac_configure_cbs(struct stmmac_priv *priv)
2295 {
2296         u32 tx_queues_count = priv->plat->tx_queues_to_use;
2297         u32 mode_to_use;
2298         u32 queue;
2299
2300         /* queue 0 is reserved for legacy traffic */
2301         for (queue = 1; queue < tx_queues_count; queue++) {
2302                 mode_to_use = priv->plat->tx_queues_cfg[queue].mode_to_use;
2303                 if (mode_to_use == MTL_QUEUE_DCB)
2304                         continue;
2305
2306                 priv->hw->mac->config_cbs(priv->hw,
2307                                 priv->plat->tx_queues_cfg[queue].send_slope,
2308                                 priv->plat->tx_queues_cfg[queue].idle_slope,
2309                                 priv->plat->tx_queues_cfg[queue].high_credit,
2310                                 priv->plat->tx_queues_cfg[queue].low_credit,
2311                                 queue);
2312         }
2313 }
2314
2315 /**
2316  *  stmmac_rx_queue_dma_chan_map - Map RX queue to RX dma channel
2317  *  @priv: driver private structure
2318  *  Description: It is used for mapping RX queues to RX dma channels
2319  */
2320 static void stmmac_rx_queue_dma_chan_map(struct stmmac_priv *priv)
2321 {
2322         u32 rx_queues_count = priv->plat->rx_queues_to_use;
2323         u32 queue;
2324         u32 chan;
2325
2326         for (queue = 0; queue < rx_queues_count; queue++) {
2327                 chan = priv->plat->rx_queues_cfg[queue].chan;
2328                 priv->hw->mac->map_mtl_to_dma(priv->hw, queue, chan);
2329         }
2330 }
2331
2332 /**
2333  *  stmmac_mac_config_rx_queues_prio - Configure RX Queue priority
2334  *  @priv: driver private structure
2335  *  Description: It is used for configuring the RX Queue Priority
2336  */
2337 static void stmmac_mac_config_rx_queues_prio(struct stmmac_priv *priv)
2338 {
2339         u32 rx_queues_count = priv->plat->rx_queues_to_use;
2340         u32 queue;
2341         u32 prio;
2342
2343         for (queue = 0; queue < rx_queues_count; queue++) {
2344                 if (!priv->plat->rx_queues_cfg[queue].use_prio)
2345                         continue;
2346
2347                 prio = priv->plat->rx_queues_cfg[queue].prio;
2348                 priv->hw->mac->rx_queue_prio(priv->hw, prio, queue);
2349         }
2350 }
2351
2352 /**
2353  *  stmmac_mac_config_tx_queues_prio - Configure TX Queue priority
2354  *  @priv: driver private structure
2355  *  Description: It is used for configuring the TX Queue Priority
2356  */
2357 static void stmmac_mac_config_tx_queues_prio(struct stmmac_priv *priv)
2358 {
2359         u32 tx_queues_count = priv->plat->tx_queues_to_use;
2360         u32 queue;
2361         u32 prio;
2362
2363         for (queue = 0; queue < tx_queues_count; queue++) {
2364                 if (!priv->plat->tx_queues_cfg[queue].use_prio)
2365                         continue;
2366
2367                 prio = priv->plat->tx_queues_cfg[queue].prio;
2368                 priv->hw->mac->tx_queue_prio(priv->hw, prio, queue);
2369         }
2370 }
2371
2372 /**
2373  *  stmmac_mac_config_rx_queues_routing - Configure RX Queue Routing
2374  *  @priv: driver private structure
2375  *  Description: It is used for configuring the RX queue routing
2376  */
2377 static void stmmac_mac_config_rx_queues_routing(struct stmmac_priv *priv)
2378 {
2379         u32 rx_queues_count = priv->plat->rx_queues_to_use;
2380         u32 queue;
2381         u8 packet;
2382
2383         for (queue = 0; queue < rx_queues_count; queue++) {
2384                 /* no specific packet type routing specified for the queue */
2385                 if (priv->plat->rx_queues_cfg[queue].pkt_route == 0x0)
2386                         continue;
2387
2388                 packet = priv->plat->rx_queues_cfg[queue].pkt_route;
2389                 priv->hw->mac->rx_queue_routing(priv->hw, packet, queue);
2390         }
2391 }
2392
2393 /**
2394  *  stmmac_mtl_configuration - Configure MTL
2395  *  @priv: driver private structure
2396  *  Description: It is used for configuring the MTL
2397  */
2398 static void stmmac_mtl_configuration(struct stmmac_priv *priv)
2399 {
2400         u32 rx_queues_count = priv->plat->rx_queues_to_use;
2401         u32 tx_queues_count = priv->plat->tx_queues_to_use;
2402
2403         if (tx_queues_count > 1 && priv->hw->mac->set_mtl_tx_queue_weight)
2404                 stmmac_set_tx_queue_weight(priv);
2405
2406         /* Configure MTL RX algorithms */
2407         if (rx_queues_count > 1 && priv->hw->mac->prog_mtl_rx_algorithms)
2408                 priv->hw->mac->prog_mtl_rx_algorithms(priv->hw,
2409                                                 priv->plat->rx_sched_algorithm);
2410
2411         /* Configure MTL TX algorithms */
2412         if (tx_queues_count > 1 && priv->hw->mac->prog_mtl_tx_algorithms)
2413                 priv->hw->mac->prog_mtl_tx_algorithms(priv->hw,
2414                                                 priv->plat->tx_sched_algorithm);
2415
2416         /* Configure CBS in AVB TX queues */
2417         if (tx_queues_count > 1 && priv->hw->mac->config_cbs)
2418                 stmmac_configure_cbs(priv);
2419
2420         /* Map RX MTL to DMA channels */
2421         if (priv->hw->mac->map_mtl_to_dma)
2422                 stmmac_rx_queue_dma_chan_map(priv);
2423
2424         /* Enable MAC RX Queues */
2425         if (priv->hw->mac->rx_queue_enable)
2426                 stmmac_mac_enable_rx_queues(priv);
2427
2428         /* Set RX priorities */
2429         if (rx_queues_count > 1 && priv->hw->mac->rx_queue_prio)
2430                 stmmac_mac_config_rx_queues_prio(priv);
2431
2432         /* Set TX priorities */
2433         if (tx_queues_count > 1 && priv->hw->mac->tx_queue_prio)
2434                 stmmac_mac_config_tx_queues_prio(priv);
2435
2436         /* Set RX routing */
2437         if (rx_queues_count > 1 && priv->hw->mac->rx_queue_routing)
2438                 stmmac_mac_config_rx_queues_routing(priv);
2439 }
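
/*
 * Editorial note: each step above is gated on the presence of the
 * corresponding MAC callback (and, where it only matters for multi-queue
 * setups, on the number of queues in use), so cores lacking a given feature
 * simply skip that step.
 */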
2440
2441 /**
2442  * stmmac_hw_setup - setup the MAC in a usable state.
2443  *  @dev : pointer to the device structure.
2444  *  @init_ptp: initialize the PTP clock if set
2445  *  Description:
2446  *  this is the main function to setup the HW in a usable state: the DMA
2447  *  engine is reset, the core registers are configured (e.g. AXI, checksum
2448  *  features, timers) and the DMA is ready to start receiving and transmitting.
2449  *  Return value:
2450  *  0 on success and an appropriate (-)ve integer as defined in errno.h
2451  *  file on failure.
2452  */
2453 static int stmmac_hw_setup(struct net_device *dev, bool init_ptp)
2454 {
2455         struct stmmac_priv *priv = netdev_priv(dev);
2456         u32 rx_cnt = priv->plat->rx_queues_to_use;
2457         u32 tx_cnt = priv->plat->tx_queues_to_use;
2458         u32 chan;
2459         int ret;
2460
2461         /* DMA initialization and SW reset */
2462         ret = stmmac_init_dma_engine(priv);
2463         if (ret < 0) {
2464                 netdev_err(priv->dev, "%s: DMA engine initialization failed\n",
2465                            __func__);
2466                 return ret;
2467         }
2468
2469         /* Copy the MAC addr into the HW  */
2470         priv->hw->mac->set_umac_addr(priv->hw, dev->dev_addr, 0);
2471
2472         /* PS and related bits will be programmed according to the speed */
2473         if (priv->hw->pcs) {
2474                 int speed = priv->plat->mac_port_sel_speed;
2475
2476                 if ((speed == SPEED_10) || (speed == SPEED_100) ||
2477                     (speed == SPEED_1000)) {
2478                         priv->hw->ps = speed;
2479                 } else {
2480                         dev_warn(priv->device, "invalid port speed\n");
2481                         priv->hw->ps = 0;
2482                 }
2483         }
2484
2485         /* Initialize the MAC Core */
2486         priv->hw->mac->core_init(priv->hw, dev->mtu);
2487
2488         /* Initialize MTL */
2489         if (priv->synopsys_id >= DWMAC_CORE_4_00)
2490                 stmmac_mtl_configuration(priv);
2491
2492         ret = priv->hw->mac->rx_ipc(priv->hw);
2493         if (!ret) {
2494                 netdev_warn(priv->dev, "RX IPC Checksum Offload disabled\n");
2495                 priv->plat->rx_coe = STMMAC_RX_COE_NONE;
2496                 priv->hw->rx_csum = 0;
2497         }
2498
2499         /* Enable the MAC Rx/Tx */
2500         priv->hw->mac->set_mac(priv->ioaddr, true);
2501
2502         /* Set the HW DMA mode and the COE */
2503         stmmac_dma_operation_mode(priv);
2504
2505         stmmac_mmc_setup(priv);
2506
2507         if (init_ptp) {
2508                 ret = clk_prepare_enable(priv->plat->clk_ptp_ref);
2509                 if (ret < 0)
2510                         netdev_warn(priv->dev, "failed to enable PTP reference clock: %d\n", ret);
2511
2512                 ret = stmmac_init_ptp(priv);
2513                 if (ret == -EOPNOTSUPP)
2514                         netdev_warn(priv->dev, "PTP not supported by HW\n");
2515                 else if (ret)
2516                         netdev_warn(priv->dev, "PTP init failed\n");
2517         }
2518
2519 #ifdef CONFIG_DEBUG_FS
2520         ret = stmmac_init_fs(dev);
2521         if (ret < 0)
2522                 netdev_warn(priv->dev, "%s: failed debugFS registration\n",
2523                             __func__);
2524 #endif
2525         /* Start the ball rolling... */
2526         stmmac_start_all_dma(priv);
2527
2528         priv->tx_lpi_timer = STMMAC_DEFAULT_TWT_LS;
2529
2530         if ((priv->use_riwt) && (priv->hw->dma->rx_watchdog)) {
2531                 priv->rx_riwt = MAX_DMA_RIWT;
2532                 priv->hw->dma->rx_watchdog(priv->ioaddr, MAX_DMA_RIWT, rx_cnt);
2533         }
2534
2535         if (priv->hw->pcs && priv->hw->mac->pcs_ctrl_ane)
2536                 priv->hw->mac->pcs_ctrl_ane(priv->hw, 1, priv->hw->ps, 0);
2537
2538         /* set TX and RX rings length */
2539         stmmac_set_rings_length(priv);
2540
2541         /* Enable TSO */
2542         if (priv->tso) {
2543                 for (chan = 0; chan < tx_cnt; chan++)
2544                         priv->hw->dma->enable_tso(priv->ioaddr, 1, chan);
2545         }
2546
2547         return 0;
2548 }
2549
2550 static void stmmac_hw_teardown(struct net_device *dev)
2551 {
2552         struct stmmac_priv *priv = netdev_priv(dev);
2553
2554         clk_disable_unprepare(priv->plat->clk_ptp_ref);
2555 }
2556
2557 /**
2558  *  stmmac_open - open entry point of the driver
2559  *  @dev : pointer to the device structure.
2560  *  Description:
2561  *  This function is the open entry point of the driver.
2562  *  Return value:
2563  *  0 on success and an appropriate (-)ve integer as defined in errno.h
2564  *  file on failure.
2565  */
2566 static int stmmac_open(struct net_device *dev)
2567 {
2568         struct stmmac_priv *priv = netdev_priv(dev);
2569         int ret;
2570
2571         stmmac_check_ether_addr(priv);
2572
2573         if (priv->hw->pcs != STMMAC_PCS_RGMII &&
2574             priv->hw->pcs != STMMAC_PCS_TBI &&
2575             priv->hw->pcs != STMMAC_PCS_RTBI) {
2576                 ret = stmmac_init_phy(dev);
2577                 if (ret) {
2578                         netdev_err(priv->dev,
2579                                    "%s: Cannot attach to PHY (error: %d)\n",
2580                                    __func__, ret);
2581                         return ret;
2582                 }
2583         }
2584
2585         /* Extra statistics */
2586         memset(&priv->xstats, 0, sizeof(struct stmmac_extra_stats));
2587         priv->xstats.threshold = tc;
2588
2589         priv->dma_buf_sz = STMMAC_ALIGN(buf_sz);
2590         priv->rx_copybreak = STMMAC_RX_COPYBREAK;
2591         priv->mss = 0;
2592
2593         ret = alloc_dma_desc_resources(priv);
2594         if (ret < 0) {
2595                 netdev_err(priv->dev, "%s: DMA descriptors allocation failed\n",
2596                            __func__);
2597                 goto dma_desc_error;
2598         }
2599
2600         ret = init_dma_desc_rings(dev, GFP_KERNEL);
2601         if (ret < 0) {
2602                 netdev_err(priv->dev, "%s: DMA descriptors initialization failed\n",
2603                            __func__);
2604                 goto init_error;
2605         }
2606
2607         ret = stmmac_hw_setup(dev, true);
2608         if (ret < 0) {
2609                 netdev_err(priv->dev, "%s: Hw setup failed\n", __func__);
2610                 goto init_error;
2611         }
2612
2613         stmmac_init_tx_coalesce(priv);
2614
2615         if (dev->phydev)
2616                 phy_start(dev->phydev);
2617
2618         /* Request the IRQ lines */
2619         ret = request_irq(dev->irq, stmmac_interrupt,
2620                           IRQF_SHARED, dev->name, dev);
2621         if (unlikely(ret < 0)) {
2622                 netdev_err(priv->dev,
2623                            "%s: ERROR: allocating the IRQ %d (error: %d)\n",
2624                            __func__, dev->irq, ret);
2625                 goto irq_error;
2626         }
2627
2628         /* Request the Wake IRQ in case another line is used for WoL */
2629         if (priv->wol_irq != dev->irq) {
2630                 ret = request_irq(priv->wol_irq, stmmac_interrupt,
2631                                   IRQF_SHARED, dev->name, dev);
2632                 if (unlikely(ret < 0)) {
2633                         netdev_err(priv->dev,
2634                                    "%s: ERROR: allocating the WoL IRQ %d (%d)\n",
2635                                    __func__, priv->wol_irq, ret);
2636                         goto wolirq_error;
2637                 }
2638         }
2639
2640         /* Request the LPI IRQ in case a separate line is used for it */
2641         if (priv->lpi_irq > 0) {
2642                 ret = request_irq(priv->lpi_irq, stmmac_interrupt, IRQF_SHARED,
2643                                   dev->name, dev);
2644                 if (unlikely(ret < 0)) {
2645                         netdev_err(priv->dev,
2646                                    "%s: ERROR: allocating the LPI IRQ %d (%d)\n",
2647                                    __func__, priv->lpi_irq, ret);
2648                         goto lpiirq_error;
2649                 }
2650         }
2651
2652         stmmac_enable_all_queues(priv);
2653         stmmac_start_all_queues(priv);
2654
2655         return 0;
2656
2657 lpiirq_error:
2658         if (priv->wol_irq != dev->irq)
2659                 free_irq(priv->wol_irq, dev);
2660 wolirq_error:
2661         free_irq(dev->irq, dev);
2662 irq_error:
2663         if (dev->phydev)
2664                 phy_stop(dev->phydev);
2665
2666         del_timer_sync(&priv->txtimer);
2667         stmmac_hw_teardown(dev);
2668 init_error:
2669         free_dma_desc_resources(priv);
2670 dma_desc_error:
2671         if (dev->phydev)
2672                 phy_disconnect(dev->phydev);
2673
2674         return ret;
2675 }
2676
2677 /**
2678  *  stmmac_release - close entry point of the driver
2679  *  @dev : device pointer.
2680  *  Description:
2681  *  This is the stop entry point of the driver.
2682  */
2683 static int stmmac_release(struct net_device *dev)
2684 {
2685         struct stmmac_priv *priv = netdev_priv(dev);
2686
2687         if (priv->eee_enabled)
2688                 del_timer_sync(&priv->eee_ctrl_timer);
2689
2690         /* Stop and disconnect the PHY */
2691         if (dev->phydev) {
2692                 phy_stop(dev->phydev);
2693                 phy_disconnect(dev->phydev);
2694         }
2695
2696         stmmac_stop_all_queues(priv);
2697
2698         stmmac_disable_all_queues(priv);
2699
2700         del_timer_sync(&priv->txtimer);
2701
2702         /* Free the IRQ lines */
2703         free_irq(dev->irq, dev);
2704         if (priv->wol_irq != dev->irq)
2705                 free_irq(priv->wol_irq, dev);
2706         if (priv->lpi_irq > 0)
2707                 free_irq(priv->lpi_irq, dev);
2708
2709         /* Stop TX/RX DMA and clear the descriptors */
2710         stmmac_stop_all_dma(priv);
2711
2712         /* Release and free the Rx/Tx resources */
2713         free_dma_desc_resources(priv);
2714
2715         /* Disable the MAC Rx/Tx */
2716         priv->hw->mac->set_mac(priv->ioaddr, false);
2717
2718         netif_carrier_off(dev);
2719
2720 #ifdef CONFIG_DEBUG_FS
2721         stmmac_exit_fs(dev);
2722 #endif
2723
2724         stmmac_release_ptp(priv);
2725
2726         return 0;
2727 }
2728
2729 /**
2730  *  stmmac_tso_allocator - fill the TSO descriptors for a buffer
2731  *  @priv: driver private structure
2732  *  @des: buffer start address
2733  *  @total_len: total length to fill in descriptors
2734  *  @last_segment: condition for the last descriptor
2735  *  @queue: TX queue index
2736  *  Description:
2737  *  This function fills the descriptors and requests new descriptors
2738  *  according to the buffer length to fill.
2739  */
2740 static void stmmac_tso_allocator(struct stmmac_priv *priv, unsigned int des,
2741                                  int total_len, bool last_segment, u32 queue)
2742 {
2743         struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
2744         struct dma_desc *desc;
2745         u32 buff_size;
2746         int tmp_len;
2747
2748         tmp_len = total_len;
2749
2750         while (tmp_len > 0) {
2751                 tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, DMA_TX_SIZE);
2752                 desc = tx_q->dma_tx + tx_q->cur_tx;
2753
2754                 desc->des0 = cpu_to_le32(des + (total_len - tmp_len));
2755                 buff_size = tmp_len >= TSO_MAX_BUFF_SIZE ?
2756                             TSO_MAX_BUFF_SIZE : tmp_len;
2757
2758                 priv->hw->desc->prepare_tso_tx_desc(desc, 0, buff_size,
2759                         0, 1,
2760                         (last_segment) && (tmp_len <= TSO_MAX_BUFF_SIZE),
2761                         0, 0);
2762
2763                 tmp_len -= TSO_MAX_BUFF_SIZE;
2764         }
2765 }
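
/*
 * Illustrative example: a payload larger than TSO_MAX_BUFF_SIZE is split by
 * the loop above into several descriptors of at most TSO_MAX_BUFF_SIZE bytes
 * each, with des0 advanced by the amount already queued; only the chunk that
 * is both the last segment and no larger than TSO_MAX_BUFF_SIZE gets the
 * Last Segment bit set.
 */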
2766
2767 /**
2768  *  stmmac_tso_xmit - Tx entry point of the driver for oversized frames (TSO)
2769  *  @skb : the socket buffer
2770  *  @dev : device pointer
2771  *  Description: this is the transmit function that is called on TSO frames
2772  *  (support available on GMAC4 and newer chips).
2773  *  Diagram below show the ring programming in case of TSO frames:
2774  *
2775  *  First Descriptor
2776  *   --------
2777  *   | DES0 |---> buffer1 = L2/L3/L4 header
2778  *   | DES1 |---> TCP Payload (can continue on next descr...)
2779  *   | DES2 |---> buffer 1 and 2 len
2780  *   | DES3 |---> must set TSE, TCP hdr len-> [22:19]. TCP payload len [17:0]
2781  *   --------
2782  *      |
2783  *     ...
2784  *      |
2785  *   --------
2786  *   | DES0 | --| Split TCP Payload on Buffers 1 and 2
2787  *   | DES1 | --|
2788  *   | DES2 | --> buffer 1 and 2 len
2789  *   | DES3 |
2790  *   --------
2791  *
2792  * The MSS is fixed while TSO is enabled, so the TDES3 ctx field need not be reprogrammed per frame.
2793  */
2794 static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
2795 {
2796         struct dma_desc *desc, *first, *mss_desc = NULL;
2797         struct stmmac_priv *priv = netdev_priv(dev);
2798         int nfrags = skb_shinfo(skb)->nr_frags;
2799         u32 queue = skb_get_queue_mapping(skb);
2800         unsigned int first_entry, des;
2801         struct stmmac_tx_queue *tx_q;
2802         int tmp_pay_len = 0;
2803         u32 pay_len, mss;
2804         u8 proto_hdr_len;
2805         int i;
2806
2807         tx_q = &priv->tx_queue[queue];
2808
2809         /* Compute header lengths */
2810         proto_hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
2811
2812         /* Desc availability checked against the threshold should be safe enough */
2813         if (unlikely(stmmac_tx_avail(priv, queue) <
2814                 (((skb->len - proto_hdr_len) / TSO_MAX_BUFF_SIZE + 1)))) {
2815                 if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) {
2816                         netif_tx_stop_queue(netdev_get_tx_queue(priv->dev,
2817                                                                 queue));
2818                         /* This is a hard error, log it. */
2819                         netdev_err(priv->dev,
2820                                    "%s: Tx Ring full when queue awake\n",
2821                                    __func__);
2822                 }
2823                 return NETDEV_TX_BUSY;
2824         }
2825
2826         pay_len = skb_headlen(skb) - proto_hdr_len; /* no frags */
2827
2828         mss = skb_shinfo(skb)->gso_size;
2829
2830         /* set new MSS value if needed */
2831         if (mss != priv->mss) {
2832                 mss_desc = tx_q->dma_tx + tx_q->cur_tx;
2833                 priv->hw->desc->set_mss(mss_desc, mss);
2834                 priv->mss = mss;
2835                 tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, DMA_TX_SIZE);
2836         }
2837
2838         if (netif_msg_tx_queued(priv)) {
2839                 pr_info("%s: tcphdrlen %d, hdr_len %d, pay_len %d, mss %d\n",
2840                         __func__, tcp_hdrlen(skb), proto_hdr_len, pay_len, mss);
2841                 pr_info("\tskb->len %d, skb->data_len %d\n", skb->len,
2842                         skb->data_len);
2843         }
2844
2845         first_entry = tx_q->cur_tx;
2846
2847         desc = tx_q->dma_tx + first_entry;
2848         first = desc;
2849
2850         /* first descriptor: fill Headers on Buf1 */
2851         des = dma_map_single(priv->device, skb->data, skb_headlen(skb),
2852                              DMA_TO_DEVICE);
2853         if (dma_mapping_error(priv->device, des))
2854                 goto dma_map_err;
2855
2856         tx_q->tx_skbuff_dma[first_entry].buf = des;
2857         tx_q->tx_skbuff_dma[first_entry].len = skb_headlen(skb);
2858
2859         first->des0 = cpu_to_le32(des);
2860
2861         /* Fill start of payload in buff2 of first descriptor */
2862         if (pay_len)
2863                 first->des1 = cpu_to_le32(des + proto_hdr_len);
2864
2865         /* If needed take extra descriptors to fill the remaining payload */
2866         tmp_pay_len = pay_len - TSO_MAX_BUFF_SIZE;
2867
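             /* Buffer 2 of the first descriptor already points at the start of
              * the payload and is assumed to cover up to TSO_MAX_BUFF_SIZE bytes,
              * so only the remainder is spread over the extra descriptors below.
              */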
2868         stmmac_tso_allocator(priv, des, tmp_pay_len, (nfrags == 0), queue);
2869
2870         /* Prepare fragments */
2871         for (i = 0; i < nfrags; i++) {
2872                 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2873
2874                 des = skb_frag_dma_map(priv->device, frag, 0,
2875                                        skb_frag_size(frag),
2876                                        DMA_TO_DEVICE);
2877                 if (dma_mapping_error(priv->device, des))
2878                         goto dma_map_err;
2879
2880                 stmmac_tso_allocator(priv, des, skb_frag_size(frag),
2881                                      (i == nfrags - 1), queue);
2882
2883                 tx_q->tx_skbuff_dma[tx_q->cur_tx].buf = des;
2884                 tx_q->tx_skbuff_dma[tx_q->cur_tx].len = skb_frag_size(frag);
2885                 tx_q->tx_skbuff[tx_q->cur_tx] = NULL;
2886                 tx_q->tx_skbuff_dma[tx_q->cur_tx].map_as_page = true;
2887         }
2888
2889         tx_q->tx_skbuff_dma[tx_q->cur_tx].last_segment = true;
2890
2891         /* Only the last descriptor gets to point to the skb. */
2892         tx_q->tx_skbuff[tx_q->cur_tx] = skb;
2893
2894         /* We've used all descriptors we need for this skb, however,
2895          * advance cur_tx so that it references a fresh descriptor.
2896          * ndo_start_xmit will fill this descriptor the next time it's
2897          * called and stmmac_tx_clean may clean up to this descriptor.
2898          */
2899         tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, DMA_TX_SIZE);
2900
2901         if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) {
2902                 netif_dbg(priv, hw, priv->dev, "%s: stop transmitting packets\n",
2903                           __func__);
2904                 netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
2905         }
2906
2907         dev->stats.tx_bytes += skb->len;
2908         priv->xstats.tx_tso_frames++;
2909         priv->xstats.tx_tso_nfrags += nfrags;
2910
2911         /* Manage tx mitigation */
2912         priv->tx_count_frames += nfrags + 1;
2913         if (likely(priv->tx_coal_frames > priv->tx_count_frames)) {
2914                 mod_timer(&priv->txtimer,
2915                           STMMAC_COAL_TIMER(priv->tx_coal_timer));
2916         } else {
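                     /* Coalesce threshold reached: request a Tx completion
                      * interrupt for this frame and restart the frame counter.
                      */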
2917                 priv->tx_count_frames = 0;
2918                 priv->hw->desc->set_tx_ic(desc);
2919                 priv->xstats.tx_set_ic_bit++;
2920         }
2921
2922         skb_tx_timestamp(skb);
2923
2924         if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
2925                      priv->hwts_tx_en)) {
2926                 /* declare that device is doing timestamping */
2927                 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
2928                 priv->hw->desc->enable_tx_timestamp(first);
2929         }
2930
2931         /* Complete the first descriptor before granting the DMA */
2932         priv->hw->desc->prepare_tso_tx_desc(first, 1,
2933                         proto_hdr_len,
2934                         pay_len,
2935                         1, tx_q->tx_skbuff_dma[first_entry].last_segment,
2936                         tcp_hdrlen(skb) / 4, (skb->len - proto_hdr_len));
2937
2938         /* If context desc is used to change MSS */
2939         if (mss_desc)
2940                 priv->hw->desc->set_tx_owner(mss_desc);
2941
2942         /* The own bit must be the last setting done when preparing the
2943          * descriptor, and a barrier is then needed to make sure that
2944          * everything is coherent before granting control to the DMA engine.
2945          */
2946         dma_wmb();
2947
2948         if (netif_msg_pktdata(priv)) {
2949                 pr_info("%s: curr=%d dirty=%d f=%d, e=%d, f_p=%p, nfrags %d\n",
2950                         __func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry,
2951                         tx_q->cur_tx, first, nfrags);
2952
2953                 priv->hw->desc->display_ring((void *)tx_q->dma_tx, DMA_TX_SIZE,
2954                                              0);
2955
2956                 pr_info(">>> frame to be transmitted: ");
2957                 print_pkt(skb->data, skb_headlen(skb));
2958         }
2959
2960         netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
2961
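             /* Pass the frame to the hardware by advancing the queue's TX tail pointer. */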
2962         priv->hw->dma->set_tx_tail_ptr(priv->ioaddr, tx_q->tx_tail_addr,
2963                                        queue);
2964
2965         return NETDEV_TX_OK;
2966
2967 dma_map_err:
2968         dev_err(priv->device, "Tx dma map failed\n");
2969         dev_kfree_skb(skb);
2970         priv->dev->stats.tx_dropped++;
2971         return NETDEV_TX_OK;
2972 }
2973
2974 /**
2975  *  stmmac_xmit - Tx entry point of the driver
2976  *  @skb : the socket buffer
2977  *  @dev : device pointer
2978  *  Description : this is the tx entry point of the driver.
2979  *  It programs the chain or the ring and supports oversized frames
2980  *  and SG feature.
2981  */
2982 static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
2983 {
2984         struct stmmac_priv *priv = netdev_priv(dev);
2985         unsigned int nopaged_len = skb_headlen(skb);
2986         int i, csum_insertion = 0, is_jumbo = 0;
2987         u32 queue = skb_get_queue_mapping(skb);
2988         int nfrags = skb_shinfo(skb)->nr_frags;
2989         int entry;
2990         unsigned int first_entry;
2991         struct dma_desc *desc, *first;
2992         struct stmmac_tx_queue *tx_q;
2993         unsigned int enh_desc;
2994         unsigned int des;
2995
2996         tx_q = &priv->tx_queue[queue];
2997
2998         /* Manage oversized TCP frames for GMAC4 device */
2999         if (skb_is_gso(skb) && priv->tso) {
3000                 if (skb_shinfo(skb)->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))
3001                         return stmmac_tso_xmit(skb, dev);
3002         }
3003
3004         if (unlikely(stmmac_tx_avail(priv, queue) < nfrags + 1)) {
3005                 if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) {
3006                         netif_tx_stop_queue(netdev_get_tx_queue(priv->dev,
3007                                                                 queue));
3008                         /* This is a hard error, log it. */
3009                         netdev_err(priv->dev,
3010                                    "%s: Tx Ring full when queue awake\n",
3011                                    __func__);
3012                 }
3013                 return NETDEV_TX_BUSY;
3014         }
3015
3016         if (priv->tx_path_in_lpi_mode)
3017                 stmmac_disable_eee_mode(priv);
3018
3019         entry = tx_q->cur_tx;
3020         first_entry = entry;
3021
3022         csum_insertion = (skb->ip_summed == CHECKSUM_PARTIAL);
3023
3024         if (likely(priv->extend_desc))
3025                 desc = (struct dma_desc *)(tx_q->dma_etx + entry);
3026         else
3027                 desc = tx_q->dma_tx + entry;
3028
3029         first = desc;
3030
3031         enh_desc = priv->plat->enh_desc;
3032         /* To program the descriptors according to the size of the frame */
3033         if (enh_desc)
3034                 is_jumbo = priv->hw->mode->is_jumbo_frm(skb->len, enh_desc);
3035
3036         if (unlikely(is_jumbo) && likely(priv->synopsys_id <
3037                                          DWMAC_CORE_4_00)) {
3038                 entry = priv->hw->mode->jumbo_frm(tx_q, skb, csum_insertion);
3039                 if (unlikely(entry < 0))
3040                         goto dma_map_err;
3041         }
3042
3043         for (i = 0; i < nfrags; i++) {
3044                 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
3045                 int len = skb_frag_size(frag);
3046                 bool last_segment = (i == (nfrags - 1));
3047
3048                 entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE);
3049
3050                 if (likely(priv->extend_desc))
3051                         desc = (struct dma_desc *)(tx_q->dma_etx + entry);
3052                 else
3053                         desc = tx_q->dma_tx + entry;
3054
3055                 des = skb_frag_dma_map(priv->device, frag, 0, len,
3056                                        DMA_TO_DEVICE);
3057                 if (dma_mapping_error(priv->device, des))
3058                         goto dma_map_err; /* should reuse desc w/o issues */
3059
3060                 tx_q->tx_skbuff[entry] = NULL;
3061
3062                 tx_q->tx_skbuff_dma[entry].buf = des;
3063                 if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00))
3064                         desc->des0 = cpu_to_le32(des);
3065                 else
3066                         desc->des2 = cpu_to_le32(des);
3067
3068                 tx_q->tx_skbuff_dma[entry].map_as_page = true;
3069                 tx_q->tx_skbuff_dma[entry].len = len;
3070                 tx_q->tx_skbuff_dma[entry].last_segment = last_segment;
3071
3072                 /* Prepare the descriptor and set the own bit too */
3073                 priv->hw->desc->prepare_tx_desc(desc, 0, len, csum_insertion,
3074                                                 priv->mode, 1, last_segment,
3075                                                 skb->len);
3076         }
3077
3078         /* Only the last descriptor gets to point to the skb. */
3079         tx_q->tx_skbuff[entry] = skb;
3080
3081         /* We've used all descriptors we need for this skb, however,
3082          * advance cur_tx so that it references a fresh descriptor.
3083          * ndo_start_xmit will fill this descriptor the next time it's
3084          * called and stmmac_tx_clean may clean up to this descriptor.
3085          */
3086         entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE);
3087         tx_q->cur_tx = entry;
3088
3089         if (netif_msg_pktdata(priv)) {
3090                 void *tx_head;
3091
3092                 netdev_dbg(priv->dev,
3093                            "%s: curr=%d dirty=%d f=%d, e=%d, first=%p, nfrags=%d",
3094                            __func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry,
3095                            entry, first, nfrags);
3096
3097                 if (priv->extend_desc)
3098                         tx_head = (void *)tx_q->dma_etx;
3099                 else
3100                         tx_head = (void *)tx_q->dma_tx;
3101
3102                 priv->hw->desc->display_ring(tx_head, DMA_TX_SIZE, false);
3103
3104                 netdev_dbg(priv->dev, ">>> frame to be transmitted: ");
3105                 print_pkt(skb->data, skb->len);
3106         }
3107
3108         if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) {
3109                 netif_dbg(priv, hw, priv->dev, "%s: stop transmitting packets\n",
3110                           __func__);
3111                 netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
3112         }
3113
3114         dev->stats.tx_bytes += skb->len;
3115
3116         /* According to the coalesce parameter, the IC bit for the latest
3117          * segment is reset and the timer is re-started to clean the tx status.
3118          * This approach takes care of the fragments: desc is the first
3119          * element in case of no SG.
3120          */
3121         priv->tx_count_frames += nfrags + 1;
3122         if (likely(priv->tx_coal_frames > priv->tx_count_frames)) {
3123                 mod_timer(&priv->txtimer,
3124                           STMMAC_COAL_TIMER(priv->tx_coal_timer));
3125         } else {
3126                 priv->tx_count_frames = 0;
3127                 priv->hw->desc->set_tx_ic(desc);
3128                 priv->xstats.tx_set_ic_bit++;
3129         }
3130
3131         skb_tx_timestamp(skb);
3132
3133         /* Ready to fill the first descriptor and set the OWN bit w/o any
3134          * problems because all the descriptors are actually ready to be
3135          * passed to the DMA engine.
3136          */
3137         if (likely(!is_jumbo)) {
3138                 bool last_segment = (nfrags == 0);
3139
3140                 des = dma_map_single(priv->device, skb->data,
3141                                      nopaged_len, DMA_TO_DEVICE);
3142                 if (dma_mapping_error(priv->device, des))
3143                         goto dma_map_err;
3144
3145                 tx_q->tx_skbuff_dma[first_entry].buf = des;
3146                 if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00))
3147                         first->des0 = cpu_to_le32(des);
3148                 else
3149                         first->des2 = cpu_to_le32(des);
3150
3151                 tx_q->tx_skbuff_dma[first_entry].len = nopaged_len;
3152                 tx_q->tx_skbuff_dma[first_entry].last_segment = last_segment;
3153
3154                 if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
3155                              priv->hwts_tx_en)) {
3156                         /* declare that device is doing timestamping */
3157                         skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
3158                         priv->hw->desc->enable_tx_timestamp(first);
3159                 }
3160
3161                 /* Prepare the first descriptor setting the OWN bit too */
3162                 priv->hw->desc->prepare_tx_desc(first, 1, nopaged_len,
3163                                                 csum_insertion, priv->mode, 1,
3164                                                 last_segment, skb->len);
3165
3166                 /* The own bit must be the last setting done when preparing the
3167                  * descriptor, and a barrier is then needed to make sure that
3168                  * everything is coherent before granting control to the DMA engine.
3169                  */
3170                 dma_wmb();
3171         }
3172
3173         netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
3174
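             /* Kick the DMA: cores older than GMAC4 use the transmit poll demand
              * register, GMAC4 and newer advance the queue's tail pointer instead.
              */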
3175         if (priv->synopsys_id < DWMAC_CORE_4_00)
3176                 priv->hw->dma->enable_dma_transmission(priv->ioaddr);
3177         else
3178                 priv->hw->dma->set_tx_tail_ptr(priv->ioaddr, tx_q->tx_tail_addr,
3179                                                queue);
3180
3181         return NETDEV_TX_OK;
3182
3183 dma_map_err:
3184         netdev_err(priv->dev, "Tx DMA map failed\n");
3185         dev_kfree_skb(skb);
3186         priv->dev->stats.tx_dropped++;
3187         return NETDEV_TX_OK;
3188 }
3189
3190 static void stmmac_rx_vlan(struct net_device *dev, struct sk_buff *skb)
3191 {
3192         struct ethhdr *ehdr;
3193         u16 vlanid;
3194
3195         if ((dev->features & NETIF_F_HW_VLAN_CTAG_RX) ==
3196             NETIF_F_HW_VLAN_CTAG_RX &&
3197             !__vlan_get_tag(skb, &vlanid)) {
3198                 /* pop the vlan tag */
3199                 ehdr = (struct ethhdr *)skb->data;
3200                 memmove(skb->data + VLAN_HLEN, ehdr, ETH_ALEN * 2);
3201                 skb_pull(skb, VLAN_HLEN);
3202                 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlanid);
3203         }
3204 }
3205
3206
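     /* Return 1 when the zero-copy threshold counter has reached STMMAC_RX_THRESH
      * (e.g. after an skb allocation failure in the refill path), which makes the
      * receive path fall back to copying into freshly allocated skbs for a while.
      */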
3207 static inline int stmmac_rx_threshold_count(struct stmmac_rx_queue *rx_q)
3208 {
3209         if (rx_q->rx_zeroc_thresh < STMMAC_RX_THRESH)
3210                 return 0;
3211
3212         return 1;
3213 }
3214
3215 /**
3216  * stmmac_rx_refill - refill the used preallocated skb buffers
3217  * @priv: driver private structure
3218  * @queue: RX queue index
3219  * Description: this reallocates the skbs used by the zero-copy based
3220  * reception process.
3221  */
3222 static inline void stmmac_rx_refill(struct stmmac_priv *priv, u32 queue)
3223 {
3224         struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
3225         int dirty = stmmac_rx_dirty(priv, queue);
3226         unsigned int entry = rx_q->dirty_rx;
3227
3228         int bfsize = priv->dma_buf_sz;
3229
3230         while (dirty-- > 0) {
3231                 struct dma_desc *p;
3232
3233                 if (priv->extend_desc)
3234                         p = (struct dma_desc *)(rx_q->dma_erx + entry);
3235                 else
3236                         p = rx_q->dma_rx + entry;
3237
3238                 if (likely(!rx_q->rx_skbuff[entry])) {
3239                         struct sk_buff *skb;
3240
3241                         skb = netdev_alloc_skb_ip_align(priv->dev, bfsize);
3242                         if (unlikely(!skb)) {
3243                                 /* so for a while no zero-copy! */
3244                                 rx_q->rx_zeroc_thresh = STMMAC_RX_THRESH;
3245                                 if (unlikely(net_ratelimit()))
3246                                         dev_err(priv->device,
3247                                                 "fail to alloc skb entry %d\n",
3248                                                 entry);
3249                                 break;
3250                         }
3251
3252                         rx_q->rx_skbuff[entry] = skb;
3253                         rx_q->rx_skbuff_dma[entry] =
3254                             dma_map_single(priv->device, skb->data, bfsize,
3255                                            DMA_FROM_DEVICE);
3256                         if (dma_mapping_error(priv->device,
3257                                               rx_q->rx_skbuff_dma[entry])) {
3258                                 netdev_err(priv->dev, "Rx DMA map failed\n");
3259                                 dev_kfree_skb(skb);
3260                                 break;
3261                         }
3262
3263                         if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00)) {
3264                                 p->des0 = cpu_to_le32(rx_q->rx_skbuff_dma[entry]);
3265                                 p->des1 = 0;
3266                         } else {
3267                                 p->des2 = cpu_to_le32(rx_q->rx_skbuff_dma[entry]);
3268                         }
3269                         if (priv->hw->mode->refill_desc3)
3270                                 priv->hw->mode->refill_desc3(rx_q, p);
3271
3272                         if (rx_q->rx_zeroc_thresh > 0)
3273                                 rx_q->rx_zeroc_thresh--;
3274
3275                         netif_dbg(priv, rx_status, priv->dev,
3276                                   "refill entry #%d\n", entry);
3277                 }
3278                 dma_wmb();
3279
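                     /* Hand the descriptor back to the hardware: GMAC4
                      * descriptors are re-initialized from scratch, older cores
                      * only need the OWN bit set again.
                      */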
3280                 if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00))
3281                         priv->hw->desc->init_rx_desc(p, priv->use_riwt, 0, 0);
3282                 else
3283                         priv->hw->desc->set_rx_owner(p);
3284
3285                 dma_wmb();
3286
3287                 entry = STMMAC_GET_ENTRY(entry, DMA_RX_SIZE);
3288         }
3289         rx_q->dirty_rx = entry;
3290 }
3291
3292 /**
3293  * stmmac_rx - manage the receive process
3294  * @priv: driver private structure
3295  * @limit: napi budget
3296  * @queue: RX queue index.
3297  * Description: this is the function called by the napi poll method.
3298  * It gets all the frames inside the ring.
3299  */
3300 static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
3301 {
3302         struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
3303         unsigned int entry = rx_q->cur_rx;
3304         int coe = priv->hw->rx_csum;
3305         unsigned int next_entry;
3306         unsigned int count = 0;
3307
3308         if (netif_msg_rx_status(priv)) {
3309                 void *rx_head;
3310
3311                 netdev_dbg(priv->dev, "%s: descriptor ring:\n", __func__);
3312                 if (priv->extend_desc)
3313                         rx_head = (void *)rx_q->dma_erx;
3314                 else
3315                         rx_head = (void *)rx_q->dma_rx;
3316
3317                 priv->hw->desc->display_ring(rx_head, DMA_RX_SIZE, true);
3318         }
3319         while (count < limit) {
3320                 int status;
3321                 struct dma_desc *p;
3322                 struct dma_desc *np;
3323
3324                 if (priv->extend_desc)
3325                         p = (struct dma_desc *)(rx_q->dma_erx + entry);
3326                 else
3327                         p = rx_q->dma_rx + entry;
3328
3329                 /* read the status of the incoming frame */
3330                 status = priv->hw->desc->rx_status(&priv->dev->stats,
3331                                                    &priv->xstats, p);
3332                 /* check if managed by the DMA otherwise go ahead */
3333                 if (unlikely(status & dma_own))
3334                         break;
3335
3336                 count++;
3337
3338                 rx_q->cur_rx = STMMAC_GET_ENTRY(rx_q->cur_rx, DMA_RX_SIZE);
3339                 next_entry = rx_q->cur_rx;
3340
3341                 if (priv->extend_desc)
3342                         np = (struct dma_desc *)(rx_q->dma_erx + next_entry);
3343                 else
3344                         np = rx_q->dma_rx + next_entry;
3345
3346                 prefetch(np);
3347
3348                 if ((priv->extend_desc) && (priv->hw->desc->rx_extended_status))
3349                         priv->hw->desc->rx_extended_status(&priv->dev->stats,
3350                                                            &priv->xstats,
3351                                                            rx_q->dma_erx +
3352                                                            entry);
3353                 if (unlikely(status == discard_frame)) {
3354                         priv->dev->stats.rx_errors++;
3355                         if (priv->hwts_rx_en && !priv->extend_desc) {
3356                                 /* DESC2 & DESC3 will be overwritten by device
3357                                  * with timestamp value, hence reinitialize
3358                                  * them in stmmac_rx_refill() function so that
3359                                  * device can reuse it.
3360                                  */
3361                                 dev_kfree_skb_any(rx_q->rx_skbuff[entry]);
3362                                 rx_q->rx_skbuff[entry] = NULL;
3363                                 dma_unmap_single(priv->device,
3364                                                  rx_q->rx_skbuff_dma[entry],
3365                                                  priv->dma_buf_sz,
3366                                                  DMA_FROM_DEVICE);
3367                         }
3368                 } else {
3369                         struct sk_buff *skb;
3370                         int frame_len;
3371                         unsigned int des;
3372
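                             /* The buffer address sits in DES0 on GMAC4
                              * descriptors and in DES2 on older ones.
                              */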
3373                         if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00))
3374                                 des = le32_to_cpu(p->des0);
3375                         else
3376                                 des = le32_to_cpu(p->des2);
3377
3378                         frame_len = priv->hw->desc->get_rx_frame_len(p, coe);
3379
3380                         /*  If frame length is greater than skb buffer size
3381                          *  (preallocated during init) then the packet is
3382                          *  ignored
3383                          */
3384                         if (frame_len > priv->dma_buf_sz) {
3385                                 netdev_err(priv->dev,
3386                                            "len %d larger than size (%d)\n",
3387                                            frame_len, priv->dma_buf_sz);
3388                                 priv->dev->stats.rx_length_errors++;
3389                                 break;
3390                         }
3391
3392                         /* ACS is set; GMAC core strips PAD/FCS for IEEE 802.3
3393                          * Type frames (LLC/LLC-SNAP)
3394                          */
3395                         if (unlikely(status != llc_snap))
3396                                 frame_len -= ETH_FCS_LEN;
3397
3398                         if (netif_msg_rx_status(priv)) {
3399                                 netdev_dbg(priv->dev, "\tdesc: %p [entry %d] buff=0x%x\n",
3400                                            p, entry, des);
3401                                 if (frame_len > ETH_FRAME_LEN)
3402                                         netdev_dbg(priv->dev, "frame size %d, COE: %d\n",
3403                                                    frame_len, status);
3404                         }
3405
3406                         /* On GMAC4, zero-copy is always used, whatever the
3407                          * frame size, because the used descriptors always
3408                          * need to be refilled.
3409                          */
3410                         if (unlikely(!priv->plat->has_gmac4 &&
3411                                      ((frame_len < priv->rx_copybreak) ||
3412                                      stmmac_rx_threshold_count(rx_q)))) {
3413                                 skb = netdev_alloc_skb_ip_align(priv->dev,
3414                                                                 frame_len);
3415                                 if (unlikely(!skb)) {
3416                                         if (net_ratelimit())
3417                                                 dev_warn(priv->device,
3418                                                          "packet dropped\n");
3419                                         priv->dev->stats.rx_dropped++;
3420                                         break;
3421                                 }
3422
3423                                 dma_sync_single_for_cpu(priv->device,
3424                                                         rx_q->rx_skbuff_dma
3425                                                         [entry], frame_len,
3426                                                         DMA_FROM_DEVICE);
3427                                 skb_copy_to_linear_data(skb,
3428                                                         rx_q->
3429                                                         rx_skbuff[entry]->data,
3430                                                         frame_len);
3431
3432                                 skb_put(skb, frame_len);
3433                                 dma_sync_single_for_device(priv->device,
3434                                                            rx_q->rx_skbuff_dma
3435                                                            [entry], frame_len,
3436                                                            DMA_FROM_DEVICE);
3437                         } else {
3438                                 skb = rx_q->rx_skbuff[entry];
3439                                 if (unlikely(!skb)) {
3440                                         netdev_err(priv->dev,
3441                                                    "%s: Inconsistent Rx chain\n",
3442                                                    priv->dev->name);
3443                                         priv->dev->stats.rx_dropped++;
3444                                         break;
3445                                 }
3446                                 prefetch(skb->data - NET_IP_ALIGN);
3447                                 rx_q->rx_skbuff[entry] = NULL;
3448                                 rx_q->rx_zeroc_thresh++;
3449
3450                                 skb_put(skb, frame_len);
3451                                 dma_unmap_single(priv->device,
3452                                                  rx_q->rx_skbuff_dma[entry],
3453                                                  priv->dma_buf_sz,
3454                                                  DMA_FROM_DEVICE);
3455                         }
3456
3457                         if (netif_msg_pktdata(priv)) {
3458                                 netdev_dbg(priv->dev, "frame received (%dbytes)",
3459                                            frame_len);
3460                                 print_pkt(skb->data, frame_len);
3461                         }
3462
3463                         stmmac_get_rx_hwtstamp(priv, p, np, skb);
3464
3465                         stmmac_rx_vlan(priv->dev, skb);
3466
3467                         skb->protocol = eth_type_trans(skb, priv->dev);
3468
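                             /* With RX checksum offload enabled the hardware has
                              * already validated the checksum; otherwise leave
                              * the verification to the stack.
                              */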
3469                         if (unlikely(!coe))
3470                                 skb_checksum_none_assert(skb);
3471                         else
3472                                 skb->ip_summed = CHECKSUM_UNNECESSARY;
3473
3474                         napi_gro_receive(&rx_q->napi, skb);
3475
3476                         priv->dev->stats.rx_packets++;
3477                         priv->dev->stats.rx_bytes += frame_len;
3478                 }
3479                 entry = next_entry;
3480         }
3481
3482         stmmac_rx_refill(priv, queue);
3483
3484         priv->xstats.rx_pkt_n += count;
3485
3486         return count;
3487 }
3488
3489 /**
3490  *  stmmac_poll - stmmac poll method (NAPI)
3491  *  @napi : pointer to the napi structure.
3492  *  @budget : maximum number of packets that the current CPU can receive from
3493  *            all interfaces.
3494  *  Description :
3495  *  To look at the incoming frames and clear the tx resources.
3496  */
3497 static int stmmac_poll(struct napi_struct *napi, int budget)
3498 {
3499         struct stmmac_rx_queue *rx_q =
3500                 container_of(napi, struct stmmac_rx_queue, napi);
3501         struct stmmac_priv *priv = rx_q->priv_data;
3502         u32 tx_count = priv->plat->tx_queues_to_use;
3503         u32 chan = rx_q->queue_index;
3504         int work_done = 0;
3505         u32 queue;
3506
3507         priv->xstats.napi_poll++;
3508
3509         /* check all the queues */
3510         for (queue = 0; queue < tx_count; queue++)
3511                 stmmac_tx_clean(priv, queue);
3512
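             /* RX processing is limited to the queue that owns this NAPI instance. */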
3513         work_done = stmmac_rx(priv, budget, rx_q->queue_index);
3514         if (work_done < budget) {
3515                 napi_complete_done(napi, work_done);
3516                 stmmac_enable_dma_irq(priv, chan);
3517         }
3518         return work_done;
3519 }
3520
3521 /**
3522  *  stmmac_tx_timeout
3523  *  @dev : Pointer to net device structure
3524  *  Description: this function is called when a packet transmission fails to
3525  *   complete within a reasonable time. The driver will mark the error in the
3526  *   netdev structure and arrange for the device to be reset to a sane state
3527  *   in order to transmit a new packet.
3528  */
3529 static void stmmac_tx_timeout(struct net_device *dev)
3530 {
3531         struct stmmac_priv *priv = netdev_priv(dev);
3532         u32 tx_count = priv->plat->tx_queues_to_use;
3533         u32 chan;
3534
3535         /* Clear Tx resources and restart transmitting again */
3536         for (chan = 0; chan < tx_count; chan++)
3537                 stmmac_tx_err(priv, chan);
3538 }
3539
3540 /**
3541  *  stmmac_set_rx_mode - entry point for multicast addressing
3542  *  @dev : pointer to the device structure
3543  *  Description:
3544  *  This function is a driver entry point which gets called by the kernel
3545  *  whenever multicast addresses must be enabled/disabled.
3546  *  Return value:
3547  *  void.
3548  */
3549 static void stmmac_set_rx_mode(struct net_device *dev)
3550 {
3551         struct stmmac_priv *priv = netdev_priv(dev);
3552
3553         priv->hw->mac->set_filter(priv->hw, dev);
3554 }
3555
3556 /**
3557  *  stmmac_change_mtu - entry point to change MTU size for the device.
3558  *  @dev : device pointer.
3559  *  @new_mtu : the new MTU size for the device.
3560  *  Description: the Maximum Transmission Unit (MTU) is used by the network layer
3561  *  to drive packet transmission. Ethernet has an MTU of 1500 octets
3562  *  (ETH_DATA_LEN). This value can be changed with ifconfig.
3563  *  Return value:
3564  *  0 on success and an appropriate (-)ve integer as defined in errno.h
3565  *  file on failure.
3566  */
3567 static int stmmac_change_mtu(struct net_device *dev, int new_mtu)
3568 {
3569         struct stmmac_priv *priv = netdev_priv(dev);
3570
3571         if (netif_running(dev)) {
3572                 netdev_err(priv->dev, "must be stopped to change its MTU\n");
3573                 return -EBUSY;
3574         }
3575
3576         dev->mtu = new_mtu;
3577
3578         netdev_update_features(dev);
3579
3580         return 0;
3581 }
3582
3583 static netdev_features_t stmmac_fix_features(struct net_device *dev,
3584                                              netdev_features_t features)
3585 {
3586         struct stmmac_priv *priv = netdev_priv(dev);
3587
3588         if (priv->plat->rx_coe == STMMAC_RX_COE_NONE)
3589                 features &= ~NETIF_F_RXCSUM;
3590
3591         if (!priv->plat->tx_coe)
3592                 features &= ~NETIF_F_CSUM_MASK;
3593
3594         /* Some GMAC devices have buggy Jumbo frame support that
3595          * needs to have the Tx COE disabled for oversized frames
3596          * (due to limited buffer sizes). In this case we disable
3597          * the TX csum insertion in the TDES and do not use SF.
3598          */
3599         if (priv->plat->bugged_jumbo && (dev->mtu > ETH_DATA_LEN))
3600                 features &= ~NETIF_F_CSUM_MASK;
3601
3602         /* Enable or disable TSO as requested via ethtool */
3603         if ((priv->plat->tso_en) && (priv->dma_cap.tsoen)) {
3604                 if (features & NETIF_F_TSO)
3605                         priv->tso = true;
3606                 else
3607                         priv->tso = false;
3608         }
3609
3610         return features;
3611 }
3612
3613 static int stmmac_set_features(struct net_device *netdev,
3614                                netdev_features_t features)
3615 {
3616         struct stmmac_priv *priv = netdev_priv(netdev);
3617
3618         /* Keep the COE Type in case checksum offload is supported */
3619         if (features & NETIF_F_RXCSUM)
3620                 priv->hw->rx_csum = priv->plat->rx_coe;
3621         else
3622                 priv->hw->rx_csum = 0;
3623         /* No check needed because rx_coe has been set before and it will be
3624          * fixed in case of issue.
3625          */
3626         priv->hw->mac->rx_ipc(priv->hw);
3627
3628         return 0;
3629 }
3630
3631 /**
3632  *  stmmac_interrupt - main ISR
3633  *  @irq: interrupt number.
3634  *  @dev_id: to pass the net device pointer.
3635  *  Description: this is the main driver interrupt service routine.
3636  *  It can call:
3637  *  o DMA service routine (to manage incoming frame reception and transmission
3638  *    status)
3639  *  o Core interrupts to manage: remote wake-up, management counter, LPI
3640  *    interrupts.
3641  */
3642 static irqreturn_t stmmac_interrupt(int irq, void *dev_id)
3643 {
3644         struct net_device *dev = (struct net_device *)dev_id;
3645         struct stmmac_priv *priv = netdev_priv(dev);
3646         u32 rx_cnt = priv->plat->rx_queues_to_use;
3647         u32 tx_cnt = priv->plat->tx_queues_to_use;
3648         u32 queues_count;
3649         u32 queue;
3650
3651         queues_count = (rx_cnt > tx_cnt) ? rx_cnt : tx_cnt;
3652
3653         if (priv->irq_wake)
3654                 pm_wakeup_event(priv->device, 0);
3655
3656         if (unlikely(!dev)) {
3657                 netdev_err(priv->dev, "%s: invalid dev pointer\n", __func__);
3658                 return IRQ_NONE;
3659         }
3660
3661         /* To handle the GMAC's own interrupts */
3662         if ((priv->plat->has_gmac) || (priv->plat->has_gmac4)) {
3663                 int status = priv->hw->mac->host_irq_status(priv->hw,
3664                                                             &priv->xstats);
3665
3666                 if (unlikely(status)) {
3667                         /* For LPI we need to save the tx status */
3668                         if (status & CORE_IRQ_TX_PATH_IN_LPI_MODE)
3669                                 priv->tx_path_in_lpi_mode = true;
3670                         if (status & CORE_IRQ_TX_PATH_EXIT_LPI_MODE)
3671                                 priv->tx_path_in_lpi_mode = false;
3672                 }
3673
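                     /* GMAC4 and newer also report per-queue MTL interrupts; on an
                      * RX overflow the RX tail pointer is written back to kick the
                      * RX DMA again.
                      */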
3674                 if (priv->synopsys_id >= DWMAC_CORE_4_00) {
3675                         for (queue = 0; queue < queues_count; queue++) {
3676                                 struct stmmac_rx_queue *rx_q =
3677                                 &priv->rx_queue[queue];
3678
3679                                 status |=
3680                                 priv->hw->mac->host_mtl_irq_status(priv->hw,
3681                                                                    queue);
3682
3683                                 if (status & CORE_IRQ_MTL_RX_OVERFLOW &&
3684                                     priv->hw->dma->set_rx_tail_ptr)
3685                                         priv->hw->dma->set_rx_tail_ptr(priv->ioaddr,
3686                                                                 rx_q->rx_tail_addr,
3687                                                                 queue);
3688                         }
3689                 }
3690
3691                 /* PCS link status */
3692                 if (priv->hw->pcs) {
3693                         if (priv->xstats.pcs_link)
3694                                 netif_carrier_on(dev);
3695                         else
3696                                 netif_carrier_off(dev);
3697                 }
3698         }
3699
3700         /* To handle DMA interrupts */
3701         stmmac_dma_interrupt(priv);
3702
3703         return IRQ_HANDLED;
3704 }
3705
3706 #ifdef CONFIG_NET_POLL_CONTROLLER
3707 /* Polling receive - used by NETCONSOLE and other diagnostic tools
3708  * to allow network I/O with interrupts disabled.
3709  */
3710 static void stmmac_poll_controller(struct net_device *dev)
3711 {
3712         disable_irq(dev->irq);
3713         stmmac_interrupt(dev->irq, dev);
3714         enable_irq(dev->irq);
3715 }
3716 #endif
3717
3718 /**
3719  *  stmmac_ioctl - Entry point for the Ioctl
3720  *  @dev: Device pointer.
3721  *  @rq: An IOCTL specific structure, that can contain a pointer to
3722  *  a proprietary structure used to pass information to the driver.
3723  *  @cmd: IOCTL command
3724  *  Description:
3725  *  Currently it supports the phy_mii_ioctl(...) and HW time stamping.
3726  */
3727 static int stmmac_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
3728 {
3729         int ret = -EOPNOTSUPP;
3730
3731         if (!netif_running(dev))
3732                 return -EINVAL;
3733
3734         switch (cmd) {
3735         case SIOCGMIIPHY:
3736         case SIOCGMIIREG:
3737         case SIOCSMIIREG:
3738                 if (!dev->phydev)
3739                         return -EINVAL;
3740                 ret = phy_mii_ioctl(dev->phydev, rq, cmd);
3741                 break;
3742         case SIOCSHWTSTAMP:
3743                 ret = stmmac_hwtstamp_ioctl(dev, rq);
3744                 break;
3745         default:
3746                 break;
3747         }
3748
3749         return ret;
3750 }
3751
3752 static int stmmac_set_mac_address(struct net_device *ndev, void *addr)
3753 {
3754         struct stmmac_priv *priv = netdev_priv(ndev);
3755         int ret = 0;
3756
3757         ret = eth_mac_addr(ndev, addr);
3758         if (ret)
3759                 return ret;
3760
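             /* Write the new address into the MAC's address register 0. */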
3761         priv->hw->mac->set_umac_addr(priv->hw, ndev->dev_addr, 0);
3762
3763         return ret;
3764 }
3765
3766 #ifdef CONFIG_DEBUG_FS
3767 static struct dentry *stmmac_fs_dir;
3768
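     /* Dump @size descriptors starting at @head into @seq, one line per
      * descriptor, decoding either the extended or the basic layout.
      */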
3769 static void sysfs_display_ring(void *head, int size, int extend_desc,
3770                                struct seq_file *seq)
3771 {
3772         int i;
3773         struct dma_extended_desc *ep = (struct dma_extended_desc *)head;
3774         struct dma_desc *p = (struct dma_desc *)head;
3775
3776         for (i = 0; i < size; i++) {
3777                 if (extend_desc) {
3778                         seq_printf(seq, "%d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n",
3779                                    i, (unsigned int)virt_to_phys(ep),
3780                                    le32_to_cpu(ep->basic.des0),
3781                                    le32_to_cpu(ep->basic.des1),
3782                                    le32_to_cpu(ep->basic.des2),
3783                                    le32_to_cpu(ep->basic.des3));
3784                         ep++;
3785                 } else {
3786                         seq_printf(seq, "%d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n",
3787                                    i, (unsigned int)virt_to_phys(p),
3788                                    le32_to_cpu(p->des0), le32_to_cpu(p->des1),
3789                                    le32_to_cpu(p->des2), le32_to_cpu(p->des3));
3790                         p++;
3791                 }
3792                 seq_printf(seq, "\n");
3793         }
3794 }
3795
3796 static int stmmac_sysfs_ring_read(struct seq_file *seq, void *v)
3797 {
3798         struct net_device *dev = seq->private;
3799         struct stmmac_priv *priv = netdev_priv(dev);
3800         u32 rx_count = priv->plat->rx_queues_to_use;
3801         u32 tx_count = priv->plat->tx_queues_to_use;
3802         u32 queue;
3803
3804         for (queue = 0; queue < rx_count; queue++) {
3805                 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
3806
3807                 seq_printf(seq, "RX Queue %d:\n", queue);
3808
3809                 if (priv->extend_desc) {
3810                         seq_printf(seq, "Extended descriptor ring:\n");
3811                         sysfs_display_ring((void *)rx_q->dma_erx,
3812                                            DMA_RX_SIZE, 1, seq);
3813                 } else {
3814                         seq_printf(seq, "Descriptor ring:\n");
3815                         sysfs_display_ring((void *)rx_q->dma_rx,
3816                                            DMA_RX_SIZE, 0, seq);
3817                 }
3818         }
3819
3820         for (queue = 0; queue < tx_count; queue++) {
3821                 struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
3822
3823                 seq_printf(seq, "TX Queue %d:\n", queue);
3824
3825                 if (priv->extend_desc) {
3826                         seq_printf(seq, "Extended descriptor ring:\n");
3827                         sysfs_display_ring((void *)tx_q->dma_etx,
3828                                            DMA_TX_SIZE, 1, seq);
3829                 } else {
3830                         seq_printf(seq, "Descriptor ring:\n");
3831                         sysfs_display_ring((void *)tx_q->dma_tx,
3832                                            DMA_TX_SIZE, 0, seq);
3833                 }
3834         }
3835
3836         return 0;
3837 }
3838
3839 static int stmmac_sysfs_ring_open(struct inode *inode, struct file *file)
3840 {
3841         return single_open(file, stmmac_sysfs_ring_read, inode->i_private);
3842 }
3843
3844 /* Debugfs files, should appear in /sys/kernel/debug/stmmaceth/eth0 */
3845
3846 static const struct file_operations stmmac_rings_status_fops = {
3847         .owner = THIS_MODULE,
3848         .open = stmmac_sysfs_ring_open,
3849         .read = seq_read,
3850         .llseek = seq_lseek,
3851         .release = single_release,
3852 };
3853
3854 static int stmmac_sysfs_dma_cap_read(struct seq_file *seq, void *v)
3855 {
3856         struct net_device *dev = seq->private;
3857         struct stmmac_priv *priv = netdev_priv(dev);
3858
3859         if (!priv->hw_cap_support) {
3860                 seq_printf(seq, "DMA HW features not supported\n");
3861                 return 0;
3862         }
3863
3864         seq_printf(seq, "==============================\n");
3865         seq_printf(seq, "\tDMA HW features\n");
3866         seq_printf(seq, "==============================\n");
3867
3868         seq_printf(seq, "\t10/100 Mbps: %s\n",
3869                    (priv->dma_cap.mbps_10_100) ? "Y" : "N");
3870         seq_printf(seq, "\t1000 Mbps: %s\n",
3871                    (priv->dma_cap.mbps_1000) ? "Y" : "N");
3872         seq_printf(seq, "\tHalf duplex: %s\n",
3873                    (priv->dma_cap.half_duplex) ? "Y" : "N");
3874         seq_printf(seq, "\tHash Filter: %s\n",
3875                    (priv->dma_cap.hash_filter) ? "Y" : "N");
3876         seq_printf(seq, "\tMultiple MAC address registers: %s\n",
3877                    (priv->dma_cap.multi_addr) ? "Y" : "N");
3878         seq_printf(seq, "\tPCS (TBI/SGMII/RTBI PHY interfaces): %s\n",
3879                    (priv->dma_cap.pcs) ? "Y" : "N");
3880         seq_printf(seq, "\tSMA (MDIO) Interface: %s\n",
3881                    (priv->dma_cap.sma_mdio) ? "Y" : "N");
3882         seq_printf(seq, "\tPMT Remote wake up: %s\n",
3883                    (priv->dma_cap.pmt_remote_wake_up) ? "Y" : "N");
3884         seq_printf(seq, "\tPMT Magic Frame: %s\n",
3885                    (priv->dma_cap.pmt_magic_frame) ? "Y" : "N");
3886         seq_printf(seq, "\tRMON module: %s\n",
3887                    (priv->dma_cap.rmon) ? "Y" : "N");
3888         seq_printf(seq, "\tIEEE 1588-2002 Time Stamp: %s\n",
3889                    (priv->dma_cap.time_stamp) ? "Y" : "N");
3890         seq_printf(seq, "\tIEEE 1588-2008 Advanced Time Stamp: %s\n",
3891                    (priv->dma_cap.atime_stamp) ? "Y" : "N");
3892         seq_printf(seq, "\t802.3az - Energy-Efficient Ethernet (EEE): %s\n",
3893                    (priv->dma_cap.eee) ? "Y" : "N");
3894         seq_printf(seq, "\tAV features: %s\n", (priv->dma_cap.av) ? "Y" : "N");
3895         seq_printf(seq, "\tChecksum Offload in TX: %s\n",
3896                    (priv->dma_cap.tx_coe) ? "Y" : "N");
3897         if (priv->synopsys_id >= DWMAC_CORE_4_00) {
3898                 seq_printf(seq, "\tIP Checksum Offload in RX: %s\n",
3899                            (priv->dma_cap.rx_coe) ? "Y" : "N");
3900         } else {
3901                 seq_printf(seq, "\tIP Checksum Offload (type1) in RX: %s\n",
3902                            (priv->dma_cap.rx_coe_type1) ? "Y" : "N");
3903                 seq_printf(seq, "\tIP Checksum Offload (type2) in RX: %s\n",
3904                            (priv->dma_cap.rx_coe_type2) ? "Y" : "N");
3905         }
3906         seq_printf(seq, "\tRXFIFO > 2048bytes: %s\n",
3907                    (priv->dma_cap.rxfifo_over_2048) ? "Y" : "N");
3908         seq_printf(seq, "\tNumber of Additional RX channel: %d\n",
3909                    priv->dma_cap.number_rx_channel);
3910         seq_printf(seq, "\tNumber of Additional TX channel: %d\n",
3911                    priv->dma_cap.number_tx_channel);
3912         seq_printf(seq, "\tEnhanced descriptors: %s\n",
3913                    (priv->dma_cap.enh_desc) ? "Y" : "N");
3914
3915         return 0;
3916 }
3917
3918 static int stmmac_sysfs_dma_cap_open(struct inode *inode, struct file *file)
3919 {
3920         return single_open(file, stmmac_sysfs_dma_cap_read, inode->i_private);
3921 }
3922
3923 static const struct file_operations stmmac_dma_cap_fops = {
3924         .owner = THIS_MODULE,
3925         .open = stmmac_sysfs_dma_cap_open,
3926         .read = seq_read,
3927         .llseek = seq_lseek,
3928         .release = single_release,
3929 };
3930
3931 static int stmmac_init_fs(struct net_device *dev)
3932 {
3933         struct stmmac_priv *priv = netdev_priv(dev);
3934
3935         /* Create per netdev entries */
3936         priv->dbgfs_dir = debugfs_create_dir(dev->name, stmmac_fs_dir);
3937
3938         if (!priv->dbgfs_dir || IS_ERR(priv->dbgfs_dir)) {
3939                 netdev_err(priv->dev, "ERROR failed to create debugfs directory\n");
3940
3941                 return -ENOMEM;
3942         }
3943
3944         /* Entry to report DMA RX/TX rings */
3945         priv->dbgfs_rings_status =
3946                 debugfs_create_file("descriptors_status", S_IRUGO,
3947                                     priv->dbgfs_dir, dev,
3948                                     &stmmac_rings_status_fops);
3949
3950         if (!priv->dbgfs_rings_status || IS_ERR(priv->dbgfs_rings_status)) {
3951                 netdev_err(priv->dev, "ERROR creating stmmac ring debugfs file\n");
3952                 debugfs_remove_recursive(priv->dbgfs_dir);
3953
3954                 return -ENOMEM;
3955         }
3956
3957         /* Entry to report the DMA HW features */
3958         priv->dbgfs_dma_cap = debugfs_create_file("dma_cap", S_IRUGO,
3959                                             priv->dbgfs_dir,
3960                                             dev, &stmmac_dma_cap_fops);
3961
3962         if (!priv->dbgfs_dma_cap || IS_ERR(priv->dbgfs_dma_cap)) {
3963                 netdev_err(priv->dev, "ERROR creating stmmac MMC debugfs file\n");
3964                 debugfs_remove_recursive(priv->dbgfs_dir);
3965
3966                 return -ENOMEM;
3967         }
3968
3969         return 0;
3970 }
3971
3972 static void stmmac_exit_fs(struct net_device *dev)
3973 {
3974         struct stmmac_priv *priv = netdev_priv(dev);
3975
3976         debugfs_remove_recursive(priv->dbgfs_dir);
3977 }
3978 #endif /* CONFIG_DEBUG_FS */
3979
3980 static const struct net_device_ops stmmac_netdev_ops = {
3981         .ndo_open = stmmac_open,
3982         .ndo_start_xmit = stmmac_xmit,
3983         .ndo_stop = stmmac_release,
3984         .ndo_change_mtu = stmmac_change_mtu,
3985         .ndo_fix_features = stmmac_fix_features,
3986         .ndo_set_features = stmmac_set_features,
3987         .ndo_set_rx_mode = stmmac_set_rx_mode,
3988         .ndo_tx_timeout = stmmac_tx_timeout,
3989         .ndo_do_ioctl = stmmac_ioctl,
3990 #ifdef CONFIG_NET_POLL_CONTROLLER
3991         .ndo_poll_controller = stmmac_poll_controller,
3992 #endif
3993         .ndo_set_mac_address = stmmac_set_mac_address,
3994 };
3995
3996 /**
3997  *  stmmac_hw_init - Init the MAC device
3998  *  @priv: driver private structure
3999  *  Description: this function is to configure the MAC device according to
4000  *  some platform parameters or the HW capability register. It prepares the
4001  *  driver to use either ring or chain modes and to setup either enhanced or
4002  *  normal descriptors.
4003  */
4004 static int stmmac_hw_init(struct stmmac_priv *priv)
4005 {
4006         struct mac_device_info *mac;
4007
4008         /* Identify the MAC HW device */
4009         if (priv->plat->setup) {
4010                 mac = priv->plat->setup(priv);
4011         } else if (priv->plat->has_gmac) {
4012                 priv->dev->priv_flags |= IFF_UNICAST_FLT;
4013                 mac = dwmac1000_setup(priv->ioaddr,
4014                                       priv->plat->multicast_filter_bins,
4015                                       priv->plat->unicast_filter_entries,
4016                                       &priv->synopsys_id);
4017         } else if (priv->plat->has_gmac4) {
4018                 priv->dev->priv_flags |= IFF_UNICAST_FLT;
4019                 mac = dwmac4_setup(priv->ioaddr,
4020                                    priv->plat->multicast_filter_bins,
4021                                    priv->plat->unicast_filter_entries,
4022                                    &priv->synopsys_id);
4023         } else {
4024                 mac = dwmac100_setup(priv->ioaddr, &priv->synopsys_id);
4025         }
4026         if (!mac)
4027                 return -ENOMEM;
4028
4029         priv->hw = mac;
4030
4031         /* dwmac-sun8i only works in chain mode */
4032         if (priv->plat->has_sun8i)
4033                 chain_mode = 1;
4034
4035         /* To use the chained or ring mode */
4036         if (priv->synopsys_id >= DWMAC_CORE_4_00) {
4037                 priv->hw->mode = &dwmac4_ring_mode_ops;
4038         } else {
4039                 if (chain_mode) {
4040                         priv->hw->mode = &chain_mode_ops;
4041                         dev_info(priv->device, "Chain mode enabled\n");
4042                         priv->mode = STMMAC_CHAIN_MODE;
4043                 } else {
4044                         priv->hw->mode = &ring_mode_ops;
4045                         dev_info(priv->device, "Ring mode enabled\n");
4046                         priv->mode = STMMAC_RING_MODE;
4047                 }
4048         }
4049
4050         /* Get the HW capability (available on GMAC cores newer than 3.50a) */
4051         priv->hw_cap_support = stmmac_get_hw_features(priv);
4052         if (priv->hw_cap_support) {
4053                 dev_info(priv->device, "DMA HW capability register supported\n");
4054
4055                 /* We can override some gmac/dma configuration fields, e.g.
4056                  * enh_desc and tx_coe (normally passed through the
4057                  * platform data), with the values from the HW capability
4058                  * register (if supported).
4059                  */
4060                 priv->plat->enh_desc = priv->dma_cap.enh_desc;
4061                 priv->plat->pmt = priv->dma_cap.pmt_remote_wake_up;
4062                 priv->hw->pmt = priv->plat->pmt;
4063
4064                 /* TXCOE doesn't work in thresh DMA mode */
4065                 if (priv->plat->force_thresh_dma_mode)
4066                         priv->plat->tx_coe = 0;
4067                 else
4068                         priv->plat->tx_coe = priv->dma_cap.tx_coe;
4069
4070                 /* In case of GMAC4 rx_coe is from HW cap register. */
4071                 priv->plat->rx_coe = priv->dma_cap.rx_coe;
4072
4073                 if (priv->dma_cap.rx_coe_type2)
4074                         priv->plat->rx_coe = STMMAC_RX_COE_TYPE2;
4075                 else if (priv->dma_cap.rx_coe_type1)
4076                         priv->plat->rx_coe = STMMAC_RX_COE_TYPE1;
4077
4078         } else {
4079                 dev_info(priv->device, "No HW DMA feature register supported\n");
4080         }
4081
4082         /* To use alternate (extended), normal or GMAC4 descriptor structures */
4083         if (priv->synopsys_id >= DWMAC_CORE_4_00)
4084                 priv->hw->desc = &dwmac4_desc_ops;
4085         else
4086                 stmmac_selec_desc_mode(priv);
4087
4088         if (priv->plat->rx_coe) {
4089                 priv->hw->rx_csum = priv->plat->rx_coe;
4090                 dev_info(priv->device, "RX Checksum Offload Engine supported\n");
4091                 if (priv->synopsys_id < DWMAC_CORE_4_00)
4092                         dev_info(priv->device, "COE Type %d\n", priv->hw->rx_csum);
4093         }
4094         if (priv->plat->tx_coe)
4095                 dev_info(priv->device, "TX Checksum insertion supported\n");
4096
4097         if (priv->plat->pmt) {
4098                 dev_info(priv->device, "Wake-Up On LAN supported\n");
4099                 device_set_wakeup_capable(priv->device, 1);
4100         }
4101
4102         if (priv->dma_cap.tsoen)
4103                 dev_info(priv->device, "TSO supported\n");
4104
4105         return 0;
4106 }
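
/* Illustrative sketch (not part of this file): a dwmac glue layer can provide
 * its own MAC setup hook via plat->setup, which stmmac_hw_init() calls above
 * before falling back to the generic dwmac100/dwmac1000/dwmac4 helpers. The
 * callback name below is hypothetical; the exact prototype is that of the
 * setup member in struct plat_stmmacenet_data:
 *
 *        static struct mac_device_info *foo_dwmac_setup(void *ppriv)
 *        {
 *                struct stmmac_priv *priv = ppriv;
 *
 *                return dwmac1000_setup(priv->ioaddr,
 *                                       priv->plat->multicast_filter_bins,
 *                                       priv->plat->unicast_filter_entries,
 *                                       &priv->synopsys_id);
 *        }
 *
 *        ...
 *        plat_dat->setup = foo_dwmac_setup;
 */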
4107
4108 /**
4109  * stmmac_dvr_probe
4110  * @device: device pointer
4111  * @plat_dat: platform data pointer
4112  * @res: stmmac resource pointer
4113  * Description: this is the main probe function used to
4114  * call alloc_etherdev and allocate the priv structure.
4115  * Return:
4116  * returns 0 on success, otherwise errno.
4117  */
4118 int stmmac_dvr_probe(struct device *device,
4119                      struct plat_stmmacenet_data *plat_dat,
4120                      struct stmmac_resources *res)
4121 {
4122         struct net_device *ndev = NULL;
4123         struct stmmac_priv *priv;
4124         int ret = 0;
4125         u32 queue;
4126
4127         ndev = alloc_etherdev_mqs(sizeof(struct stmmac_priv),
4128                                   MTL_MAX_TX_QUEUES,
4129                                   MTL_MAX_RX_QUEUES);
4130         if (!ndev)
4131                 return -ENOMEM;
4132
4133         SET_NETDEV_DEV(ndev, device);
4134
4135         priv = netdev_priv(ndev);
4136         priv->device = device;
4137         priv->dev = ndev;
4138
4139         stmmac_set_ethtool_ops(ndev);
4140         priv->pause = pause;
4141         priv->plat = plat_dat;
4142         priv->ioaddr = res->addr;
4143         priv->dev->base_addr = (unsigned long)res->addr;
4144
4145         priv->dev->irq = res->irq;
4146         priv->wol_irq = res->wol_irq;
4147         priv->lpi_irq = res->lpi_irq;
4148
4149         if (res->mac)
4150                 memcpy(priv->dev->dev_addr, res->mac, ETH_ALEN);
4151
4152         dev_set_drvdata(device, priv->dev);
4153
4154         /* Verify driver arguments */
4155         stmmac_verify_args();
4156
4157         /* Override with kernel parameters if supplied XXX CRS XXX
4158          * this needs to have multiple instances
4159          */
4160         if ((phyaddr >= 0) && (phyaddr <= 31))
4161                 priv->plat->phy_addr = phyaddr;
4162
4163         if (priv->plat->stmmac_rst) {
4164                 ret = reset_control_assert(priv->plat->stmmac_rst);
4165                 reset_control_deassert(priv->plat->stmmac_rst);
4166                 /* Some reset controllers provide only a reset callback instead
4167                  * of the assert + deassert callbacks pair.
4168                  */
4169                 if (ret == -ENOTSUPP)
4170                         reset_control_reset(priv->plat->stmmac_rst);
4171         }
4172
4173         /* Init MAC and get the capabilities */
4174         ret = stmmac_hw_init(priv);
4175         if (ret)
4176                 goto error_hw_init;
4177
4178         /* Configure real RX and TX queues */
4179         netif_set_real_num_rx_queues(ndev, priv->plat->rx_queues_to_use);
4180         netif_set_real_num_tx_queues(ndev, priv->plat->tx_queues_to_use);
4181
4182         ndev->netdev_ops = &stmmac_netdev_ops;
4183
4184         ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
4185                             NETIF_F_RXCSUM;
4186
4187         if ((priv->plat->tso_en) && (priv->dma_cap.tsoen)) {
4188                 ndev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6;
4189                 priv->tso = true;
4190                 dev_info(priv->device, "TSO feature enabled\n");
4191         }
4192         ndev->features |= ndev->hw_features | NETIF_F_HIGHDMA;
4193         ndev->watchdog_timeo = msecs_to_jiffies(watchdog);
4194 #ifdef STMMAC_VLAN_TAG_USED
4195         /* Both mac100 and gmac support receive VLAN tag detection */
4196         ndev->features |= NETIF_F_HW_VLAN_CTAG_RX;
4197 #endif
4198         priv->msg_enable = netif_msg_init(debug, default_msg_level);
4199
4200         /* MTU range: 46 - hw-specific max */
4201         ndev->min_mtu = ETH_ZLEN - ETH_HLEN;
4202         if ((priv->plat->enh_desc) || (priv->synopsys_id >= DWMAC_CORE_4_00))
4203                 ndev->max_mtu = JUMBO_LEN;
4204         else
4205                 ndev->max_mtu = SKB_MAX_HEAD(NET_SKB_PAD + NET_IP_ALIGN);
4206         /* Do not overwrite ndev->max_mtu if plat->maxmtu > ndev->max_mtu,
4207          * or if plat->maxmtu < ndev->min_mtu, which is an invalid range.
4208          */
4209         if ((priv->plat->maxmtu < ndev->max_mtu) &&
4210             (priv->plat->maxmtu >= ndev->min_mtu))
4211                 ndev->max_mtu = priv->plat->maxmtu;
4212         else if (priv->plat->maxmtu < ndev->min_mtu)
4213                 dev_warn(priv->device,
4214                          "%s: warning: maxmtu having invalid value (%d)\n",
4215                          __func__, priv->plat->maxmtu);
4216
4217         if (flow_ctrl)
4218                 priv->flow_ctrl = FLOW_AUTO;    /* RX/TX pause on */
4219
4220         /* Rx Watchdog is available in cores newer than 3.40.
4221          * In some cases, for example on buggy HW, this feature
4222          * has to be disabled; this can be done by passing the
4223          * riwt_off field from the platform.
4224          */
4225         if ((priv->synopsys_id >= DWMAC_CORE_3_50) && (!priv->plat->riwt_off)) {
4226                 priv->use_riwt = 1;
4227                 dev_info(priv->device,
4228                          "Enable RX Mitigation via HW Watchdog Timer\n");
4229         }
4230
4231         for (queue = 0; queue < priv->plat->rx_queues_to_use; queue++) {
4232                 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
4233
4234                 netif_napi_add(ndev, &rx_q->napi, stmmac_poll,
4235                                (8 * priv->plat->rx_queues_to_use));
4236         }
4237
4238         spin_lock_init(&priv->lock);
4239
4240         /* If a specific clk_csr value is passed from the platform,
4241          * the CSR Clock Range selection cannot be changed at run-time
4242          * and is fixed. Otherwise the driver will try to set the
4243          * MDC clock dynamically according to the actual csr
4244          * clock input.
4245          */
4246         if (!priv->plat->clk_csr)
4247                 stmmac_clk_csr_set(priv);
4248         else
4249                 priv->clk_csr = priv->plat->clk_csr;
4250
4251         stmmac_check_pcs_mode(priv);
4252
4253         if (priv->hw->pcs != STMMAC_PCS_RGMII  &&
4254             priv->hw->pcs != STMMAC_PCS_TBI &&
4255             priv->hw->pcs != STMMAC_PCS_RTBI) {
4256                 /* MDIO bus Registration */
4257                 ret = stmmac_mdio_register(ndev);
4258                 if (ret < 0) {
4259                         dev_err(priv->device,
4260                                 "%s: MDIO bus (id: %d) registration failed\n",
4261                                 __func__, priv->plat->bus_id);
4262                         goto error_mdio_register;
4263                 }
4264         }
4265
4266         ret = register_netdev(ndev);
4267         if (ret) {
4268                 dev_err(priv->device, "%s: ERROR %i registering the device\n",
4269                         __func__, ret);
4270                 goto error_netdev_register;
4271         }
4272
4273         return ret;
4274
4275 error_netdev_register:
4276         if (priv->hw->pcs != STMMAC_PCS_RGMII &&
4277             priv->hw->pcs != STMMAC_PCS_TBI &&
4278             priv->hw->pcs != STMMAC_PCS_RTBI)
4279                 stmmac_mdio_unregister(ndev);
4280 error_mdio_register:
4281         for (queue = 0; queue < priv->plat->rx_queues_to_use; queue++) {
4282                 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
4283
4284                 netif_napi_del(&rx_q->napi);
4285         }
4286 error_hw_init:
4287         free_netdev(ndev);
4288
4289         return ret;
4290 }
4291 EXPORT_SYMBOL_GPL(stmmac_dvr_probe);
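
/* Illustrative sketch (not part of this file): a platform glue driver
 * typically fills a struct stmmac_resources from its platform_device and
 * then hands it to stmmac_dvr_probe(). foo_dwmac_probe is a hypothetical
 * name and error handling is omitted; see stmmac_platform.c for the real
 * helpers:
 *
 *        static int foo_dwmac_probe(struct platform_device *pdev)
 *        {
 *                struct plat_stmmacenet_data *plat_dat;
 *                struct stmmac_resources res;
 *                struct resource *r;
 *
 *                memset(&res, 0, sizeof(res));
 *                r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 *                res.addr = devm_ioremap_resource(&pdev->dev, r);
 *                res.irq = platform_get_irq_byname(pdev, "macirq");
 *                res.wol_irq = platform_get_irq_byname(pdev, "eth_wake_irq");
 *                res.lpi_irq = platform_get_irq_byname(pdev, "eth_lpi");
 *
 *                plat_dat = stmmac_probe_config_dt(pdev, &res.mac);
 *
 *                return stmmac_dvr_probe(&pdev->dev, plat_dat, &res);
 *        }
 */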
4292
4293 /**
4294  * stmmac_dvr_remove
4295  * @dev: device pointer
4296  * Description: this function resets the TX/RX processes, disables the MAC RX/TX,
4297  * changes the link status and releases the DMA descriptor rings.
4298  */
4299 int stmmac_dvr_remove(struct device *dev)
4300 {
4301         struct net_device *ndev = dev_get_drvdata(dev);
4302         struct stmmac_priv *priv = netdev_priv(ndev);
4303
4304         netdev_info(priv->dev, "%s: removing driver", __func__);
4305
4306         stmmac_stop_all_dma(priv);
4307
4308         priv->hw->mac->set_mac(priv->ioaddr, false);
4309         netif_carrier_off(ndev);
4310         unregister_netdev(ndev);
4311         if (priv->plat->stmmac_rst)
4312                 reset_control_assert(priv->plat->stmmac_rst);
4313         clk_disable_unprepare(priv->plat->pclk);
4314         clk_disable_unprepare(priv->plat->stmmac_clk);
4315         if (priv->hw->pcs != STMMAC_PCS_RGMII &&
4316             priv->hw->pcs != STMMAC_PCS_TBI &&
4317             priv->hw->pcs != STMMAC_PCS_RTBI)
4318                 stmmac_mdio_unregister(ndev);
4319         free_netdev(ndev);
4320
4321         return 0;
4322 }
4323 EXPORT_SYMBOL_GPL(stmmac_dvr_remove);
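
/* Illustrative sketch (not part of this file): glue drivers usually forward
 * their .remove callback to stmmac_dvr_remove(), roughly as the common
 * stmmac_pltfr_remove() helper does (which additionally handles the bus
 * clocks); foo_dwmac_remove is a hypothetical name:
 *
 *        static int foo_dwmac_remove(struct platform_device *pdev)
 *        {
 *                return stmmac_dvr_remove(&pdev->dev);
 *        }
 */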
4324
4325 /**
4326  * stmmac_suspend - suspend callback
4327  * @dev: device pointer
4328  * Description: this function suspends the device; it is called by the
4329  * platform driver to stop the network queues, program the PMT register
4330  * (for WoL) and clean up and release the driver resources.
4331  */
4332 int stmmac_suspend(struct device *dev)
4333 {
4334         struct net_device *ndev = dev_get_drvdata(dev);
4335         struct stmmac_priv *priv = netdev_priv(ndev);
4336         unsigned long flags;
4337
4338         if (!ndev || !netif_running(ndev))
4339                 return 0;
4340
4341         if (ndev->phydev)
4342                 phy_stop(ndev->phydev);
4343
4344         spin_lock_irqsave(&priv->lock, flags);
4345
4346         netif_device_detach(ndev);
4347         stmmac_stop_all_queues(priv);
4348
4349         stmmac_disable_all_queues(priv);
4350
4351         /* Stop TX/RX DMA */
4352         stmmac_stop_all_dma(priv);
4353
4354         /* Enable Power down mode by programming the PMT regs */
4355         if (device_may_wakeup(priv->device)) {
4356                 priv->hw->mac->pmt(priv->hw, priv->wolopts);
4357                 priv->irq_wake = 1;
4358         } else {
4359                 priv->hw->mac->set_mac(priv->ioaddr, false);
4360                 pinctrl_pm_select_sleep_state(priv->device);
4361                 /* Disable clocks when PMT wake-up is not used */
4362                 clk_disable(priv->plat->pclk);
4363                 clk_disable(priv->plat->stmmac_clk);
4364         }
4365         spin_unlock_irqrestore(&priv->lock, flags);
4366
4367         priv->oldlink = false;
4368         priv->speed = SPEED_UNKNOWN;
4369         priv->oldduplex = DUPLEX_UNKNOWN;
4370         return 0;
4371 }
4372 EXPORT_SYMBOL_GPL(stmmac_suspend);
4373
4374 /**
4375  * stmmac_reset_queues_param - reset queue parameters
4376  * @priv: driver private structure
4377  */
4378 static void stmmac_reset_queues_param(struct stmmac_priv *priv)
4379 {
4380         u32 rx_cnt = priv->plat->rx_queues_to_use;
4381         u32 tx_cnt = priv->plat->tx_queues_to_use;
4382         u32 queue;
4383
4384         for (queue = 0; queue < rx_cnt; queue++) {
4385                 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
4386
4387                 rx_q->cur_rx = 0;
4388                 rx_q->dirty_rx = 0;
4389         }
4390
4391         for (queue = 0; queue < tx_cnt; queue++) {
4392                 struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
4393
4394                 tx_q->cur_tx = 0;
4395                 tx_q->dirty_tx = 0;
4396         }
4397 }
4398
4399 /**
4400  * stmmac_resume - resume callback
4401  * @dev: device pointer
4402  * Description: on resume this function is invoked to set up the DMA and CORE
4403  * in a usable state.
4404  */
4405 int stmmac_resume(struct device *dev)
4406 {
4407         struct net_device *ndev = dev_get_drvdata(dev);
4408         struct stmmac_priv *priv = netdev_priv(ndev);
4409         unsigned long flags;
4410
4411         if (!netif_running(ndev))
4412                 return 0;
4413
4414         /* The Power Down bit in the PMT register is cleared
4415          * automatically as soon as a magic packet or a Wake-up frame
4416          * is received. Anyway, it's better to manually clear
4417          * this bit because it can generate problems while resuming
4418          * from another device (e.g. serial console).
4419          */
4420         if (device_may_wakeup(priv->device)) {
4421                 spin_lock_irqsave(&priv->lock, flags);
4422                 priv->hw->mac->pmt(priv->hw, 0);
4423                 spin_unlock_irqrestore(&priv->lock, flags);
4424                 priv->irq_wake = 0;
4425         } else {
4426                 pinctrl_pm_select_default_state(priv->device);
4427                 /* enable the clk previously disabled */
4428                 clk_enable(priv->plat->stmmac_clk);
4429                 clk_enable(priv->plat->pclk);
4430                 /* reset the phy so that it's ready */
4431                 if (priv->mii)
4432                         stmmac_mdio_reset(priv->mii);
4433         }
4434
4435         netif_device_attach(ndev);
4436
4437         spin_lock_irqsave(&priv->lock, flags);
4438
4439         stmmac_reset_queues_param(priv);
4440
4441         /* reset private mss value to force mss context settings at
4442          * next tso xmit (only used for gmac4).
4443          */
4444         priv->mss = 0;
4445
4446         stmmac_clear_descriptors(priv);
4447
4448         stmmac_hw_setup(ndev, false);
4449         stmmac_init_tx_coalesce(priv);
4450         stmmac_set_rx_mode(ndev);
4451
4452         stmmac_enable_all_queues(priv);
4453
4454         stmmac_start_all_queues(priv);
4455
4456         spin_unlock_irqrestore(&priv->lock, flags);
4457
4458         if (ndev->phydev)
4459                 phy_start(ndev->phydev);
4460
4461         return 0;
4462 }
4463 EXPORT_SYMBOL_GPL(stmmac_resume);
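
/* Illustrative sketch (not part of this file): stmmac_suspend() and
 * stmmac_resume() match the dev_pm_ops callback prototypes, so a glue
 * driver can wire them up directly; foo_pm_ops is a hypothetical name:
 *
 *        static SIMPLE_DEV_PM_OPS(foo_pm_ops, stmmac_suspend, stmmac_resume);
 *
 * and then point its .driver.pm field at &foo_pm_ops.
 */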
4464
4465 #ifndef MODULE
4466 static int __init stmmac_cmdline_opt(char *str)
4467 {
4468         char *opt;
4469
4470         if (!str || !*str)
4471                 return -EINVAL;
4472         while ((opt = strsep(&str, ",")) != NULL) {
4473                 if (!strncmp(opt, "debug:", 6)) {
4474                         if (kstrtoint(opt + 6, 0, &debug))
4475                                 goto err;
4476                 } else if (!strncmp(opt, "phyaddr:", 8)) {
4477                         if (kstrtoint(opt + 8, 0, &phyaddr))
4478                                 goto err;
4479                 } else if (!strncmp(opt, "buf_sz:", 7)) {
4480                         if (kstrtoint(opt + 7, 0, &buf_sz))
4481                                 goto err;
4482                 } else if (!strncmp(opt, "tc:", 3)) {
4483                         if (kstrtoint(opt + 3, 0, &tc))
4484                                 goto err;
4485                 } else if (!strncmp(opt, "watchdog:", 9)) {
4486                         if (kstrtoint(opt + 9, 0, &watchdog))
4487                                 goto err;
4488                 } else if (!strncmp(opt, "flow_ctrl:", 10)) {
4489                         if (kstrtoint(opt + 10, 0, &flow_ctrl))
4490                                 goto err;
4491                 } else if (!strncmp(opt, "pause:", 6)) {
4492                         if (kstrtoint(opt + 6, 0, &pause))
4493                                 goto err;
4494                 } else if (!strncmp(opt, "eee_timer:", 10)) {
4495                         if (kstrtoint(opt + 10, 0, &eee_timer))
4496                                 goto err;
4497                 } else if (!strncmp(opt, "chain_mode:", 11)) {
4498                         if (kstrtoint(opt + 11, 0, &chain_mode))
4499                                 goto err;
4500                 }
4501         }
4502         return 0;
4503
4504 err:
4505         pr_err("%s: ERROR broken module parameter conversion", __func__);
4506         return -EINVAL;
4507 }
4508
4509 __setup("stmmaceth=", stmmac_cmdline_opt);
4510 #endif /* MODULE */
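
/* Usage note (values are examples only): when the driver is built into the
 * kernel, the options parsed by stmmac_cmdline_opt() above can be passed on
 * the kernel command line, e.g.:
 *
 *        stmmaceth=debug:16,phyaddr:1,watchdog:4000,chain_mode:1
 */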
4511
4512 static int __init stmmac_init(void)
4513 {
4514 #ifdef CONFIG_DEBUG_FS
4515         /* Create debugfs main directory if it doesn't exist yet */
4516         if (!stmmac_fs_dir) {
4517                 stmmac_fs_dir = debugfs_create_dir(STMMAC_RESOURCE_NAME, NULL);
4518
4519                 if (!stmmac_fs_dir || IS_ERR(stmmac_fs_dir)) {
4520                         pr_err("ERROR %s, debugfs create directory failed\n",
4521                                STMMAC_RESOURCE_NAME);
4522
4523                         return -ENOMEM;
4524                 }
4525         }
4526 #endif
4527
4528         return 0;
4529 }
4530
4531 static void __exit stmmac_exit(void)
4532 {
4533 #ifdef CONFIG_DEBUG_FS
4534         debugfs_remove_recursive(stmmac_fs_dir);
4535 #endif
4536 }
4537
4538 module_init(stmmac_init)
4539 module_exit(stmmac_exit)
4540
4541 MODULE_DESCRIPTION("STMMAC 10/100/1000 Ethernet device driver");
4542 MODULE_AUTHOR("Giuseppe Cavallaro <peppe.cavallaro@st.com>");
4543 MODULE_LICENSE("GPL");