drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
/*******************************************************************************
  This is the driver for the ST MAC 10/100/1000 on-chip Ethernet controllers.
  ST Ethernet IPs are built around a Synopsys IP Core.

        Copyright(C) 2007-2011 STMicroelectronics Ltd

  This program is free software; you can redistribute it and/or modify it
  under the terms and conditions of the GNU General Public License,
  version 2, as published by the Free Software Foundation.

  This program is distributed in the hope it will be useful, but WITHOUT
  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  more details.

  The full GNU General Public License is included in this distribution in
  the file called "COPYING".

  Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>

  Documentation available at:
        http://www.stlinux.com
  Support available at:
        https://bugzilla.stlinux.com/
*******************************************************************************/

#include <linux/clk.h>
#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>
#include <linux/if_ether.h>
#include <linux/crc32.h>
#include <linux/mii.h>
#include <linux/if.h>
#include <linux/if_vlan.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/prefetch.h>
#include <linux/pinctrl/consumer.h>
#ifdef CONFIG_DEBUG_FS
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#endif /* CONFIG_DEBUG_FS */
#include <linux/net_tstamp.h>
#include "stmmac_ptp.h"
#include "stmmac.h"
#include <linux/reset.h>
#include <linux/of_mdio.h>
#include "dwmac1000.h"

#define STMMAC_ALIGN(x) L1_CACHE_ALIGN(x)
#define TSO_MAX_BUFF_SIZE       (SZ_16K - 1)

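/*
 * Note: STMMAC_ALIGN() rounds a length up to the L1 cache line; with 64-byte
 * lines, for example, STMMAC_ALIGN(1500) == 1536. TSO_MAX_BUFF_SIZE caps a
 * single TSO buffer at 16 KiB - 1, presumably because the descriptor
 * buffer-length field is 14 bits wide (2^14 - 1 == 16383).
 */
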
/* Module parameters */
#define TX_TIMEO        5000
static int watchdog = TX_TIMEO;
module_param(watchdog, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(watchdog, "Transmit timeout in milliseconds (default 5s)");

static int debug = -1;
module_param(debug, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(debug, "Message Level (-1: default, 0: no output, 16: all)");

static int phyaddr = -1;
module_param(phyaddr, int, S_IRUGO);
MODULE_PARM_DESC(phyaddr, "Physical device address");

#define STMMAC_TX_THRESH        (DMA_TX_SIZE / 4)
#define STMMAC_RX_THRESH        (DMA_RX_SIZE / 4)

static int flow_ctrl = FLOW_OFF;
module_param(flow_ctrl, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(flow_ctrl, "Flow control ability [on/off]");

static int pause = PAUSE_TIME;
module_param(pause, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(pause, "Flow Control Pause Time");

#define TC_DEFAULT 64
static int tc = TC_DEFAULT;
module_param(tc, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(tc, "DMA threshold control value");

#define DEFAULT_BUFSIZE 1536
static int buf_sz = DEFAULT_BUFSIZE;
module_param(buf_sz, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(buf_sz, "DMA buffer size");

#define STMMAC_RX_COPYBREAK     256

static const u32 default_msg_level = (NETIF_MSG_DRV | NETIF_MSG_PROBE |
                                      NETIF_MSG_LINK | NETIF_MSG_IFUP |
                                      NETIF_MSG_IFDOWN | NETIF_MSG_TIMER);

#define STMMAC_DEFAULT_LPI_TIMER        1000
static int eee_timer = STMMAC_DEFAULT_LPI_TIMER;
module_param(eee_timer, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(eee_timer, "LPI tx expiration time in msec");
#define STMMAC_LPI_T(x) (jiffies + msecs_to_jiffies(x))

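/*
 * Note: STMMAC_LPI_T() turns the eee_timer module parameter (milliseconds)
 * into an absolute jiffies deadline; with the default of 1000 ms the EEE
 * software timer fires roughly one second after being (re)armed.
 */
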
/* By default the driver will use the ring mode to manage tx and rx descriptors,
 * but allow the user to force the use of the chain instead of the ring.
 */
static unsigned int chain_mode;
module_param(chain_mode, int, S_IRUGO);
MODULE_PARM_DESC(chain_mode, "To use chain instead of ring mode");

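/*
 * Note: roughly speaking, in ring mode the descriptors sit in one contiguous
 * array and the DMA engine wraps at the end-of-ring marker, while in chain
 * mode each descriptor carries a pointer to the next one, so the chain need
 * not be contiguous in memory.
 */
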
static irqreturn_t stmmac_interrupt(int irq, void *dev_id);

#ifdef CONFIG_DEBUG_FS
static int stmmac_init_fs(struct net_device *dev);
static void stmmac_exit_fs(struct net_device *dev);
#endif

#define STMMAC_COAL_TIMER(x) (jiffies + usecs_to_jiffies(x))

/**
 * stmmac_verify_args - verify the driver parameters.
 * Description: it checks the driver parameters and sets a default in case of
 * errors.
 */
static void stmmac_verify_args(void)
{
        if (unlikely(watchdog < 0))
                watchdog = TX_TIMEO;
        if (unlikely((buf_sz < DEFAULT_BUFSIZE) || (buf_sz > BUF_SIZE_16KiB)))
                buf_sz = DEFAULT_BUFSIZE;
        if (unlikely(flow_ctrl > 1))
                flow_ctrl = FLOW_AUTO;
        else if (likely(flow_ctrl < 0))
                flow_ctrl = FLOW_OFF;
        if (unlikely((pause < 0) || (pause > 0xffff)))
                pause = PAUSE_TIME;
        if (eee_timer < 0)
                eee_timer = STMMAC_DEFAULT_LPI_TIMER;
}

/**
 * stmmac_disable_all_queues - Disable all queues
 * @priv: driver private structure
 */
static void stmmac_disable_all_queues(struct stmmac_priv *priv)
{
        u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
        u32 queue;

        for (queue = 0; queue < rx_queues_cnt; queue++) {
                struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];

                napi_disable(&rx_q->napi);
        }
}

/**
 * stmmac_enable_all_queues - Enable all queues
 * @priv: driver private structure
 */
static void stmmac_enable_all_queues(struct stmmac_priv *priv)
{
        u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
        u32 queue;

        for (queue = 0; queue < rx_queues_cnt; queue++) {
                struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];

                napi_enable(&rx_q->napi);
        }
}

/**
 * stmmac_stop_all_queues - Stop all queues
 * @priv: driver private structure
 */
static void stmmac_stop_all_queues(struct stmmac_priv *priv)
{
        u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
        u32 queue;

        for (queue = 0; queue < tx_queues_cnt; queue++)
                netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
}

/**
 * stmmac_start_all_queues - Start all queues
 * @priv: driver private structure
 */
static void stmmac_start_all_queues(struct stmmac_priv *priv)
{
        u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
        u32 queue;

        for (queue = 0; queue < tx_queues_cnt; queue++)
                netif_tx_start_queue(netdev_get_tx_queue(priv->dev, queue));
}

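/*
 * Note: the disable/enable helpers above act on the RX NAPI contexts, while
 * the stop/start helpers act on the netdev TX queues; the two pairs are
 * independent mechanisms.
 */
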
/**
 * stmmac_clk_csr_set - dynamically set the MDC clock
 * @priv: driver private structure
 * Description: this is to dynamically set the MDC clock according to the csr
 * clock input.
 * Note:
 *      If a specific clk_csr value is passed from the platform
 *      this means that the CSR Clock Range selection cannot be
 *      changed at run-time and it is fixed (as reported in the driver
 *      documentation). Vice versa, the driver will try to set the MDC
 *      clock dynamically according to the actual clock input.
 */
static void stmmac_clk_csr_set(struct stmmac_priv *priv)
{
        u32 clk_rate;

        clk_rate = clk_get_rate(priv->plat->stmmac_clk);

        /* Platform provided default clk_csr would be assumed valid
         * for all other cases except for the below mentioned ones.
         * For values higher than the IEEE 802.3 specified frequency
         * we cannot estimate the proper divider as the frequency of
         * clk_csr_i is not known. So we do not change the default
         * divider.
         */
        if (!(priv->clk_csr & MAC_CSR_H_FRQ_MASK)) {
                if (clk_rate < CSR_F_35M)
                        priv->clk_csr = STMMAC_CSR_20_35M;
                else if ((clk_rate >= CSR_F_35M) && (clk_rate < CSR_F_60M))
                        priv->clk_csr = STMMAC_CSR_35_60M;
                else if ((clk_rate >= CSR_F_60M) && (clk_rate < CSR_F_100M))
                        priv->clk_csr = STMMAC_CSR_60_100M;
                else if ((clk_rate >= CSR_F_100M) && (clk_rate < CSR_F_150M))
                        priv->clk_csr = STMMAC_CSR_100_150M;
                else if ((clk_rate >= CSR_F_150M) && (clk_rate < CSR_F_250M))
                        priv->clk_csr = STMMAC_CSR_150_250M;
                else if ((clk_rate >= CSR_F_250M) && (clk_rate < CSR_F_300M))
                        priv->clk_csr = STMMAC_CSR_250_300M;
        }

        if (priv->plat->has_sun8i) {
                if (clk_rate > 160000000)
                        priv->clk_csr = 0x03;
                else if (clk_rate > 80000000)
                        priv->clk_csr = 0x02;
                else if (clk_rate > 40000000)
                        priv->clk_csr = 0x01;
                else
                        priv->clk_csr = 0;
        }
}

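/*
 * Note: each STMMAC_CSR_* range selects a fixed MDC divider that keeps the
 * MDIO clock within the IEEE 802.3 limit (about 2.5 MHz). For example, a
 * 50 MHz csr clock falls in the 35-60 MHz range, which on this IP typically
 * divides by 26, giving an MDC of roughly 1.9 MHz.
 */
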
static void print_pkt(unsigned char *buf, int len)
{
        pr_debug("len = %d byte, buf addr: 0x%p\n", len, buf);
        print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, buf, len);
}

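/*
 * stmmac_tx_avail - free entries left in the TX ring of @queue.
 * One slot is deliberately kept unused so that a full ring can be told apart
 * from an empty one; e.g. with DMA_TX_SIZE == 512, cur_tx == 510 and
 * dirty_tx == 2, avail == 512 - 510 + 2 - 1 == 3.
 */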
static inline u32 stmmac_tx_avail(struct stmmac_priv *priv, u32 queue)
{
        struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
        u32 avail;

        if (tx_q->dirty_tx > tx_q->cur_tx)
                avail = tx_q->dirty_tx - tx_q->cur_tx - 1;
        else
                avail = DMA_TX_SIZE - tx_q->cur_tx + tx_q->dirty_tx - 1;

        return avail;
}

/**
 * stmmac_rx_dirty - Get RX queue dirty
 * @priv: driver private structure
 * @queue: RX queue index
 */
static inline u32 stmmac_rx_dirty(struct stmmac_priv *priv, u32 queue)
{
        struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
        u32 dirty;

        if (rx_q->dirty_rx <= rx_q->cur_rx)
                dirty = rx_q->cur_rx - rx_q->dirty_rx;
        else
                dirty = DMA_RX_SIZE - rx_q->dirty_rx + rx_q->cur_rx;

        return dirty;
}

/**
 * stmmac_hw_fix_mac_speed - callback for speed selection
 * @priv: driver private structure
 * Description: on some platforms (e.g. ST), some HW system configuration
 * registers have to be set according to the link speed negotiated.
 */
static inline void stmmac_hw_fix_mac_speed(struct stmmac_priv *priv)
{
        struct net_device *ndev = priv->dev;
        struct phy_device *phydev = ndev->phydev;

        if (likely(priv->plat->fix_mac_speed))
                priv->plat->fix_mac_speed(priv->plat->bsp_priv, phydev->speed);
}

/**
 * stmmac_enable_eee_mode - check and enter in LPI mode
 * @priv: driver private structure
 * Description: this function verifies that all the TX queues are idle and,
 * in case of EEE, enters LPI mode.
 */
static void stmmac_enable_eee_mode(struct stmmac_priv *priv)
{
        u32 tx_cnt = priv->plat->tx_queues_to_use;
        u32 queue;

        /* check if all TX queues have the work finished */
        for (queue = 0; queue < tx_cnt; queue++) {
                struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];

                if (tx_q->dirty_tx != tx_q->cur_tx)
                        return; /* still unfinished work */
        }

        /* Check and enter in LPI mode */
        if (!priv->tx_path_in_lpi_mode)
                priv->hw->mac->set_eee_mode(priv->hw,
                                            priv->plat->en_tx_lpi_clockgating);
}

/**
 * stmmac_disable_eee_mode - disable and exit from LPI mode
 * @priv: driver private structure
 * Description: this function exits and disables the EEE when the TX path is
 * in LPI state. It is called from the xmit path.
 */
void stmmac_disable_eee_mode(struct stmmac_priv *priv)
{
        priv->hw->mac->reset_eee_mode(priv->hw);
        del_timer_sync(&priv->eee_ctrl_timer);
        priv->tx_path_in_lpi_mode = false;
}

/**
 * stmmac_eee_ctrl_timer - EEE TX SW timer.
 * @t: timer_list struct containing private info
 * Description:
 *  if there is no data transfer and if we are not already in LPI state,
 *  then the MAC Transmitter can be moved to LPI state.
 */
static void stmmac_eee_ctrl_timer(struct timer_list *t)
{
        struct stmmac_priv *priv = from_timer(priv, t, eee_ctrl_timer);

        stmmac_enable_eee_mode(priv);
        mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(eee_timer));
}

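/*
 * Note: the timer above re-arms itself unconditionally every eee_timer ms;
 * the actual entry into LPI only happens once stmmac_enable_eee_mode() sees
 * all TX queues idle.
 */
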
/**
 * stmmac_eee_init - init EEE
 * @priv: driver private structure
 * Description:
 *  if the GMAC supports the EEE (from the HW cap reg) and the phy device
 *  can also manage EEE, this function enables the LPI state and starts the
 *  related timer.
 */
bool stmmac_eee_init(struct stmmac_priv *priv)
{
        struct net_device *ndev = priv->dev;
        int interface = priv->plat->interface;
        unsigned long flags;
        bool ret = false;

        if ((interface != PHY_INTERFACE_MODE_MII) &&
            (interface != PHY_INTERFACE_MODE_GMII) &&
            !phy_interface_mode_is_rgmii(interface))
                goto out;

        /* Using PCS we cannot deal with the PHY registers at this stage,
         * so we do not support extra features like EEE.
         */
        if ((priv->hw->pcs == STMMAC_PCS_RGMII) ||
            (priv->hw->pcs == STMMAC_PCS_TBI) ||
            (priv->hw->pcs == STMMAC_PCS_RTBI))
                goto out;

        /* MAC core supports the EEE feature. */
        if (priv->dma_cap.eee) {
                int tx_lpi_timer = priv->tx_lpi_timer;

                /* Check if the PHY supports EEE */
                if (phy_init_eee(ndev->phydev, 1)) {
                        /* Handle the case where, at run-time, EEE can no
                         * longer be supported (for example because the link
                         * partner caps have changed).
                         * In that case the driver disables its own timers.
                         */
                        spin_lock_irqsave(&priv->lock, flags);
                        if (priv->eee_active) {
                                netdev_dbg(priv->dev, "disable EEE\n");
                                del_timer_sync(&priv->eee_ctrl_timer);
                                priv->hw->mac->set_eee_timer(priv->hw, 0,
                                                             tx_lpi_timer);
                        }
                        priv->eee_active = 0;
                        spin_unlock_irqrestore(&priv->lock, flags);
                        goto out;
                }
                /* Activate the EEE and start timers */
                spin_lock_irqsave(&priv->lock, flags);
                if (!priv->eee_active) {
                        priv->eee_active = 1;
                        timer_setup(&priv->eee_ctrl_timer,
                                    stmmac_eee_ctrl_timer, 0);
                        mod_timer(&priv->eee_ctrl_timer,
                                  STMMAC_LPI_T(eee_timer));

                        priv->hw->mac->set_eee_timer(priv->hw,
                                                     STMMAC_DEFAULT_LIT_LS,
                                                     tx_lpi_timer);
                }
                /* Set HW EEE according to the speed */
                priv->hw->mac->set_eee_pls(priv->hw, ndev->phydev->link);

                ret = true;
                spin_unlock_irqrestore(&priv->lock, flags);

                netdev_dbg(priv->dev, "Energy-Efficient Ethernet initialized\n");
        }
out:
        return ret;
}

/* stmmac_get_tx_hwtstamp - get HW TX timestamps
 * @priv: driver private structure
 * @p : descriptor pointer
 * @skb : the socket buffer
 * Description :
 * This function will read the timestamp from the descriptor, perform some
 * sanity checks and pass it to the stack.
 */
static void stmmac_get_tx_hwtstamp(struct stmmac_priv *priv,
                                   struct dma_desc *p, struct sk_buff *skb)
{
        struct skb_shared_hwtstamps shhwtstamp;
        u64 ns;

        if (!priv->hwts_tx_en)
                return;

        /* exit if skb doesn't support hw tstamp */
        if (likely(!skb || !(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)))
                return;

        /* check tx tstamp status */
        if (priv->hw->desc->get_tx_timestamp_status(p)) {
                /* get the valid tstamp */
                ns = priv->hw->desc->get_timestamp(p, priv->adv_ts);

                memset(&shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
                shhwtstamp.hwtstamp = ns_to_ktime(ns);

                netdev_dbg(priv->dev, "get valid TX hw timestamp %llu\n", ns);
                /* pass tstamp to stack */
                skb_tstamp_tx(skb, &shhwtstamp);
        }
}

/* stmmac_get_rx_hwtstamp - get HW RX timestamps
 * @priv: driver private structure
 * @p : descriptor pointer
 * @np : next descriptor pointer
 * @skb : the socket buffer
 * Description :
 * This function will read the received packet's timestamp from the
 * descriptor and pass it to the stack. It also performs some sanity checks.
 */
static void stmmac_get_rx_hwtstamp(struct stmmac_priv *priv, struct dma_desc *p,
                                   struct dma_desc *np, struct sk_buff *skb)
{
        struct skb_shared_hwtstamps *shhwtstamp = NULL;
        struct dma_desc *desc = p;
        u64 ns;

        if (!priv->hwts_rx_en)
                return;
        /* For GMAC4, the valid timestamp is from CTX next desc. */
        if (priv->plat->has_gmac4)
                desc = np;

        /* Check if timestamp is available */
        if (priv->hw->desc->get_rx_timestamp_status(p, np, priv->adv_ts)) {
                ns = priv->hw->desc->get_timestamp(desc, priv->adv_ts);
                netdev_dbg(priv->dev, "get valid RX hw timestamp %llu\n", ns);
                shhwtstamp = skb_hwtstamps(skb);
                memset(shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
                shhwtstamp->hwtstamp = ns_to_ktime(ns);
        } else {
                netdev_dbg(priv->dev, "cannot get RX hw timestamp\n");
        }
}

/**
 *  stmmac_hwtstamp_ioctl - control hardware timestamping.
 *  @dev: device pointer.
 *  @ifr: An IOCTL specific structure, that can contain a pointer to
 *  a proprietary structure used to pass information to the driver.
 *  Description:
 *  This function configures the MAC to enable/disable both outgoing (TX)
 *  and incoming (RX) packet time stamping based on user input.
 *  Return Value:
 *  0 on success and an appropriate negative integer on failure.
 */
static int stmmac_hwtstamp_ioctl(struct net_device *dev, struct ifreq *ifr)
{
        struct stmmac_priv *priv = netdev_priv(dev);
        struct hwtstamp_config config;
        struct timespec64 now;
        u64 temp = 0;
        u32 ptp_v2 = 0;
        u32 tstamp_all = 0;
        u32 ptp_over_ipv4_udp = 0;
        u32 ptp_over_ipv6_udp = 0;
        u32 ptp_over_ethernet = 0;
        u32 snap_type_sel = 0;
        u32 ts_master_en = 0;
        u32 ts_event_en = 0;
        u32 value = 0;
        u32 sec_inc;

        if (!(priv->dma_cap.time_stamp || priv->adv_ts)) {
                netdev_alert(priv->dev, "No support for HW time stamping\n");
                priv->hwts_tx_en = 0;
                priv->hwts_rx_en = 0;

                return -EOPNOTSUPP;
        }

        if (copy_from_user(&config, ifr->ifr_data,
                           sizeof(struct hwtstamp_config)))
                return -EFAULT;

        netdev_dbg(priv->dev, "%s config flags:0x%x, tx_type:0x%x, rx_filter:0x%x\n",
                   __func__, config.flags, config.tx_type, config.rx_filter);

        /* reserved for future extensions */
        if (config.flags)
                return -EINVAL;

        if (config.tx_type != HWTSTAMP_TX_OFF &&
            config.tx_type != HWTSTAMP_TX_ON)
                return -ERANGE;

        if (priv->adv_ts) {
                switch (config.rx_filter) {
                case HWTSTAMP_FILTER_NONE:
                        /* time stamp no incoming packet at all */
                        config.rx_filter = HWTSTAMP_FILTER_NONE;
                        break;

                case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
                        /* PTP v1, UDP, any kind of event packet */
                        config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
                        /* take time stamp for all event messages */
                        if (priv->plat->has_gmac4)
                                snap_type_sel = PTP_GMAC4_TCR_SNAPTYPSEL_1;
                        else
                                snap_type_sel = PTP_TCR_SNAPTYPSEL_1;

                        ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
                        ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
                        break;

                case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
                        /* PTP v1, UDP, Sync packet */
                        config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_SYNC;
                        /* take time stamp for SYNC messages only */
                        ts_event_en = PTP_TCR_TSEVNTENA;

                        ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
                        ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
                        break;

                case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
                        /* PTP v1, UDP, Delay_req packet */
                        config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ;
                        /* take time stamp for Delay_Req messages only */
                        ts_master_en = PTP_TCR_TSMSTRENA;
                        ts_event_en = PTP_TCR_TSEVNTENA;

                        ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
                        ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
                        break;

                case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
                        /* PTP v2, UDP, any kind of event packet */
                        config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
                        ptp_v2 = PTP_TCR_TSVER2ENA;
                        /* take time stamp for all event messages */
                        if (priv->plat->has_gmac4)
                                snap_type_sel = PTP_GMAC4_TCR_SNAPTYPSEL_1;
                        else
                                snap_type_sel = PTP_TCR_SNAPTYPSEL_1;

                        ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
                        ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
                        break;

                case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
                        /* PTP v2, UDP, Sync packet */
                        config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_SYNC;
                        ptp_v2 = PTP_TCR_TSVER2ENA;
                        /* take time stamp for SYNC messages only */
                        ts_event_en = PTP_TCR_TSEVNTENA;

                        ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
                        ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
                        break;

                case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
                        /* PTP v2, UDP, Delay_req packet */
                        config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ;
                        ptp_v2 = PTP_TCR_TSVER2ENA;
                        /* take time stamp for Delay_Req messages only */
                        ts_master_en = PTP_TCR_TSMSTRENA;
                        ts_event_en = PTP_TCR_TSEVNTENA;

                        ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
                        ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
                        break;

                case HWTSTAMP_FILTER_PTP_V2_EVENT:
                        /* PTP v2/802.1AS, any layer, any kind of event packet */
                        config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
                        ptp_v2 = PTP_TCR_TSVER2ENA;
                        /* take time stamp for all event messages */
                        if (priv->plat->has_gmac4)
                                snap_type_sel = PTP_GMAC4_TCR_SNAPTYPSEL_1;
                        else
                                snap_type_sel = PTP_TCR_SNAPTYPSEL_1;

                        ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
                        ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
                        ptp_over_ethernet = PTP_TCR_TSIPENA;
                        break;

                case HWTSTAMP_FILTER_PTP_V2_SYNC:
                        /* PTP v2/802.1AS, any layer, Sync packet */
                        config.rx_filter = HWTSTAMP_FILTER_PTP_V2_SYNC;
                        ptp_v2 = PTP_TCR_TSVER2ENA;
                        /* take time stamp for SYNC messages only */
                        ts_event_en = PTP_TCR_TSEVNTENA;

                        ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
                        ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
                        ptp_over_ethernet = PTP_TCR_TSIPENA;
                        break;

                case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
                        /* PTP v2/802.1AS, any layer, Delay_req packet */
                        config.rx_filter = HWTSTAMP_FILTER_PTP_V2_DELAY_REQ;
                        ptp_v2 = PTP_TCR_TSVER2ENA;
                        /* take time stamp for Delay_Req messages only */
                        ts_master_en = PTP_TCR_TSMSTRENA;
                        ts_event_en = PTP_TCR_TSEVNTENA;

                        ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
                        ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
                        ptp_over_ethernet = PTP_TCR_TSIPENA;
                        break;

                case HWTSTAMP_FILTER_NTP_ALL:
                case HWTSTAMP_FILTER_ALL:
                        /* time stamp any incoming packet */
                        config.rx_filter = HWTSTAMP_FILTER_ALL;
                        tstamp_all = PTP_TCR_TSENALL;
                        break;

                default:
                        return -ERANGE;
                }
        } else {
                switch (config.rx_filter) {
                case HWTSTAMP_FILTER_NONE:
                        config.rx_filter = HWTSTAMP_FILTER_NONE;
                        break;
                default:
                        /* PTP v1, UDP, any kind of event packet */
                        config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
                        break;
                }
        }
        priv->hwts_rx_en = ((config.rx_filter == HWTSTAMP_FILTER_NONE) ? 0 : 1);
        priv->hwts_tx_en = config.tx_type == HWTSTAMP_TX_ON;

        if (!priv->hwts_tx_en && !priv->hwts_rx_en)
                priv->hw->ptp->config_hw_tstamping(priv->ptpaddr, 0);
        else {
                value = (PTP_TCR_TSENA | PTP_TCR_TSCFUPDT | PTP_TCR_TSCTRLSSR |
                         tstamp_all | ptp_v2 | ptp_over_ethernet |
                         ptp_over_ipv6_udp | ptp_over_ipv4_udp | ts_event_en |
                         ts_master_en | snap_type_sel);
                priv->hw->ptp->config_hw_tstamping(priv->ptpaddr, value);

                /* program Sub Second Increment reg */
                sec_inc = priv->hw->ptp->config_sub_second_increment(
                        priv->ptpaddr, priv->plat->clk_ptp_rate,
                        priv->plat->has_gmac4);
                temp = div_u64(1000000000ULL, sec_inc);

                /* calculate default added value:
                 * formula is :
                 * addend = (2^32)/freq_div_ratio;
                 * where, freq_div_ratio = clk_ptp_rate/(1e9ns/sec_inc)
                 */
                temp = (u64)(temp << 32);
                priv->default_addend = div_u64(temp, priv->plat->clk_ptp_rate);
                priv->hw->ptp->config_addend(priv->ptpaddr,
                                             priv->default_addend);

                /* initialize system time */
                ktime_get_real_ts64(&now);

                /* lower 32 bits of tv_sec are safe until y2106 */
                priv->hw->ptp->init_systime(priv->ptpaddr, (u32)now.tv_sec,
                                            now.tv_nsec);
        }

        return copy_to_user(ifr->ifr_data, &config,
                            sizeof(struct hwtstamp_config)) ? -EFAULT : 0;
}

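/*
 * Worked example for the addend computation above (illustrative numbers
 * only): with clk_ptp_rate = 50 MHz and sec_inc = 40 ns, 1e9 / sec_inc =
 * 25 MHz, so addend = (25e6 << 32) / 50e6 = 0x80000000. The 32-bit
 * accumulator then overflows every second PTP clock cycle, producing one
 * 40 ns sub-second increment every 40 ns of wall time.
 */
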
/**
 * stmmac_init_ptp - init PTP
 * @priv: driver private structure
 * Description: this is to verify if the HW supports PTPv1 or PTPv2.
 * This is done by looking at the HW cap. register.
 * This function also registers the ptp driver.
 */
static int stmmac_init_ptp(struct stmmac_priv *priv)
{
        if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp))
                return -EOPNOTSUPP;

        priv->adv_ts = 0;
        /* Check if adv_ts can be enabled for dwmac 4.x core */
        if (priv->plat->has_gmac4 && priv->dma_cap.atime_stamp)
                priv->adv_ts = 1;
        /* Dwmac 3.x core with extend_desc can support adv_ts */
        else if (priv->extend_desc && priv->dma_cap.atime_stamp)
                priv->adv_ts = 1;

        if (priv->dma_cap.time_stamp)
                netdev_info(priv->dev, "IEEE 1588-2002 Timestamp supported\n");

        if (priv->adv_ts)
                netdev_info(priv->dev,
                            "IEEE 1588-2008 Advanced Timestamp supported\n");

        priv->hw->ptp = &stmmac_ptp;
        priv->hwts_tx_en = 0;
        priv->hwts_rx_en = 0;

        stmmac_ptp_register(priv);

        return 0;
}

static void stmmac_release_ptp(struct stmmac_priv *priv)
{
        if (priv->plat->clk_ptp_ref)
                clk_disable_unprepare(priv->plat->clk_ptp_ref);
        stmmac_ptp_unregister(priv);
}

/**
 *  stmmac_mac_flow_ctrl - Configure flow control in all queues
 *  @priv: driver private structure
 *  @duplex: duplex mode negotiated for the link
 *  Description: It is used for configuring the flow control in all queues
 */
static void stmmac_mac_flow_ctrl(struct stmmac_priv *priv, u32 duplex)
{
        u32 tx_cnt = priv->plat->tx_queues_to_use;

        priv->hw->mac->flow_ctrl(priv->hw, duplex, priv->flow_ctrl,
                                 priv->pause, tx_cnt);
}

/**
 * stmmac_adjust_link - adjusts the link parameters
 * @dev: net device structure
 * Description: this is the helper called by the physical abstraction layer
 * drivers to communicate the phy link status. According to the speed and
 * duplex this driver can invoke registered glue-logic as well.
 * It also invokes the EEE initialization because this could happen when
 * switching between different (EEE capable) networks.
 */
static void stmmac_adjust_link(struct net_device *dev)
{
        struct stmmac_priv *priv = netdev_priv(dev);
        struct phy_device *phydev = dev->phydev;
        unsigned long flags;
        bool new_state = false;

        if (!phydev)
                return;

        spin_lock_irqsave(&priv->lock, flags);

        if (phydev->link) {
                u32 ctrl = readl(priv->ioaddr + MAC_CTRL_REG);

                /* Now we make sure that we can be in full duplex mode.
                 * If not, we operate in half-duplex mode.
                 */
                if (phydev->duplex != priv->oldduplex) {
                        new_state = true;
                        if (!phydev->duplex)
                                ctrl &= ~priv->hw->link.duplex;
                        else
                                ctrl |= priv->hw->link.duplex;
                        priv->oldduplex = phydev->duplex;
                }
                /* Flow Control operation */
                if (phydev->pause)
                        stmmac_mac_flow_ctrl(priv, phydev->duplex);

                if (phydev->speed != priv->speed) {
                        new_state = true;
                        ctrl &= ~priv->hw->link.speed_mask;
                        switch (phydev->speed) {
                        case SPEED_1000:
                                ctrl |= priv->hw->link.speed1000;
                                break;
                        case SPEED_100:
                                ctrl |= priv->hw->link.speed100;
                                break;
                        case SPEED_10:
                                ctrl |= priv->hw->link.speed10;
                                break;
                        default:
                                netif_warn(priv, link, priv->dev,
                                           "broken speed: %d\n", phydev->speed);
                                phydev->speed = SPEED_UNKNOWN;
                                break;
                        }
                        if (phydev->speed != SPEED_UNKNOWN)
                                stmmac_hw_fix_mac_speed(priv);
                        priv->speed = phydev->speed;
                }

                writel(ctrl, priv->ioaddr + MAC_CTRL_REG);

                if (!priv->oldlink) {
                        new_state = true;
                        priv->oldlink = true;
                }
        } else if (priv->oldlink) {
                new_state = true;
                priv->oldlink = false;
                priv->speed = SPEED_UNKNOWN;
                priv->oldduplex = DUPLEX_UNKNOWN;
        }

        if (new_state && netif_msg_link(priv))
                phy_print_status(phydev);

        spin_unlock_irqrestore(&priv->lock, flags);

        if (phydev->is_pseudo_fixed_link)
                /* Stop the PHY layer from calling the hook to adjust the
                 * link in case a switch is attached to the stmmac driver.
                 */
                phydev->irq = PHY_IGNORE_INTERRUPT;
        else
                /* At this stage, init the EEE if supported.
                 * Never called in case of fixed_link.
                 */
                priv->eee_enabled = stmmac_eee_init(priv);
}

/**
 * stmmac_check_pcs_mode - verify if RGMII/SGMII is supported
 * @priv: driver private structure
 * Description: this is to verify if the HW supports the Physical Coding
 * Sublayer (PCS) interface, which can be used when the MAC is configured
 * for the TBI, RTBI, or SGMII PHY interface.
 */
static void stmmac_check_pcs_mode(struct stmmac_priv *priv)
{
        int interface = priv->plat->interface;

        if (priv->dma_cap.pcs) {
                if ((interface == PHY_INTERFACE_MODE_RGMII) ||
                    (interface == PHY_INTERFACE_MODE_RGMII_ID) ||
                    (interface == PHY_INTERFACE_MODE_RGMII_RXID) ||
                    (interface == PHY_INTERFACE_MODE_RGMII_TXID)) {
                        netdev_dbg(priv->dev, "PCS RGMII support enabled\n");
                        priv->hw->pcs = STMMAC_PCS_RGMII;
                } else if (interface == PHY_INTERFACE_MODE_SGMII) {
                        netdev_dbg(priv->dev, "PCS SGMII support enabled\n");
                        priv->hw->pcs = STMMAC_PCS_SGMII;
                }
        }
}

/**
 * stmmac_init_phy - PHY initialization
 * @dev: net device structure
 * Description: it initializes the driver's PHY state, and attaches the PHY
 * to the mac driver.
 *  Return value:
 *  0 on success
 */
static int stmmac_init_phy(struct net_device *dev)
{
        struct stmmac_priv *priv = netdev_priv(dev);
        struct phy_device *phydev;
        char phy_id_fmt[MII_BUS_ID_SIZE + 3];
        char bus_id[MII_BUS_ID_SIZE];
        int interface = priv->plat->interface;
        int max_speed = priv->plat->max_speed;

        priv->oldlink = false;
        priv->speed = SPEED_UNKNOWN;
        priv->oldduplex = DUPLEX_UNKNOWN;

        if (priv->plat->phy_node) {
                phydev = of_phy_connect(dev, priv->plat->phy_node,
                                        &stmmac_adjust_link, 0, interface);
        } else {
                snprintf(bus_id, MII_BUS_ID_SIZE, "stmmac-%x",
                         priv->plat->bus_id);

                snprintf(phy_id_fmt, MII_BUS_ID_SIZE + 3, PHY_ID_FMT, bus_id,
                         priv->plat->phy_addr);
                netdev_dbg(priv->dev, "%s: trying to attach to %s\n", __func__,
                           phy_id_fmt);

                phydev = phy_connect(dev, phy_id_fmt, &stmmac_adjust_link,
                                     interface);
        }

        if (IS_ERR_OR_NULL(phydev)) {
                netdev_err(priv->dev, "Could not attach to PHY\n");
                if (!phydev)
                        return -ENODEV;

                return PTR_ERR(phydev);
        }

        /* Stop Advertising 1000BASE Capability if interface is not GMII */
        if ((interface == PHY_INTERFACE_MODE_MII) ||
            (interface == PHY_INTERFACE_MODE_RMII) ||
            (max_speed < 1000 && max_speed > 0))
                phydev->advertising &= ~(SUPPORTED_1000baseT_Half |
                                         SUPPORTED_1000baseT_Full);

        /*
         * Broken HW is sometimes missing the pull-up resistor on the
         * MDIO line, which results in reads to non-existent devices returning
         * 0 rather than 0xffff. Catch this here and treat 0 as a non-existent
         * device as well.
         * Note: phydev->phy_id is the result of reading the UID PHY registers.
         */
        if (!priv->plat->phy_node && phydev->phy_id == 0) {
                phy_disconnect(phydev);
                return -ENODEV;
        }

        /* stmmac_adjust_link will change this to PHY_IGNORE_INTERRUPT to avoid
         * subsequent PHY polling, make sure we force a link transition if
         * we have a UP/DOWN/UP transition
         */
        if (phydev->is_pseudo_fixed_link)
                phydev->irq = PHY_POLL;

        phy_attached_info(phydev);
        return 0;
}

static void stmmac_display_rx_rings(struct stmmac_priv *priv)
{
        u32 rx_cnt = priv->plat->rx_queues_to_use;
        void *head_rx;
        u32 queue;

        /* Display RX rings */
        for (queue = 0; queue < rx_cnt; queue++) {
                struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];

                pr_info("\tRX Queue %u rings\n", queue);

                if (priv->extend_desc)
                        head_rx = (void *)rx_q->dma_erx;
                else
                        head_rx = (void *)rx_q->dma_rx;

                /* Display RX ring */
                priv->hw->desc->display_ring(head_rx, DMA_RX_SIZE, true);
        }
}

static void stmmac_display_tx_rings(struct stmmac_priv *priv)
{
        u32 tx_cnt = priv->plat->tx_queues_to_use;
        void *head_tx;
        u32 queue;

        /* Display TX rings */
        for (queue = 0; queue < tx_cnt; queue++) {
                struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];

                pr_info("\tTX Queue %u rings\n", queue);

                if (priv->extend_desc)
                        head_tx = (void *)tx_q->dma_etx;
                else
                        head_tx = (void *)tx_q->dma_tx;

                priv->hw->desc->display_ring(head_tx, DMA_TX_SIZE, false);
        }
}

static void stmmac_display_rings(struct stmmac_priv *priv)
{
        /* Display RX ring */
        stmmac_display_rx_rings(priv);

        /* Display TX ring */
        stmmac_display_tx_rings(priv);
}

static int stmmac_set_bfsize(int mtu, int bufsize)
{
        int ret = bufsize;

        if (mtu >= BUF_SIZE_4KiB)
                ret = BUF_SIZE_8KiB;
        else if (mtu >= BUF_SIZE_2KiB)
                ret = BUF_SIZE_4KiB;
        else if (mtu > DEFAULT_BUFSIZE)
                ret = BUF_SIZE_2KiB;
        else
                ret = DEFAULT_BUFSIZE;

        return ret;
}

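/*
 * Examples for stmmac_set_bfsize(), following the thresholds above:
 * mtu 1500 -> DEFAULT_BUFSIZE (1536), mtu 3000 -> BUF_SIZE_4KiB,
 * mtu 9000 -> BUF_SIZE_8KiB.
 */
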
/**
 * stmmac_clear_rx_descriptors - clear RX descriptors
 * @priv: driver private structure
 * @queue: RX queue index
 * Description: this function is called to clear the RX descriptors, whether
 * basic or extended descriptors are used.
 */
static void stmmac_clear_rx_descriptors(struct stmmac_priv *priv, u32 queue)
{
        struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
        int i;

        /* Clear the RX descriptors */
        for (i = 0; i < DMA_RX_SIZE; i++)
                if (priv->extend_desc)
                        priv->hw->desc->init_rx_desc(&rx_q->dma_erx[i].basic,
                                                     priv->use_riwt, priv->mode,
                                                     (i == DMA_RX_SIZE - 1));
                else
                        priv->hw->desc->init_rx_desc(&rx_q->dma_rx[i],
                                                     priv->use_riwt, priv->mode,
                                                     (i == DMA_RX_SIZE - 1));
}

/**
 * stmmac_clear_tx_descriptors - clear tx descriptors
 * @priv: driver private structure
 * @queue: TX queue index.
 * Description: this function is called to clear the TX descriptors, whether
 * basic or extended descriptors are used.
 */
static void stmmac_clear_tx_descriptors(struct stmmac_priv *priv, u32 queue)
{
        struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
        int i;

        /* Clear the TX descriptors */
        for (i = 0; i < DMA_TX_SIZE; i++)
                if (priv->extend_desc)
                        priv->hw->desc->init_tx_desc(&tx_q->dma_etx[i].basic,
                                                     priv->mode,
                                                     (i == DMA_TX_SIZE - 1));
                else
                        priv->hw->desc->init_tx_desc(&tx_q->dma_tx[i],
                                                     priv->mode,
                                                     (i == DMA_TX_SIZE - 1));
}

/**
 * stmmac_clear_descriptors - clear descriptors
 * @priv: driver private structure
 * Description: this function is called to clear the TX and RX descriptors,
 * whether basic or extended descriptors are used.
 */
static void stmmac_clear_descriptors(struct stmmac_priv *priv)
{
        u32 rx_queue_cnt = priv->plat->rx_queues_to_use;
        u32 tx_queue_cnt = priv->plat->tx_queues_to_use;
        u32 queue;

        /* Clear the RX descriptors */
        for (queue = 0; queue < rx_queue_cnt; queue++)
                stmmac_clear_rx_descriptors(priv, queue);

        /* Clear the TX descriptors */
        for (queue = 0; queue < tx_queue_cnt; queue++)
                stmmac_clear_tx_descriptors(priv, queue);
}

/**
 * stmmac_init_rx_buffers - init the RX descriptor buffer.
 * @priv: driver private structure
 * @p: descriptor pointer
 * @i: descriptor index
 * @flags: gfp flag
 * @queue: RX queue index
 * Description: this function is called to allocate a receive buffer, perform
 * the DMA mapping and init the descriptor.
 */
static int stmmac_init_rx_buffers(struct stmmac_priv *priv, struct dma_desc *p,
                                  int i, gfp_t flags, u32 queue)
{
        struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
        struct sk_buff *skb;

        skb = __netdev_alloc_skb_ip_align(priv->dev, priv->dma_buf_sz, flags);
        if (!skb) {
                netdev_err(priv->dev,
                           "%s: Rx init fails; skb is NULL\n", __func__);
                return -ENOMEM;
        }
        rx_q->rx_skbuff[i] = skb;
        rx_q->rx_skbuff_dma[i] = dma_map_single(priv->device, skb->data,
                                                priv->dma_buf_sz,
                                                DMA_FROM_DEVICE);
        if (dma_mapping_error(priv->device, rx_q->rx_skbuff_dma[i])) {
                netdev_err(priv->dev, "%s: DMA mapping error\n", __func__);
                dev_kfree_skb_any(skb);
                return -EINVAL;
        }

        if (priv->synopsys_id >= DWMAC_CORE_4_00)
                p->des0 = cpu_to_le32(rx_q->rx_skbuff_dma[i]);
        else
                p->des2 = cpu_to_le32(rx_q->rx_skbuff_dma[i]);

        if ((priv->hw->mode->init_desc3) &&
            (priv->dma_buf_sz == BUF_SIZE_16KiB))
                priv->hw->mode->init_desc3(p);

        return 0;
}

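/*
 * Note: as handled above, cores >= DWMAC_CORE_4_00 take the receive buffer
 * address in des0 while older cores take it in des2. init_desc3() is only
 * invoked for the 16 KiB buffer case, where ring mode presumably uses des3
 * to address the second half of the buffer.
 */
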
/**
 * stmmac_free_rx_buffer - free RX dma buffers
 * @priv: private structure
 * @queue: RX queue index
 * @i: buffer index.
 */
static void stmmac_free_rx_buffer(struct stmmac_priv *priv, u32 queue, int i)
{
        struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];

        if (rx_q->rx_skbuff[i]) {
                dma_unmap_single(priv->device, rx_q->rx_skbuff_dma[i],
                                 priv->dma_buf_sz, DMA_FROM_DEVICE);
                dev_kfree_skb_any(rx_q->rx_skbuff[i]);
        }
        rx_q->rx_skbuff[i] = NULL;
}

/**
 * stmmac_free_tx_buffer - free TX dma buffers
 * @priv: private structure
 * @queue: TX queue index
 * @i: buffer index.
 */
static void stmmac_free_tx_buffer(struct stmmac_priv *priv, u32 queue, int i)
{
        struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];

        if (tx_q->tx_skbuff_dma[i].buf) {
                if (tx_q->tx_skbuff_dma[i].map_as_page)
                        dma_unmap_page(priv->device,
                                       tx_q->tx_skbuff_dma[i].buf,
                                       tx_q->tx_skbuff_dma[i].len,
                                       DMA_TO_DEVICE);
                else
                        dma_unmap_single(priv->device,
                                         tx_q->tx_skbuff_dma[i].buf,
                                         tx_q->tx_skbuff_dma[i].len,
                                         DMA_TO_DEVICE);
        }

        if (tx_q->tx_skbuff[i]) {
                dev_kfree_skb_any(tx_q->tx_skbuff[i]);
                tx_q->tx_skbuff[i] = NULL;
                tx_q->tx_skbuff_dma[i].buf = 0;
                tx_q->tx_skbuff_dma[i].map_as_page = false;
        }
}

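/*
 * Note: map_as_page tells the unmap path whether the buffer came from an skb
 * fragment (dma_unmap_page) or from linear skb data (dma_unmap_single),
 * presumably mirroring how the xmit path mapped it.
 */
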
/**
 * init_dma_rx_desc_rings - init the RX descriptor rings
 * @dev: net device structure
 * @flags: gfp flag.
 * Description: this function initializes the DMA RX descriptors
 * and allocates the socket buffers. It supports the chained and ring
 * modes.
 */
static int init_dma_rx_desc_rings(struct net_device *dev, gfp_t flags)
{
        struct stmmac_priv *priv = netdev_priv(dev);
        u32 rx_count = priv->plat->rx_queues_to_use;
        unsigned int bfsize = 0;
        int ret = -ENOMEM;
        int queue;
        int i;

        if (priv->hw->mode->set_16kib_bfsize)
                bfsize = priv->hw->mode->set_16kib_bfsize(dev->mtu);

        if (bfsize < BUF_SIZE_16KiB)
                bfsize = stmmac_set_bfsize(dev->mtu, priv->dma_buf_sz);

        priv->dma_buf_sz = bfsize;

        /* RX INITIALIZATION */
        netif_dbg(priv, probe, priv->dev,
                  "SKB addresses:\nskb\t\tskb data\tdma data\n");

        for (queue = 0; queue < rx_count; queue++) {
                struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];

                netif_dbg(priv, probe, priv->dev,
                          "(%s) dma_rx_phy=0x%08x\n", __func__,
                          (u32)rx_q->dma_rx_phy);

                for (i = 0; i < DMA_RX_SIZE; i++) {
                        struct dma_desc *p;

                        if (priv->extend_desc)
                                p = &((rx_q->dma_erx + i)->basic);
                        else
                                p = rx_q->dma_rx + i;

                        ret = stmmac_init_rx_buffers(priv, p, i, flags,
                                                     queue);
                        if (ret)
                                goto err_init_rx_buffers;

                        netif_dbg(priv, probe, priv->dev, "[%p]\t[%p]\t[%x]\n",
                                  rx_q->rx_skbuff[i], rx_q->rx_skbuff[i]->data,
                                  (unsigned int)rx_q->rx_skbuff_dma[i]);
                }

                rx_q->cur_rx = 0;
                rx_q->dirty_rx = (unsigned int)(i - DMA_RX_SIZE);

                stmmac_clear_rx_descriptors(priv, queue);

                /* Setup the chained descriptor addresses */
                if (priv->mode == STMMAC_CHAIN_MODE) {
                        if (priv->extend_desc)
                                priv->hw->mode->init(rx_q->dma_erx,
                                                     rx_q->dma_rx_phy,
                                                     DMA_RX_SIZE, 1);
                        else
                                priv->hw->mode->init(rx_q->dma_rx,
                                                     rx_q->dma_rx_phy,
                                                     DMA_RX_SIZE, 0);
                }
        }

        buf_sz = bfsize;

        return 0;

err_init_rx_buffers:
        while (queue >= 0) {
                while (--i >= 0)
                        stmmac_free_rx_buffer(priv, queue, i);

                if (queue == 0)
                        break;

                i = DMA_RX_SIZE;
                queue--;
        }

        return ret;
}

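/*
 * Note: in the error path above, buffers are released in reverse order:
 * first the partially filled queue (indices i-1 down to 0), then every fully
 * initialized queue before it, so no skb or DMA mapping is leaked. Also,
 * since i == DMA_RX_SIZE after a successful fill loop, the dirty_rx
 * initializer (i - DMA_RX_SIZE) is simply zero.
 */
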
/**
 * init_dma_tx_desc_rings - init the TX descriptor rings
 * @dev: net device structure.
 * Description: this function initializes the DMA TX descriptors
 * and allocates the socket buffers. It supports the chained and ring
 * modes.
 */
static int init_dma_tx_desc_rings(struct net_device *dev)
{
        struct stmmac_priv *priv = netdev_priv(dev);
        u32 tx_queue_cnt = priv->plat->tx_queues_to_use;
        u32 queue;
        int i;

        for (queue = 0; queue < tx_queue_cnt; queue++) {
                struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];

                netif_dbg(priv, probe, priv->dev,
                          "(%s) dma_tx_phy=0x%08x\n", __func__,
                          (u32)tx_q->dma_tx_phy);

                /* Setup the chained descriptor addresses */
                if (priv->mode == STMMAC_CHAIN_MODE) {
                        if (priv->extend_desc)
                                priv->hw->mode->init(tx_q->dma_etx,
                                                     tx_q->dma_tx_phy,
                                                     DMA_TX_SIZE, 1);
                        else
                                priv->hw->mode->init(tx_q->dma_tx,
                                                     tx_q->dma_tx_phy,
                                                     DMA_TX_SIZE, 0);
                }

                for (i = 0; i < DMA_TX_SIZE; i++) {
                        struct dma_desc *p;

                        if (priv->extend_desc)
                                p = &((tx_q->dma_etx + i)->basic);
                        else
                                p = tx_q->dma_tx + i;

                        if (priv->synopsys_id >= DWMAC_CORE_4_00) {
                                p->des0 = 0;
                                p->des1 = 0;
                                p->des2 = 0;
                                p->des3 = 0;
                        } else {
                                p->des2 = 0;
                        }

                        tx_q->tx_skbuff_dma[i].buf = 0;
                        tx_q->tx_skbuff_dma[i].map_as_page = false;
                        tx_q->tx_skbuff_dma[i].len = 0;
                        tx_q->tx_skbuff_dma[i].last_segment = false;
                        tx_q->tx_skbuff[i] = NULL;
                }

                tx_q->dirty_tx = 0;
                tx_q->cur_tx = 0;

                netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, queue));
        }

        return 0;
}

/**
 * init_dma_desc_rings - init the RX/TX descriptor rings
 * @dev: net device structure
 * @flags: gfp flag.
 * Description: this function initializes the DMA RX/TX descriptors
 * and allocates the socket buffers. It supports the chained and ring
 * modes.
 */
static int init_dma_desc_rings(struct net_device *dev, gfp_t flags)
{
        struct stmmac_priv *priv = netdev_priv(dev);
        int ret;

        ret = init_dma_rx_desc_rings(dev, flags);
        if (ret)
                return ret;

        ret = init_dma_tx_desc_rings(dev);

        stmmac_clear_descriptors(priv);

        if (netif_msg_hw(priv))
                stmmac_display_rings(priv);

        return ret;
}
1391
1392 /**
1393  * dma_free_rx_skbufs - free RX dma buffers
1394  * @priv: private structure
1395  * @queue: RX queue index
1396  */
1397 static void dma_free_rx_skbufs(struct stmmac_priv *priv, u32 queue)
1398 {
1399         int i;
1400
1401         for (i = 0; i < DMA_RX_SIZE; i++)
1402                 stmmac_free_rx_buffer(priv, queue, i);
1403 }
1404
1405 /**
1406  * dma_free_tx_skbufs - free TX dma buffers
1407  * @priv: private structure
1408  * @queue: TX queue index
1409  */
1410 static void dma_free_tx_skbufs(struct stmmac_priv *priv, u32 queue)
1411 {
1412         int i;
1413
1414         for (i = 0; i < DMA_TX_SIZE; i++)
1415                 stmmac_free_tx_buffer(priv, queue, i);
1416 }
1417
1418 /**
1419  * free_dma_rx_desc_resources - free RX dma desc resources
1420  * @priv: private structure
1421  */
1422 static void free_dma_rx_desc_resources(struct stmmac_priv *priv)
1423 {
1424         u32 rx_count = priv->plat->rx_queues_to_use;
1425         u32 queue;
1426
1427         /* Free RX queue resources */
1428         for (queue = 0; queue < rx_count; queue++) {
1429                 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1430
1431                 /* Release the DMA RX socket buffers */
1432                 dma_free_rx_skbufs(priv, queue);
1433
1434                 /* Free DMA regions of consistent memory previously allocated */
1435                 if (!priv->extend_desc)
1436                         dma_free_coherent(priv->device,
1437                                           DMA_RX_SIZE * sizeof(struct dma_desc),
1438                                           rx_q->dma_rx, rx_q->dma_rx_phy);
1439                 else
1440                         dma_free_coherent(priv->device, DMA_RX_SIZE *
1441                                           sizeof(struct dma_extended_desc),
1442                                           rx_q->dma_erx, rx_q->dma_rx_phy);
1443
1444                 kfree(rx_q->rx_skbuff_dma);
1445                 kfree(rx_q->rx_skbuff);
1446         }
1447 }
1448
1449 /**
1450  * free_dma_tx_desc_resources - free TX dma desc resources
1451  * @priv: private structure
1452  */
1453 static void free_dma_tx_desc_resources(struct stmmac_priv *priv)
1454 {
1455         u32 tx_count = priv->plat->tx_queues_to_use;
1456         u32 queue;
1457
1458         /* Free TX queue resources */
1459         for (queue = 0; queue < tx_count; queue++) {
1460                 struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1461
1462                 /* Release the DMA TX socket buffers */
1463                 dma_free_tx_skbufs(priv, queue);
1464
1465                 /* Free DMA regions of consistent memory previously allocated */
1466                 if (!priv->extend_desc)
1467                         dma_free_coherent(priv->device,
1468                                           DMA_TX_SIZE * sizeof(struct dma_desc),
1469                                           tx_q->dma_tx, tx_q->dma_tx_phy);
1470                 else
1471                         dma_free_coherent(priv->device, DMA_TX_SIZE *
1472                                           sizeof(struct dma_extended_desc),
1473                                           tx_q->dma_etx, tx_q->dma_tx_phy);
1474
1475                 kfree(tx_q->tx_skbuff_dma);
1476                 kfree(tx_q->tx_skbuff);
1477         }
1478 }
1479
1480 /**
1481  * alloc_dma_rx_desc_resources - alloc RX resources.
1482  * @priv: private structure
1483  * Description: according to which descriptor can be used (extended or basic)
1484  * this function allocates the resources for the RX path. It pre-allocates
1485  * the RX socket buffers in order to allow the zero-copy
1486  * mechanism.
1487  */
1488 static int alloc_dma_rx_desc_resources(struct stmmac_priv *priv)
1489 {
1490         u32 rx_count = priv->plat->rx_queues_to_use;
1491         int ret = -ENOMEM;
1492         u32 queue;
1493
1494         /* RX queues buffers and DMA */
1495         for (queue = 0; queue < rx_count; queue++) {
1496                 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1497
1498                 rx_q->queue_index = queue;
1499                 rx_q->priv_data = priv;
1500
1501                 rx_q->rx_skbuff_dma = kmalloc_array(DMA_RX_SIZE,
1502                                                     sizeof(dma_addr_t),
1503                                                     GFP_KERNEL);
1504                 if (!rx_q->rx_skbuff_dma)
1505                         goto err_dma;
1506
1507                 rx_q->rx_skbuff = kmalloc_array(DMA_RX_SIZE,
1508                                                 sizeof(struct sk_buff *),
1509                                                 GFP_KERNEL);
1510                 if (!rx_q->rx_skbuff)
1511                         goto err_dma;
1512
1513                 if (priv->extend_desc) {
1514                         rx_q->dma_erx = dma_zalloc_coherent(priv->device,
1515                                                             DMA_RX_SIZE *
1516                                                             sizeof(struct
1517                                                             dma_extended_desc),
1518                                                             &rx_q->dma_rx_phy,
1519                                                             GFP_KERNEL);
1520                         if (!rx_q->dma_erx)
1521                                 goto err_dma;
1522
1523                 } else {
1524                         rx_q->dma_rx = dma_zalloc_coherent(priv->device,
1525                                                            DMA_RX_SIZE *
1526                                                            sizeof(struct
1527                                                            dma_desc),
1528                                                            &rx_q->dma_rx_phy,
1529                                                            GFP_KERNEL);
1530                         if (!rx_q->dma_rx)
1531                                 goto err_dma;
1532                 }
1533         }
1534
1535         return 0;
1536
1537 err_dma:
1538         free_dma_rx_desc_resources(priv);
1539
1540         return ret;
1541 }
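
/*
 * Illustrative sketch (not part of the driver): the allocation pattern used
 * above, reduced to a single ring. A zeroed coherent region is obtained
 * with dma_zalloc_coherent() and every failure funnels through one error
 * label, so previously allocated queues are released in a single place:
 *
 *	struct dma_desc *ring;
 *	dma_addr_t ring_phy;
 *
 *	ring = dma_zalloc_coherent(priv->device,
 *				   DMA_RX_SIZE * sizeof(*ring),
 *				   &ring_phy, GFP_KERNEL);
 *	if (!ring)
 *		goto err_dma;	(free_dma_rx_desc_resources() unwinds)
 *
 * kfree() on the rx_skbuff/rx_skbuff_dma arrays and dma_free_coherent() on
 * the ring are the matching release calls in free_dma_rx_desc_resources().
 */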
1542
1543 /**
1544  * alloc_dma_tx_desc_resources - alloc TX resources.
1545  * @priv: private structure
1546  * Description: according to which descriptor can be used (extended or basic)
1547  * this function allocates the resources for the TX path: the TX DMA
1548  * descriptor rings and the per-entry bookkeeping needed to unmap the
1549  * buffers at completion time.
1550  */
1551 static int alloc_dma_tx_desc_resources(struct stmmac_priv *priv)
1552 {
1553         u32 tx_count = priv->plat->tx_queues_to_use;
1554         int ret = -ENOMEM;
1555         u32 queue;
1556
1557         /* TX queues buffers and DMA */
1558         for (queue = 0; queue < tx_count; queue++) {
1559                 struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1560
1561                 tx_q->queue_index = queue;
1562                 tx_q->priv_data = priv;
1563
1564                 tx_q->tx_skbuff_dma = kmalloc_array(DMA_TX_SIZE,
1565                                                     sizeof(*tx_q->tx_skbuff_dma),
1566                                                     GFP_KERNEL);
1567                 if (!tx_q->tx_skbuff_dma)
1568                         goto err_dma;
1569
1570                 tx_q->tx_skbuff = kmalloc_array(DMA_TX_SIZE,
1571                                                 sizeof(struct sk_buff *),
1572                                                 GFP_KERNEL);
1573                 if (!tx_q->tx_skbuff)
1574                         goto err_dma;
1575
1576                 if (priv->extend_desc) {
1577                         tx_q->dma_etx = dma_zalloc_coherent(priv->device,
1578                                                             DMA_TX_SIZE *
1579                                                             sizeof(struct
1580                                                             dma_extended_desc),
1581                                                             &tx_q->dma_tx_phy,
1582                                                             GFP_KERNEL);
1583                         if (!tx_q->dma_etx)
1584                                 goto err_dma;
1585                 } else {
1586                         tx_q->dma_tx = dma_zalloc_coherent(priv->device,
1587                                                            DMA_TX_SIZE *
1588                                                            sizeof(struct
1589                                                                   dma_desc),
1590                                                            &tx_q->dma_tx_phy,
1591                                                            GFP_KERNEL);
1592                         if (!tx_q->dma_tx)
1593                                 goto err_dma;
1594                 }
1595         }
1596
1597         return 0;
1598
1599 err_dma:
1600         free_dma_tx_desc_resources(priv);
1601
1602         return ret;
1603 }
1604
1605 /**
1606  * alloc_dma_desc_resources - alloc TX/RX resources.
1607  * @priv: private structure
1608  * Description: according to which descriptor can be used (extended or basic)
1609  * this function allocates the resources for the TX and RX paths. In case of
1610  * reception, for example, it pre-allocates the RX socket buffers in order to
1611  * allow the zero-copy mechanism.
1612  */
1613 static int alloc_dma_desc_resources(struct stmmac_priv *priv)
1614 {
1615         /* RX Allocation */
1616         int ret = alloc_dma_rx_desc_resources(priv);
1617
1618         if (ret)
1619                 return ret;
1620
1621         ret = alloc_dma_tx_desc_resources(priv);
1622
1623         return ret;
1624 }
1625
1626 /**
1627  * free_dma_desc_resources - free dma desc resources
1628  * @priv: private structure
1629  */
1630 static void free_dma_desc_resources(struct stmmac_priv *priv)
1631 {
1632         /* Release the DMA RX socket buffers */
1633         free_dma_rx_desc_resources(priv);
1634
1635         /* Release the DMA TX socket buffers */
1636         free_dma_tx_desc_resources(priv);
1637 }
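
/*
 * Illustrative sketch (not part of the driver): the intended lifecycle of
 * the descriptor resources, as exercised by stmmac_open()/stmmac_release():
 *
 *	ret = alloc_dma_desc_resources(priv);
 *	if (ret < 0)
 *		return ret;
 *	ret = init_dma_desc_rings(dev, GFP_KERNEL);
 *	...
 *	free_dma_desc_resources(priv);
 *
 * Allocation and initialization are deliberately split so a ring can be
 * re-initialized (e.g. after a TX error) without re-allocating memory.
 */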
1638
1639 /**
1640  *  stmmac_mac_enable_rx_queues - Enable MAC rx queues
1641  *  @priv: driver private structure
1642  *  Description: It is used for enabling the rx queues in the MAC
1643  */
1644 static void stmmac_mac_enable_rx_queues(struct stmmac_priv *priv)
1645 {
1646         u32 rx_queues_count = priv->plat->rx_queues_to_use;
1647         int queue;
1648         u8 mode;
1649
1650         for (queue = 0; queue < rx_queues_count; queue++) {
1651                 mode = priv->plat->rx_queues_cfg[queue].mode_to_use;
1652                 priv->hw->mac->rx_queue_enable(priv->hw, mode, queue);
1653         }
1654 }
1655
1656 /**
1657  * stmmac_start_rx_dma - start RX DMA channel
1658  * @priv: driver private structure
1659  * @chan: RX channel index
1660  * Description:
1661  * This starts an RX DMA channel
1662  */
1663 static void stmmac_start_rx_dma(struct stmmac_priv *priv, u32 chan)
1664 {
1665         netdev_dbg(priv->dev, "DMA RX processes started in channel %d\n", chan);
1666         priv->hw->dma->start_rx(priv->ioaddr, chan);
1667 }
1668
1669 /**
1670  * stmmac_start_tx_dma - start TX DMA channel
1671  * @priv: driver private structure
1672  * @chan: TX channel index
1673  * Description:
1674  * This starts a TX DMA channel
1675  */
1676 static void stmmac_start_tx_dma(struct stmmac_priv *priv, u32 chan)
1677 {
1678         netdev_dbg(priv->dev, "DMA TX processes started in channel %d\n", chan);
1679         priv->hw->dma->start_tx(priv->ioaddr, chan);
1680 }
1681
1682 /**
1683  * stmmac_stop_rx_dma - stop RX DMA channel
1684  * @priv: driver private structure
1685  * @chan: RX channel index
1686  * Description:
1687  * This stops an RX DMA channel
1688  */
1689 static void stmmac_stop_rx_dma(struct stmmac_priv *priv, u32 chan)
1690 {
1691         netdev_dbg(priv->dev, "DMA RX processes stopped in channel %d\n", chan);
1692         priv->hw->dma->stop_rx(priv->ioaddr, chan);
1693 }
1694
1695 /**
1696  * stmmac_stop_tx_dma - stop TX DMA channel
1697  * @priv: driver private structure
1698  * @chan: TX channel index
1699  * Description:
1700  * This stops a TX DMA channel
1701  */
1702 static void stmmac_stop_tx_dma(struct stmmac_priv *priv, u32 chan)
1703 {
1704         netdev_dbg(priv->dev, "DMA TX processes stopped in channel %d\n", chan);
1705         priv->hw->dma->stop_tx(priv->ioaddr, chan);
1706 }
1707
1708 /**
1709  * stmmac_start_all_dma - start all RX and TX DMA channels
1710  * @priv: driver private structure
1711  * Description:
1712  * This starts all the RX and TX DMA channels
1713  */
1714 static void stmmac_start_all_dma(struct stmmac_priv *priv)
1715 {
1716         u32 rx_channels_count = priv->plat->rx_queues_to_use;
1717         u32 tx_channels_count = priv->plat->tx_queues_to_use;
1718         u32 chan = 0;
1719
1720         for (chan = 0; chan < rx_channels_count; chan++)
1721                 stmmac_start_rx_dma(priv, chan);
1722
1723         for (chan = 0; chan < tx_channels_count; chan++)
1724                 stmmac_start_tx_dma(priv, chan);
1725 }
1726
1727 /**
1728  * stmmac_stop_all_dma - stop all RX and TX DMA channels
1729  * @priv: driver private structure
1730  * Description:
1731  * This stops the RX and TX DMA channels
1732  */
1733 static void stmmac_stop_all_dma(struct stmmac_priv *priv)
1734 {
1735         u32 rx_channels_count = priv->plat->rx_queues_to_use;
1736         u32 tx_channels_count = priv->plat->tx_queues_to_use;
1737         u32 chan = 0;
1738
1739         for (chan = 0; chan < rx_channels_count; chan++)
1740                 stmmac_stop_rx_dma(priv, chan);
1741
1742         for (chan = 0; chan < tx_channels_count; chan++)
1743                 stmmac_stop_tx_dma(priv, chan);
1744 }
1745
1746 /**
1747  *  stmmac_dma_operation_mode - HW DMA operation mode
1748  *  @priv: driver private structure
1749  *  Description: it is used for configuring the DMA operation mode register in
1750  *  order to program the tx/rx DMA thresholds or Store-And-Forward mode.
1751  */
1752 static void stmmac_dma_operation_mode(struct stmmac_priv *priv)
1753 {
1754         u32 rx_channels_count = priv->plat->rx_queues_to_use;
1755         u32 tx_channels_count = priv->plat->tx_queues_to_use;
1756         int rxfifosz = priv->plat->rx_fifo_size;
1757         int txfifosz = priv->plat->tx_fifo_size;
1758         u32 txmode = 0;
1759         u32 rxmode = 0;
1760         u32 chan = 0;
1761         u8 qmode = 0;
1762
1763         if (rxfifosz == 0)
1764                 rxfifosz = priv->dma_cap.rx_fifo_size;
1765         if (txfifosz == 0)
1766                 txfifosz = priv->dma_cap.tx_fifo_size;
1767
1768         /* Adjust for real per queue fifo size */
1769         rxfifosz /= rx_channels_count;
1770         txfifosz /= tx_channels_count;
1771
1772         if (priv->plat->force_thresh_dma_mode) {
1773                 txmode = tc;
1774                 rxmode = tc;
1775         } else if (priv->plat->force_sf_dma_mode || priv->plat->tx_coe) {
1776                 /*
1777                  * In case of GMAC, SF mode can be enabled
1778                  * to perform the TX COE in HW. This depends on:
1779                  * 1) TX COE being actually supported;
1780                  * 2) there being no buggy Jumbo frame support
1781                  *    that requires the csum not to be inserted in the TDES.
1782                  */
1783                 txmode = SF_DMA_MODE;
1784                 rxmode = SF_DMA_MODE;
1785                 priv->xstats.threshold = SF_DMA_MODE;
1786         } else {
1787                 txmode = tc;
1788                 rxmode = SF_DMA_MODE;
1789         }
1790
1791         /* configure all channels */
1792         if (priv->synopsys_id >= DWMAC_CORE_4_00) {
1793                 for (chan = 0; chan < rx_channels_count; chan++) {
1794                         qmode = priv->plat->rx_queues_cfg[chan].mode_to_use;
1795
1796                         priv->hw->dma->dma_rx_mode(priv->ioaddr, rxmode, chan,
1797                                                    rxfifosz, qmode);
1798                 }
1799
1800                 for (chan = 0; chan < tx_channels_count; chan++) {
1801                         qmode = priv->plat->tx_queues_cfg[chan].mode_to_use;
1802
1803                         priv->hw->dma->dma_tx_mode(priv->ioaddr, txmode, chan,
1804                                                    txfifosz, qmode);
1805                 }
1806         } else {
1807                 priv->hw->dma->dma_mode(priv->ioaddr, txmode, rxmode,
1808                                         rxfifosz);
1809         }
1810 }
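
/*
 * Illustrative sketch (not part of the driver): the FIFO split computed
 * above. With, say, an 8 KiB RX FIFO shared by 4 RX queues, each queue is
 * programmed with 2 KiB:
 *
 *	rxfifosz = priv->plat->rx_fifo_size ?: priv->dma_cap.rx_fifo_size;
 *	rxfifosz /= rx_channels_count;	(8192 / 4 = 2048)
 *
 * The TX mode is then either a threshold value ("tc") or SF_DMA_MODE
 * (Store-And-Forward), the latter being required when the TX checksum is
 * computed in HW.
 */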
1811
1812 /**
1813  * stmmac_tx_clean - to manage the transmission completion
1814  * @priv: driver private structure
1815  * @queue: TX queue index
1816  * Description: it reclaims the transmit resources after transmission completes.
1817  */
1818 static void stmmac_tx_clean(struct stmmac_priv *priv, u32 queue)
1819 {
1820         struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1821         unsigned int bytes_compl = 0, pkts_compl = 0;
1822         unsigned int entry;
1823
1824         netif_tx_lock(priv->dev);
1825
1826         priv->xstats.tx_clean++;
1827
1828         entry = tx_q->dirty_tx;
1829         while (entry != tx_q->cur_tx) {
1830                 struct sk_buff *skb = tx_q->tx_skbuff[entry];
1831                 struct dma_desc *p;
1832                 int status;
1833
1834                 if (priv->extend_desc)
1835                         p = (struct dma_desc *)(tx_q->dma_etx + entry);
1836                 else
1837                         p = tx_q->dma_tx + entry;
1838
1839                 status = priv->hw->desc->tx_status(&priv->dev->stats,
1840                                                       &priv->xstats, p,
1841                                                       priv->ioaddr);
1842                 /* Check if the descriptor is owned by the DMA */
1843                 if (unlikely(status & tx_dma_own))
1844                         break;
1845
1846                 /* Just consider the last segment and ...*/
1847                 if (likely(!(status & tx_not_ls))) {
1848                         /* ... verify the status error condition */
1849                         if (unlikely(status & tx_err)) {
1850                                 priv->dev->stats.tx_errors++;
1851                         } else {
1852                                 priv->dev->stats.tx_packets++;
1853                                 priv->xstats.tx_pkt_n++;
1854                         }
1855                         stmmac_get_tx_hwtstamp(priv, p, skb);
1856                 }
1857
1858                 if (likely(tx_q->tx_skbuff_dma[entry].buf)) {
1859                         if (tx_q->tx_skbuff_dma[entry].map_as_page)
1860                                 dma_unmap_page(priv->device,
1861                                                tx_q->tx_skbuff_dma[entry].buf,
1862                                                tx_q->tx_skbuff_dma[entry].len,
1863                                                DMA_TO_DEVICE);
1864                         else
1865                                 dma_unmap_single(priv->device,
1866                                                  tx_q->tx_skbuff_dma[entry].buf,
1867                                                  tx_q->tx_skbuff_dma[entry].len,
1868                                                  DMA_TO_DEVICE);
1869                         tx_q->tx_skbuff_dma[entry].buf = 0;
1870                         tx_q->tx_skbuff_dma[entry].len = 0;
1871                         tx_q->tx_skbuff_dma[entry].map_as_page = false;
1872                 }
1873
1874                 if (priv->hw->mode->clean_desc3)
1875                         priv->hw->mode->clean_desc3(tx_q, p);
1876
1877                 tx_q->tx_skbuff_dma[entry].last_segment = false;
1878                 tx_q->tx_skbuff_dma[entry].is_jumbo = false;
1879
1880                 if (likely(skb != NULL)) {
1881                         pkts_compl++;
1882                         bytes_compl += skb->len;
1883                         dev_consume_skb_any(skb);
1884                         tx_q->tx_skbuff[entry] = NULL;
1885                 }
1886
1887                 priv->hw->desc->release_tx_desc(p, priv->mode);
1888
1889                 entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE);
1890         }
1891         tx_q->dirty_tx = entry;
1892
1893         netdev_tx_completed_queue(netdev_get_tx_queue(priv->dev, queue),
1894                                   pkts_compl, bytes_compl);
1895
1896         if (unlikely(netif_tx_queue_stopped(netdev_get_tx_queue(priv->dev,
1897                                                                 queue))) &&
1898             stmmac_tx_avail(priv, queue) > STMMAC_TX_THRESH) {
1899
1900                 netif_dbg(priv, tx_done, priv->dev,
1901                           "%s: restart transmit\n", __func__);
1902                 netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, queue));
1903         }
1904
1905         if ((priv->eee_enabled) && (!priv->tx_path_in_lpi_mode)) {
1906                 stmmac_enable_eee_mode(priv);
1907                 mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(eee_timer));
1908         }
1909         netif_tx_unlock(priv->dev);
1910 }
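
/*
 * Illustrative sketch (not part of the driver): the ring-reclaim pattern
 * used by stmmac_tx_clean(). "dirty_tx" chases "cur_tx"; each entry is
 * checked for DMA ownership before being unmapped and released, and the
 * index wraps with STMMAC_GET_ENTRY():
 *
 *	entry = tx_q->dirty_tx;
 *	while (entry != tx_q->cur_tx) {
 *		if (status & tx_dma_own)
 *			break;
 *		(unmap the buffer, free the skb, release the descriptor)
 *		entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE);
 *	}
 *	tx_q->dirty_tx = entry;
 *
 * Stopping at the first DMA-owned descriptor keeps completion in order.
 */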
1911
1912 static inline void stmmac_enable_dma_irq(struct stmmac_priv *priv, u32 chan)
1913 {
1914         priv->hw->dma->enable_dma_irq(priv->ioaddr, chan);
1915 }
1916
1917 static inline void stmmac_disable_dma_irq(struct stmmac_priv *priv, u32 chan)
1918 {
1919         priv->hw->dma->disable_dma_irq(priv->ioaddr, chan);
1920 }
1921
1922 /**
1923  * stmmac_tx_err - to manage the tx error
1924  * @priv: driver private structure
1925  * @chan: channel index
1926  * Description: it cleans the descriptors and restarts the transmission
1927  * in case of transmission errors.
1928  */
1929 static void stmmac_tx_err(struct stmmac_priv *priv, u32 chan)
1930 {
1931         struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan];
1932         int i;
1933
1934         netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, chan));
1935
1936         stmmac_stop_tx_dma(priv, chan);
1937         dma_free_tx_skbufs(priv, chan);
1938         for (i = 0; i < DMA_TX_SIZE; i++)
1939                 if (priv->extend_desc)
1940                         priv->hw->desc->init_tx_desc(&tx_q->dma_etx[i].basic,
1941                                                      priv->mode,
1942                                                      (i == DMA_TX_SIZE - 1));
1943                 else
1944                         priv->hw->desc->init_tx_desc(&tx_q->dma_tx[i],
1945                                                      priv->mode,
1946                                                      (i == DMA_TX_SIZE - 1));
1947         tx_q->dirty_tx = 0;
1948         tx_q->cur_tx = 0;
1949         netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, chan));
1950         stmmac_start_tx_dma(priv, chan);
1951
1952         priv->dev->stats.tx_errors++;
1953         netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, chan));
1954 }
1955
1956 /**
1957  *  stmmac_set_dma_operation_mode - Set DMA operation mode by channel
1958  *  @priv: driver private structure
1959  *  @txmode: TX operating mode
1960  *  @rxmode: RX operating mode
1961  *  @chan: channel index
1962  *  Description: it is used for configuring the DMA operation mode at
1963  *  runtime in order to program the tx/rx DMA thresholds or Store-And-Forward
1964  *  mode.
1965  */
1966 static void stmmac_set_dma_operation_mode(struct stmmac_priv *priv, u32 txmode,
1967                                           u32 rxmode, u32 chan)
1968 {
1969         u8 rxqmode = priv->plat->rx_queues_cfg[chan].mode_to_use;
1970         u8 txqmode = priv->plat->tx_queues_cfg[chan].mode_to_use;
1971         u32 rx_channels_count = priv->plat->rx_queues_to_use;
1972         u32 tx_channels_count = priv->plat->tx_queues_to_use;
1973         int rxfifosz = priv->plat->rx_fifo_size;
1974         int txfifosz = priv->plat->tx_fifo_size;
1975
1976         if (rxfifosz == 0)
1977                 rxfifosz = priv->dma_cap.rx_fifo_size;
1978         if (txfifosz == 0)
1979                 txfifosz = priv->dma_cap.tx_fifo_size;
1980
1981         /* Adjust for real per queue fifo size */
1982         rxfifosz /= rx_channels_count;
1983         txfifosz /= tx_channels_count;
1984
1985         if (priv->synopsys_id >= DWMAC_CORE_4_00) {
1986                 priv->hw->dma->dma_rx_mode(priv->ioaddr, rxmode, chan,
1987                                            rxfifosz, rxqmode);
1988                 priv->hw->dma->dma_tx_mode(priv->ioaddr, txmode, chan,
1989                                            txfifosz, txqmode);
1990         } else {
1991                 priv->hw->dma->dma_mode(priv->ioaddr, txmode, rxmode,
1992                                         rxfifosz);
1993         }
1994 }
1995
1996 /**
1997  * stmmac_dma_interrupt - DMA ISR
1998  * @priv: driver private structure
1999  * Description: this is the DMA ISR. It is called by the main ISR.
2000  * It calls the dwmac dma routine and schedule poll method in case of some
2001  * work can be done.
2002  */
2003 static void stmmac_dma_interrupt(struct stmmac_priv *priv)
2004 {
2005         u32 tx_channel_count = priv->plat->tx_queues_to_use;
2006         int status;
2007         u32 chan;
2008
2009         for (chan = 0; chan < tx_channel_count; chan++) {
2010                 struct stmmac_rx_queue *rx_q = &priv->rx_queue[chan];
2011
2012                 status = priv->hw->dma->dma_interrupt(priv->ioaddr,
2013                                                       &priv->xstats, chan);
2014                 if (likely((status & handle_rx)) || (status & handle_tx)) {
2015                         if (likely(napi_schedule_prep(&rx_q->napi))) {
2016                                 stmmac_disable_dma_irq(priv, chan);
2017                                 __napi_schedule(&rx_q->napi);
2018                         }
2019                 }
2020
2021                 if (unlikely(status & tx_hard_error_bump_tc)) {
2022                         /* Try to bump up the dma threshold on this failure */
2023                         if (unlikely(priv->xstats.threshold != SF_DMA_MODE) &&
2024                             (tc <= 256)) {
2025                                 tc += 64;
2026                                 if (priv->plat->force_thresh_dma_mode)
2027                                         stmmac_set_dma_operation_mode(priv,
2028                                                                       tc,
2029                                                                       tc,
2030                                                                       chan);
2031                                 else
2032                                         stmmac_set_dma_operation_mode(priv,
2033                                                                     tc,
2034                                                                     SF_DMA_MODE,
2035                                                                     chan);
2036                                 priv->xstats.threshold = tc;
2037                         }
2038                 } else if (unlikely(status == tx_hard_error)) {
2039                         stmmac_tx_err(priv, chan);
2040                 }
2041         }
2042 }
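
/*
 * Illustrative sketch (not part of the driver): the canonical NAPI hand-off
 * used above. The per-channel DMA interrupt is masked before the poll
 * method is scheduled and is re-enabled from the poll routine once the
 * budget is not exhausted:
 *
 *	if (likely(napi_schedule_prep(&rx_q->napi))) {
 *		stmmac_disable_dma_irq(priv, chan);
 *		__napi_schedule(&rx_q->napi);
 *	}
 *
 * napi_schedule_prep() guarantees the poll is queued only once even if
 * further interrupts fire before it runs.
 */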
2043
2044 /**
2045  * stmmac_mmc_setup: setup the Mac Management Counters (MMC)
2046  * @priv: driver private structure
2047  * Description: this masks the MMC irq since the counters are managed in SW.
2048  */
2049 static void stmmac_mmc_setup(struct stmmac_priv *priv)
2050 {
2051         unsigned int mode = MMC_CNTRL_RESET_ON_READ | MMC_CNTRL_COUNTER_RESET |
2052                             MMC_CNTRL_PRESET | MMC_CNTRL_FULL_HALF_PRESET;
2053
2054         if (priv->synopsys_id >= DWMAC_CORE_4_00) {
2055                 priv->ptpaddr = priv->ioaddr + PTP_GMAC4_OFFSET;
2056                 priv->mmcaddr = priv->ioaddr + MMC_GMAC4_OFFSET;
2057         } else {
2058                 priv->ptpaddr = priv->ioaddr + PTP_GMAC3_X_OFFSET;
2059                 priv->mmcaddr = priv->ioaddr + MMC_GMAC3_X_OFFSET;
2060         }
2061
2062         dwmac_mmc_intr_all_mask(priv->mmcaddr);
2063
2064         if (priv->dma_cap.rmon) {
2065                 dwmac_mmc_ctrl(priv->mmcaddr, mode);
2066                 memset(&priv->mmc, 0, sizeof(struct stmmac_counters));
2067         } else
2068                 netdev_info(priv->dev, "No MAC Management Counters available\n");
2069 }
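
/*
 * Illustrative sketch (not part of the driver): with the counters managed
 * in SW, the snapshot is refreshed on demand (e.g. from the ethtool path in
 * stmmac_ethtool.c) rather than from the masked IRQ:
 *
 *	if (priv->dma_cap.rmon)
 *		dwmac_mmc_read(priv->mmcaddr, &priv->mmc);
 *
 * Because MMC_CNTRL_RESET_ON_READ clears each HW counter as it is read,
 * priv->mmc accumulates the running totals.
 */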
2070
2071 /**
2072  * stmmac_selec_desc_mode - select among normal/alternate/extended descriptors
2073  * @priv: driver private structure
2074  * Description: select the Enhanced/Alternate or Normal descriptors.
2075  * In case of Enhanced/Alternate, it checks if the extended descriptors are
2076  * supported by the HW capability register.
2077  */
2078 static void stmmac_selec_desc_mode(struct stmmac_priv *priv)
2079 {
2080         if (priv->plat->enh_desc) {
2081                 dev_info(priv->device, "Enhanced/Alternate descriptors\n");
2082
2083                 /* GMAC older than 3.50 has no extended descriptors */
2084                 if (priv->synopsys_id >= DWMAC_CORE_3_50) {
2085                         dev_info(priv->device, "Enabled extended descriptors\n");
2086                         priv->extend_desc = 1;
2087                 } else
2088                         dev_warn(priv->device, "Extended descriptors not supported\n");
2089
2090                 priv->hw->desc = &enh_desc_ops;
2091         } else {
2092                 dev_info(priv->device, "Normal descriptors\n");
2093                 priv->hw->desc = &ndesc_ops;
2094         }
2095 }
2096
2097 /**
2098  * stmmac_get_hw_features - get MAC capabilities from the HW cap. register.
2099  * @priv: driver private structure
2100  * Description:
2101  *  new GMAC chip generations have a new register to indicate the
2102  *  presence of the optional features/functions.
2103  *  This can also be used to override the value passed through the
2104  *  platform, which is still necessary for old MAC10/100 and GMAC chips.
2105  */
2106 static int stmmac_get_hw_features(struct stmmac_priv *priv)
2107 {
2108         u32 ret = 0;
2109
2110         if (priv->hw->dma->get_hw_feature) {
2111                 priv->hw->dma->get_hw_feature(priv->ioaddr,
2112                                               &priv->dma_cap);
2113                 ret = 1;
2114         }
2115
2116         return ret;
2117 }
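
/*
 * Illustrative sketch (not part of the driver): once the capability
 * register has been dumped into priv->dma_cap, optional features are gated
 * on it rather than assumed. The probe code, for instance, only advertises
 * TSO when both the platform and the HW support it:
 *
 *	if ((priv->plat->tso_en) && (priv->dma_cap.tsoen)) {
 *		ndev->hw_features |= NETIF_F_TSO;
 *		priv->tso = true;
 *	}
 */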
2118
2119 /**
2120  * stmmac_check_ether_addr - check if the MAC addr is valid
2121  * @priv: driver private structure
2122  * Description:
2123  * it verifies that the MAC address is valid; in case of failure it
2124  * generates a random MAC address
2125  */
2126 static void stmmac_check_ether_addr(struct stmmac_priv *priv)
2127 {
2128         if (!is_valid_ether_addr(priv->dev->dev_addr)) {
2129                 priv->hw->mac->get_umac_addr(priv->hw,
2130                                              priv->dev->dev_addr, 0);
2131                 if (!is_valid_ether_addr(priv->dev->dev_addr))
2132                         eth_hw_addr_random(priv->dev);
2133                 netdev_info(priv->dev, "device MAC address %pM\n",
2134                             priv->dev->dev_addr);
2135         }
2136 }
2137
2138 /**
2139  * stmmac_init_dma_engine - DMA init.
2140  * @priv: driver private structure
2141  * Description:
2142  * It inits the DMA invoking the specific MAC/GMAC callback.
2143  * Some DMA parameters can be passed from the platform;
2144  * if they are not passed, a default is kept for the MAC or GMAC.
2145  */
2146 static int stmmac_init_dma_engine(struct stmmac_priv *priv)
2147 {
2148         u32 rx_channels_count = priv->plat->rx_queues_to_use;
2149         u32 tx_channels_count = priv->plat->tx_queues_to_use;
2150         struct stmmac_rx_queue *rx_q;
2151         struct stmmac_tx_queue *tx_q;
2152         u32 dummy_dma_rx_phy = 0;
2153         u32 dummy_dma_tx_phy = 0;
2154         u32 chan = 0;
2155         int atds = 0;
2156         int ret = 0;
2157
2158         if (!priv->plat->dma_cfg || !priv->plat->dma_cfg->pbl) {
2159                 dev_err(priv->device, "Invalid DMA configuration\n");
2160                 return -EINVAL;
2161         }
2162
2163         if (priv->extend_desc && (priv->mode == STMMAC_RING_MODE))
2164                 atds = 1;
2165
2166         ret = priv->hw->dma->reset(priv->ioaddr);
2167         if (ret) {
2168                 dev_err(priv->device, "Failed to reset the dma\n");
2169                 return ret;
2170         }
2171
2172         if (priv->synopsys_id >= DWMAC_CORE_4_00) {
2173                 /* DMA Configuration */
2174                 priv->hw->dma->init(priv->ioaddr, priv->plat->dma_cfg,
2175                                     dummy_dma_tx_phy, dummy_dma_rx_phy, atds);
2176
2177                 /* DMA RX Channel Configuration */
2178                 for (chan = 0; chan < rx_channels_count; chan++) {
2179                         rx_q = &priv->rx_queue[chan];
2180
2181                         priv->hw->dma->init_rx_chan(priv->ioaddr,
2182                                                     priv->plat->dma_cfg,
2183                                                     rx_q->dma_rx_phy, chan);
2184
2185                         rx_q->rx_tail_addr = rx_q->dma_rx_phy +
2186                                     (DMA_RX_SIZE * sizeof(struct dma_desc));
2187                         priv->hw->dma->set_rx_tail_ptr(priv->ioaddr,
2188                                                        rx_q->rx_tail_addr,
2189                                                        chan);
2190                 }
2191
2192                 /* DMA TX Channel Configuration */
2193                 for (chan = 0; chan < tx_channels_count; chan++) {
2194                         tx_q = &priv->tx_queue[chan];
2195
2196                         priv->hw->dma->init_chan(priv->ioaddr,
2197                                                  priv->plat->dma_cfg,
2198                                                  chan);
2199
2200                         priv->hw->dma->init_tx_chan(priv->ioaddr,
2201                                                     priv->plat->dma_cfg,
2202                                                     tx_q->dma_tx_phy, chan);
2203
2204                         tx_q->tx_tail_addr = tx_q->dma_tx_phy +
2205                                     (DMA_TX_SIZE * sizeof(struct dma_desc));
2206                         priv->hw->dma->set_tx_tail_ptr(priv->ioaddr,
2207                                                        tx_q->tx_tail_addr,
2208                                                        chan);
2209                 }
2210         } else {
2211                 rx_q = &priv->rx_queue[chan];
2212                 tx_q = &priv->tx_queue[chan];
2213                 priv->hw->dma->init(priv->ioaddr, priv->plat->dma_cfg,
2214                                     tx_q->dma_tx_phy, rx_q->dma_rx_phy, atds);
2215         }
2216
2217         if (priv->plat->axi && priv->hw->dma->axi)
2218                 priv->hw->dma->axi(priv->ioaddr, priv->plat->axi);
2219
2220         return ret;
2221 }
2222
2223 /**
2224  * stmmac_tx_timer - mitigation sw timer for tx.
2225  * @t: timer_list pointer
2226  * Description:
2227  * This is the timer handler used to directly invoke stmmac_tx_clean().
2228  */
2229 static void stmmac_tx_timer(struct timer_list *t)
2230 {
2231         struct stmmac_priv *priv = from_timer(priv, t, txtimer);
2232         u32 tx_queues_count = priv->plat->tx_queues_to_use;
2233         u32 queue;
2234
2235         /* let's scan all the tx queues */
2236         for (queue = 0; queue < tx_queues_count; queue++)
2237                 stmmac_tx_clean(priv, queue);
2238 }
2239
2240 /**
2241  * stmmac_init_tx_coalesce - init tx mitigation options.
2242  * @priv: driver private structure
2243  * Description:
2244  * This inits the transmit coalesce parameters: i.e. timer rate,
2245  * timer handler and default threshold used for enabling the
2246  * interrupt on completion bit.
2247  */
2248 static void stmmac_init_tx_coalesce(struct stmmac_priv *priv)
2249 {
2250         priv->tx_coal_frames = STMMAC_TX_FRAMES;
2251         priv->tx_coal_timer = STMMAC_COAL_TX_TIMER;
2252         timer_setup(&priv->txtimer, stmmac_tx_timer, 0);
2253         priv->txtimer.expires = STMMAC_COAL_TIMER(priv->tx_coal_timer);
2254         add_timer(&priv->txtimer);
2255 }
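
/*
 * Illustrative sketch (not part of the driver): the timer_setup() /
 * from_timer() pairing used above. The callback receives the timer_list
 * pointer and recovers the enclosing structure by field name:
 *
 *	timer_setup(&priv->txtimer, stmmac_tx_timer, 0);
 *
 *	static void stmmac_tx_timer(struct timer_list *t)
 *	{
 *		struct stmmac_priv *priv = from_timer(priv, t, txtimer);
 *		...
 *	}
 *
 * The hot path re-arms it with mod_timer() while completions are pending.
 */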
2256
2257 static void stmmac_set_rings_length(struct stmmac_priv *priv)
2258 {
2259         u32 rx_channels_count = priv->plat->rx_queues_to_use;
2260         u32 tx_channels_count = priv->plat->tx_queues_to_use;
2261         u32 chan;
2262
2263         /* set TX ring length */
2264         if (priv->hw->dma->set_tx_ring_len) {
2265                 for (chan = 0; chan < tx_channels_count; chan++)
2266                         priv->hw->dma->set_tx_ring_len(priv->ioaddr,
2267                                                        (DMA_TX_SIZE - 1), chan);
2268         }
2269
2270         /* set RX ring length */
2271         if (priv->hw->dma->set_rx_ring_len) {
2272                 for (chan = 0; chan < rx_channels_count; chan++)
2273                         priv->hw->dma->set_rx_ring_len(priv->ioaddr,
2274                                                        (DMA_RX_SIZE - 1), chan);
2275         }
2276 }
2277
2278 /**
2279  *  stmmac_set_tx_queue_weight - Set TX queue weight
2280  *  @priv: driver private structure
2281  *  Description: It is used for setting the TX queue weights
2282  */
2283 static void stmmac_set_tx_queue_weight(struct stmmac_priv *priv)
2284 {
2285         u32 tx_queues_count = priv->plat->tx_queues_to_use;
2286         u32 weight;
2287         u32 queue;
2288
2289         for (queue = 0; queue < tx_queues_count; queue++) {
2290                 weight = priv->plat->tx_queues_cfg[queue].weight;
2291                 priv->hw->mac->set_mtl_tx_queue_weight(priv->hw, weight, queue);
2292         }
2293 }
2294
2295 /**
2296  *  stmmac_configure_cbs - Configure CBS in TX queue
2297  *  @priv: driver private structure
2298  *  Description: It is used for configuring CBS in AVB TX queues
2299  */
2300 static void stmmac_configure_cbs(struct stmmac_priv *priv)
2301 {
2302         u32 tx_queues_count = priv->plat->tx_queues_to_use;
2303         u32 mode_to_use;
2304         u32 queue;
2305
2306         /* queue 0 is reserved for legacy traffic */
2307         for (queue = 1; queue < tx_queues_count; queue++) {
2308                 mode_to_use = priv->plat->tx_queues_cfg[queue].mode_to_use;
2309                 if (mode_to_use == MTL_QUEUE_DCB)
2310                         continue;
2311
2312                 priv->hw->mac->config_cbs(priv->hw,
2313                                 priv->plat->tx_queues_cfg[queue].send_slope,
2314                                 priv->plat->tx_queues_cfg[queue].idle_slope,
2315                                 priv->plat->tx_queues_cfg[queue].high_credit,
2316                                 priv->plat->tx_queues_cfg[queue].low_credit,
2317                                 queue);
2318         }
2319 }
2320
2321 /**
2322  *  stmmac_rx_queue_dma_chan_map - Map RX queue to RX dma channel
2323  *  @priv: driver private structure
2324  *  Description: It is used for mapping RX queues to RX dma channels
2325  */
2326 static void stmmac_rx_queue_dma_chan_map(struct stmmac_priv *priv)
2327 {
2328         u32 rx_queues_count = priv->plat->rx_queues_to_use;
2329         u32 queue;
2330         u32 chan;
2331
2332         for (queue = 0; queue < rx_queues_count; queue++) {
2333                 chan = priv->plat->rx_queues_cfg[queue].chan;
2334                 priv->hw->mac->map_mtl_to_dma(priv->hw, queue, chan);
2335         }
2336 }
2337
2338 /**
2339  *  stmmac_mac_config_rx_queues_prio - Configure RX Queue priority
2340  *  @priv: driver private structure
2341  *  Description: It is used for configuring the RX Queue Priority
2342  */
2343 static void stmmac_mac_config_rx_queues_prio(struct stmmac_priv *priv)
2344 {
2345         u32 rx_queues_count = priv->plat->rx_queues_to_use;
2346         u32 queue;
2347         u32 prio;
2348
2349         for (queue = 0; queue < rx_queues_count; queue++) {
2350                 if (!priv->plat->rx_queues_cfg[queue].use_prio)
2351                         continue;
2352
2353                 prio = priv->plat->rx_queues_cfg[queue].prio;
2354                 priv->hw->mac->rx_queue_prio(priv->hw, prio, queue);
2355         }
2356 }
2357
2358 /**
2359  *  stmmac_mac_config_tx_queues_prio - Configure TX Queue priority
2360  *  @priv: driver private structure
2361  *  Description: It is used for configuring the TX Queue Priority
2362  */
2363 static void stmmac_mac_config_tx_queues_prio(struct stmmac_priv *priv)
2364 {
2365         u32 tx_queues_count = priv->plat->tx_queues_to_use;
2366         u32 queue;
2367         u32 prio;
2368
2369         for (queue = 0; queue < tx_queues_count; queue++) {
2370                 if (!priv->plat->tx_queues_cfg[queue].use_prio)
2371                         continue;
2372
2373                 prio = priv->plat->tx_queues_cfg[queue].prio;
2374                 priv->hw->mac->tx_queue_prio(priv->hw, prio, queue);
2375         }
2376 }
2377
2378 /**
2379  *  stmmac_mac_config_rx_queues_routing - Configure RX Queue Routing
2380  *  @priv: driver private structure
2381  *  Description: It is used for configuring the RX queue routing
2382  */
2383 static void stmmac_mac_config_rx_queues_routing(struct stmmac_priv *priv)
2384 {
2385         u32 rx_queues_count = priv->plat->rx_queues_to_use;
2386         u32 queue;
2387         u8 packet;
2388
2389         for (queue = 0; queue < rx_queues_count; queue++) {
2390                 /* no specific packet type routing specified for the queue */
2391                 if (priv->plat->rx_queues_cfg[queue].pkt_route == 0x0)
2392                         continue;
2393
2394                 packet = priv->plat->rx_queues_cfg[queue].pkt_route;
2395                 priv->hw->mac->rx_queue_routing(priv->hw, packet, queue);
2396         }
2397 }
2398
2399 /**
2400  *  stmmac_mtl_configuration - Configure MTL
2401  *  @priv: driver private structure
2402  *  Description: It is used for configuring the MTL
2403  */
2404 static void stmmac_mtl_configuration(struct stmmac_priv *priv)
2405 {
2406         u32 rx_queues_count = priv->plat->rx_queues_to_use;
2407         u32 tx_queues_count = priv->plat->tx_queues_to_use;
2408
2409         if (tx_queues_count > 1 && priv->hw->mac->set_mtl_tx_queue_weight)
2410                 stmmac_set_tx_queue_weight(priv);
2411
2412         /* Configure MTL RX algorithms */
2413         if (rx_queues_count > 1 && priv->hw->mac->prog_mtl_rx_algorithms)
2414                 priv->hw->mac->prog_mtl_rx_algorithms(priv->hw,
2415                                                 priv->plat->rx_sched_algorithm);
2416
2417         /* Configure MTL TX algorithms */
2418         if (tx_queues_count > 1 && priv->hw->mac->prog_mtl_tx_algorithms)
2419                 priv->hw->mac->prog_mtl_tx_algorithms(priv->hw,
2420                                                 priv->plat->tx_sched_algorithm);
2421
2422         /* Configure CBS in AVB TX queues */
2423         if (tx_queues_count > 1 && priv->hw->mac->config_cbs)
2424                 stmmac_configure_cbs(priv);
2425
2426         /* Map RX MTL to DMA channels */
2427         if (priv->hw->mac->map_mtl_to_dma)
2428                 stmmac_rx_queue_dma_chan_map(priv);
2429
2430         /* Enable MAC RX Queues */
2431         if (priv->hw->mac->rx_queue_enable)
2432                 stmmac_mac_enable_rx_queues(priv);
2433
2434         /* Set RX priorities */
2435         if (rx_queues_count > 1 && priv->hw->mac->rx_queue_prio)
2436                 stmmac_mac_config_rx_queues_prio(priv);
2437
2438         /* Set TX priorities */
2439         if (tx_queues_count > 1 && priv->hw->mac->tx_queue_prio)
2440                 stmmac_mac_config_tx_queues_prio(priv);
2441
2442         /* Set RX routing */
2443         if (rx_queues_count > 1 && priv->hw->mac->rx_queue_routing)
2444                 stmmac_mac_config_rx_queues_routing(priv);
2445 }
2446
2447 /**
2448  * stmmac_hw_setup - setup mac in a usable state.
2449  *  @dev : pointer to the device structure.
2450  *  @init_ptp: initialize PTP if set.
2451  *  Description: this is the main function to setup the HW in a usable
2452  *  state: the dma engine is reset, the core registers are configured
2453  *  (e.g. AXI, Checksum features, timers) and the DMA is ready to start
2454  *  receiving and transmitting.
2455  *  Return value:
2456  *  0 on success and an appropriate (-)ve integer as defined in errno.h
2457  *  file on failure.
2458  */
2459 static int stmmac_hw_setup(struct net_device *dev, bool init_ptp)
2460 {
2461         struct stmmac_priv *priv = netdev_priv(dev);
2462         u32 rx_cnt = priv->plat->rx_queues_to_use;
2463         u32 tx_cnt = priv->plat->tx_queues_to_use;
2464         u32 chan;
2465         int ret;
2466
2467         /* DMA initialization and SW reset */
2468         ret = stmmac_init_dma_engine(priv);
2469         if (ret < 0) {
2470                 netdev_err(priv->dev, "%s: DMA engine initialization failed\n",
2471                            __func__);
2472                 return ret;
2473         }
2474
2475         /* Copy the MAC addr into the HW  */
2476         priv->hw->mac->set_umac_addr(priv->hw, dev->dev_addr, 0);
2477
2478         /* PS and related bits will be programmed according to the speed */
2479         if (priv->hw->pcs) {
2480                 int speed = priv->plat->mac_port_sel_speed;
2481
2482                 if ((speed == SPEED_10) || (speed == SPEED_100) ||
2483                     (speed == SPEED_1000)) {
2484                         priv->hw->ps = speed;
2485                 } else {
2486                         dev_warn(priv->device, "invalid port speed\n");
2487                         priv->hw->ps = 0;
2488                 }
2489         }
2490
2491         /* Initialize the MAC Core */
2492         priv->hw->mac->core_init(priv->hw, dev->mtu);
2493
2494         /* Initialize MTL*/
2495         if (priv->synopsys_id >= DWMAC_CORE_4_00)
2496                 stmmac_mtl_configuration(priv);
2497
2498         ret = priv->hw->mac->rx_ipc(priv->hw);
2499         if (!ret) {
2500                 netdev_warn(priv->dev, "RX IPC Checksum Offload disabled\n");
2501                 priv->plat->rx_coe = STMMAC_RX_COE_NONE;
2502                 priv->hw->rx_csum = 0;
2503         }
2504
2505         /* Enable the MAC Rx/Tx */
2506         priv->hw->mac->set_mac(priv->ioaddr, true);
2507
2508         /* Set the HW DMA mode and the COE */
2509         stmmac_dma_operation_mode(priv);
2510
2511         stmmac_mmc_setup(priv);
2512
2513         if (init_ptp) {
2514                 ret = clk_prepare_enable(priv->plat->clk_ptp_ref);
2515                 if (ret < 0)
2516                         netdev_warn(priv->dev, "failed to enable PTP reference clock: %d\n", ret);
2517
2518                 ret = stmmac_init_ptp(priv);
2519                 if (ret == -EOPNOTSUPP)
2520                         netdev_warn(priv->dev, "PTP not supported by HW\n");
2521                 else if (ret)
2522                         netdev_warn(priv->dev, "PTP init failed\n");
2523         }
2524
2525 #ifdef CONFIG_DEBUG_FS
2526         ret = stmmac_init_fs(dev);
2527         if (ret < 0)
2528                 netdev_warn(priv->dev, "%s: failed debugFS registration\n",
2529                             __func__);
2530 #endif
2531         /* Start the ball rolling... */
2532         stmmac_start_all_dma(priv);
2533
2534         priv->tx_lpi_timer = STMMAC_DEFAULT_TWT_LS;
2535
2536         if ((priv->use_riwt) && (priv->hw->dma->rx_watchdog)) {
2537                 priv->rx_riwt = MAX_DMA_RIWT;
2538                 priv->hw->dma->rx_watchdog(priv->ioaddr, MAX_DMA_RIWT, rx_cnt);
2539         }
2540
2541         if (priv->hw->pcs && priv->hw->mac->pcs_ctrl_ane)
2542                 priv->hw->mac->pcs_ctrl_ane(priv->hw, 1, priv->hw->ps, 0);
2543
2544         /* set TX and RX rings length */
2545         stmmac_set_rings_length(priv);
2546
2547         /* Enable TSO */
2548         if (priv->tso) {
2549                 for (chan = 0; chan < tx_cnt; chan++)
2550                         priv->hw->dma->enable_tso(priv->ioaddr, 1, chan);
2551         }
2552
2553         return 0;
2554 }
2555
2556 static void stmmac_hw_teardown(struct net_device *dev)
2557 {
2558         struct stmmac_priv *priv = netdev_priv(dev);
2559
2560         clk_disable_unprepare(priv->plat->clk_ptp_ref);
2561 }
2562
2563 /**
2564  *  stmmac_open - open entry point of the driver
2565  *  @dev : pointer to the device structure.
2566  *  Description:
2567  *  This function is the open entry point of the driver.
2568  *  Return value:
2569  *  0 on success and an appropriate (-)ve integer as defined in errno.h
2570  *  file on failure.
2571  */
2572 static int stmmac_open(struct net_device *dev)
2573 {
2574         struct stmmac_priv *priv = netdev_priv(dev);
2575         int ret;
2576
2577         stmmac_check_ether_addr(priv);
2578
2579         if (priv->hw->pcs != STMMAC_PCS_RGMII &&
2580             priv->hw->pcs != STMMAC_PCS_TBI &&
2581             priv->hw->pcs != STMMAC_PCS_RTBI) {
2582                 ret = stmmac_init_phy(dev);
2583                 if (ret) {
2584                         netdev_err(priv->dev,
2585                                    "%s: Cannot attach to PHY (error: %d)\n",
2586                                    __func__, ret);
2587                         return ret;
2588                 }
2589         }
2590
2591         /* Extra statistics */
2592         memset(&priv->xstats, 0, sizeof(struct stmmac_extra_stats));
2593         priv->xstats.threshold = tc;
2594
2595         priv->dma_buf_sz = STMMAC_ALIGN(buf_sz);
2596         priv->rx_copybreak = STMMAC_RX_COPYBREAK;
2597         priv->mss = 0;
2598
2599         ret = alloc_dma_desc_resources(priv);
2600         if (ret < 0) {
2601                 netdev_err(priv->dev, "%s: DMA descriptors allocation failed\n",
2602                            __func__);
2603                 goto dma_desc_error;
2604         }
2605
2606         ret = init_dma_desc_rings(dev, GFP_KERNEL);
2607         if (ret < 0) {
2608                 netdev_err(priv->dev, "%s: DMA descriptors initialization failed\n",
2609                            __func__);
2610                 goto init_error;
2611         }
2612
2613         ret = stmmac_hw_setup(dev, true);
2614         if (ret < 0) {
2615                 netdev_err(priv->dev, "%s: Hw setup failed\n", __func__);
2616                 goto init_error;
2617         }
2618
2619         stmmac_init_tx_coalesce(priv);
2620
2621         if (dev->phydev)
2622                 phy_start(dev->phydev);
2623
2624         /* Request the IRQ lines */
2625         ret = request_irq(dev->irq, stmmac_interrupt,
2626                           IRQF_SHARED, dev->name, dev);
2627         if (unlikely(ret < 0)) {
2628                 netdev_err(priv->dev,
2629                            "%s: ERROR: allocating the IRQ %d (error: %d)\n",
2630                            __func__, dev->irq, ret);
2631                 goto irq_error;
2632         }
2633
2634         /* Request the Wake IRQ in case another line is used for WoL */
2635         if (priv->wol_irq != dev->irq) {
2636                 ret = request_irq(priv->wol_irq, stmmac_interrupt,
2637                                   IRQF_SHARED, dev->name, dev);
2638                 if (unlikely(ret < 0)) {
2639                         netdev_err(priv->dev,
2640                                    "%s: ERROR: allocating the WoL IRQ %d (%d)\n",
2641                                    __func__, priv->wol_irq, ret);
2642                         goto wolirq_error;
2643                 }
2644         }
2645
2646         /* Request the LPI IRQ in case a separate line is used for it */
2647         if (priv->lpi_irq > 0) {
2648                 ret = request_irq(priv->lpi_irq, stmmac_interrupt, IRQF_SHARED,
2649                                   dev->name, dev);
2650                 if (unlikely(ret < 0)) {
2651                         netdev_err(priv->dev,
2652                                    "%s: ERROR: allocating the LPI IRQ %d (%d)\n",
2653                                    __func__, priv->lpi_irq, ret);
2654                         goto lpiirq_error;
2655                 }
2656         }
2657
2658         stmmac_enable_all_queues(priv);
2659         stmmac_start_all_queues(priv);
2660
2661         return 0;
2662
2663 lpiirq_error:
2664         if (priv->wol_irq != dev->irq)
2665                 free_irq(priv->wol_irq, dev);
2666 wolirq_error:
2667         free_irq(dev->irq, dev);
2668 irq_error:
2669         if (dev->phydev)
2670                 phy_stop(dev->phydev);
2671
2672         del_timer_sync(&priv->txtimer);
2673         stmmac_hw_teardown(dev);
2674 init_error:
2675         free_dma_desc_resources(priv);
2676 dma_desc_error:
2677         if (dev->phydev)
2678                 phy_disconnect(dev->phydev);
2679
2680         return ret;
2681 }
2682
2683 /**
2684  *  stmmac_release - close entry point of the driver
2685  *  @dev : device pointer.
2686  *  Description:
2687  *  This is the stop entry point of the driver.
2688  */
2689 static int stmmac_release(struct net_device *dev)
2690 {
2691         struct stmmac_priv *priv = netdev_priv(dev);
2692
2693         if (priv->eee_enabled)
2694                 del_timer_sync(&priv->eee_ctrl_timer);
2695
2696         /* Stop and disconnect the PHY */
2697         if (dev->phydev) {
2698                 phy_stop(dev->phydev);
2699                 phy_disconnect(dev->phydev);
2700         }
2701
2702         stmmac_stop_all_queues(priv);
2703
2704         stmmac_disable_all_queues(priv);
2705
2706         del_timer_sync(&priv->txtimer);
2707
2708         /* Free the IRQ lines */
2709         free_irq(dev->irq, dev);
2710         if (priv->wol_irq != dev->irq)
2711                 free_irq(priv->wol_irq, dev);
2712         if (priv->lpi_irq > 0)
2713                 free_irq(priv->lpi_irq, dev);
2714
2715         /* Stop TX/RX DMA and clear the descriptors */
2716         stmmac_stop_all_dma(priv);
2717
2718         /* Release and free the Rx/Tx resources */
2719         free_dma_desc_resources(priv);
2720
2721         /* Disable the MAC Rx/Tx */
2722         priv->hw->mac->set_mac(priv->ioaddr, false);
2723
2724         netif_carrier_off(dev);
2725
2726 #ifdef CONFIG_DEBUG_FS
2727         stmmac_exit_fs(dev);
2728 #endif
2729
2730         stmmac_release_ptp(priv);
2731
2732         return 0;
2733 }
2734
2735 /**
2736  *  stmmac_tso_allocator - fill the TX descriptors of a TSO frame
2737  *  @priv: driver private structure
2738  *  @des: buffer start address
2739  *  @total_len: total length to fill in descriptors
2740  *  @last_segment: condition for the last descriptor
2741  *  @queue: TX queue index
2742  *  Description:
2743  *  This function fills a descriptor and requests new descriptors according
2744  *  to the buffer length to fill.
2745  */
2746 static void stmmac_tso_allocator(struct stmmac_priv *priv, unsigned int des,
2747                                  int total_len, bool last_segment, u32 queue)
2748 {
2749         struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
2750         struct dma_desc *desc;
2751         u32 buff_size;
2752         int tmp_len;
2753
2754         tmp_len = total_len;
2755
2756         while (tmp_len > 0) {
2757                 tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, DMA_TX_SIZE);
2758                 desc = tx_q->dma_tx + tx_q->cur_tx;
2759
2760                 desc->des0 = cpu_to_le32(des + (total_len - tmp_len));
2761                 buff_size = tmp_len >= TSO_MAX_BUFF_SIZE ?
2762                             TSO_MAX_BUFF_SIZE : tmp_len;
2763
2764                 priv->hw->desc->prepare_tso_tx_desc(desc, 0, buff_size,
2765                         0, 1,
2766                         (last_segment) && (tmp_len <= TSO_MAX_BUFF_SIZE),
2767                         0, 0);
2768
2769                 tmp_len -= TSO_MAX_BUFF_SIZE;
2770         }
2771 }
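
/*
 * Illustrative sketch (not part of the driver): how the loop above splits a
 * payload into TSO_MAX_BUFF_SIZE chunks. With TSO_MAX_BUFF_SIZE = SZ_16K - 1
 * and a 40000-byte payload, three descriptors are emitted:
 *
 *	16383 + 16383 + 7234 = 40000
 *
 * Each iteration programs des + (total_len - tmp_len) as the buffer address
 * and min(tmp_len, TSO_MAX_BUFF_SIZE) as the length; the last-segment flag
 * is set only when the remainder fits in a single buffer.
 */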
2772
2773 /**
2774  *  stmmac_tso_xmit - Tx entry point of the driver for oversized frames (TSO)
2775  *  @skb : the socket buffer
2776  *  @dev : device pointer
2777  *  Description: this is the transmit function that is called on TSO frames
2778  *  (support available on GMAC4 and newer chips).
2779  *  The diagram below shows the ring programming in the case of TSO frames:
2780  *
2781  *  First Descriptor
2782  *   --------
2783  *   | DES0 |---> buffer1 = L2/L3/L4 header
2784  *   | DES1 |---> TCP Payload (can continue on next descr...)
2785  *   | DES2 |---> buffer 1 and 2 len
2786  *   | DES3 |---> must set TSE, TCP hdr len-> [22:19]. TCP payload len [17:0]
2787  *   --------
2788  *      |
2789  *     ...
2790  *      |
2791  *   --------
2792  *   | DES0 | --| Split TCP Payload on Buffers 1 and 2
2793  *   | DES1 | --|
2794  *   | DES2 | --> buffer 1 and 2 len
2795  *   | DES3 |
2796  *   --------
2797  *
2798  * The MSS is fixed when TSO is enabled, so the TDES3 ctx field is only programmed when it changes.
2799  */
2800 static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
2801 {
2802         struct dma_desc *desc, *first, *mss_desc = NULL;
2803         struct stmmac_priv *priv = netdev_priv(dev);
2804         int nfrags = skb_shinfo(skb)->nr_frags;
2805         u32 queue = skb_get_queue_mapping(skb);
2806         unsigned int first_entry, des;
2807         struct stmmac_tx_queue *tx_q;
2808         int tmp_pay_len = 0;
2809         u32 pay_len, mss;
2810         u8 proto_hdr_len;
2811         int i;
2812
2813         tx_q = &priv->tx_queue[queue];
2814
2815         /* Compute header lengths */
2816         proto_hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
2817
2818         /* Desc availability based on threshold should be safe enough */
2819         if (unlikely(stmmac_tx_avail(priv, queue) <
2820                 (((skb->len - proto_hdr_len) / TSO_MAX_BUFF_SIZE + 1)))) {
2821                 if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) {
2822                         netif_tx_stop_queue(netdev_get_tx_queue(priv->dev,
2823                                                                 queue));
2824                         /* This is a hard error, log it. */
2825                         netdev_err(priv->dev,
2826                                    "%s: Tx Ring full when queue awake\n",
2827                                    __func__);
2828                 }
2829                 return NETDEV_TX_BUSY;
2830         }
2831
2832         pay_len = skb_headlen(skb) - proto_hdr_len; /* no frags */
2833
2834         mss = skb_shinfo(skb)->gso_size;
2835
2836         /* set new MSS value if needed */
2837         if (mss != priv->mss) {
2838                 mss_desc = tx_q->dma_tx + tx_q->cur_tx;
2839                 priv->hw->desc->set_mss(mss_desc, mss);
2840                 priv->mss = mss;
2841                 tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, DMA_TX_SIZE);
2842         }
2843
2844         if (netif_msg_tx_queued(priv)) {
2845                 pr_info("%s: tcphdrlen %d, hdr_len %d, pay_len %d, mss %d\n",
2846                         __func__, tcp_hdrlen(skb), proto_hdr_len, pay_len, mss);
2847                 pr_info("\tskb->len %d, skb->data_len %d\n", skb->len,
2848                         skb->data_len);
2849         }
2850
2851         first_entry = tx_q->cur_tx;
2852
2853         desc = tx_q->dma_tx + first_entry;
2854         first = desc;
2855
2856         /* first descriptor: fill Headers on Buf1 */
2857         des = dma_map_single(priv->device, skb->data, skb_headlen(skb),
2858                              DMA_TO_DEVICE);
2859         if (dma_mapping_error(priv->device, des))
2860                 goto dma_map_err;
2861
2862         tx_q->tx_skbuff_dma[first_entry].buf = des;
2863         tx_q->tx_skbuff_dma[first_entry].len = skb_headlen(skb);
2864
2865         first->des0 = cpu_to_le32(des);
2866
2867         /* Fill start of payload in buff2 of first descriptor */
2868         if (pay_len)
2869                 first->des1 = cpu_to_le32(des + proto_hdr_len);
2870
2871         /* If needed take extra descriptors to fill the remaining payload */
2872         tmp_pay_len = pay_len - TSO_MAX_BUFF_SIZE;
2873
2874         stmmac_tso_allocator(priv, des, tmp_pay_len, (nfrags == 0), queue);
2875
2876         /* Prepare fragments */
2877         for (i = 0; i < nfrags; i++) {
2878                 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2879
2880                 des = skb_frag_dma_map(priv->device, frag, 0,
2881                                        skb_frag_size(frag),
2882                                        DMA_TO_DEVICE);
2883                 if (dma_mapping_error(priv->device, des))
2884                         goto dma_map_err;
2885
2886                 stmmac_tso_allocator(priv, des, skb_frag_size(frag),
2887                                      (i == nfrags - 1), queue);
2888
2889                 tx_q->tx_skbuff_dma[tx_q->cur_tx].buf = des;
2890                 tx_q->tx_skbuff_dma[tx_q->cur_tx].len = skb_frag_size(frag);
2891                 tx_q->tx_skbuff[tx_q->cur_tx] = NULL;
2892                 tx_q->tx_skbuff_dma[tx_q->cur_tx].map_as_page = true;
2893         }
2894
2895         tx_q->tx_skbuff_dma[tx_q->cur_tx].last_segment = true;
2896
2897         /* Only the last descriptor gets to point to the skb. */
2898         tx_q->tx_skbuff[tx_q->cur_tx] = skb;
2899
2900         /* We've used all descriptors we need for this skb, however,
2901          * advance cur_tx so that it references a fresh descriptor.
2902          * ndo_start_xmit will fill this descriptor the next time it's
2903          * called and stmmac_tx_clean may clean up to this descriptor.
2904          */
2905         tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, DMA_TX_SIZE);
2906
2907         if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) {
2908                 netif_dbg(priv, hw, priv->dev, "%s: stop transmitting packets\n",
2909                           __func__);
2910                 netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
2911         }
2912
2913         dev->stats.tx_bytes += skb->len;
2914         priv->xstats.tx_tso_frames++;
2915         priv->xstats.tx_tso_nfrags += nfrags;
2916
2917         /* Manage tx mitigation */
2918         priv->tx_count_frames += nfrags + 1;
2919         if (likely(priv->tx_coal_frames > priv->tx_count_frames)) {
2920                 mod_timer(&priv->txtimer,
2921                           STMMAC_COAL_TIMER(priv->tx_coal_timer));
2922         } else {
2923                 priv->tx_count_frames = 0;
2924                 priv->hw->desc->set_tx_ic(desc);
2925                 priv->xstats.tx_set_ic_bit++;
2926         }
2927
2928         skb_tx_timestamp(skb);
2929
2930         if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
2931                      priv->hwts_tx_en)) {
2932                 /* declare that device is doing timestamping */
2933                 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
2934                 priv->hw->desc->enable_tx_timestamp(first);
2935         }
2936
2937         /* Complete the first descriptor before granting the DMA */
2938         priv->hw->desc->prepare_tso_tx_desc(first, 1,
2939                         proto_hdr_len,
2940                         pay_len,
2941                         1, tx_q->tx_skbuff_dma[first_entry].last_segment,
2942                         tcp_hdrlen(skb) / 4, (skb->len - proto_hdr_len));
2943
2944         /* If context desc is used to change MSS */
2945         if (mss_desc)
2946                 priv->hw->desc->set_tx_owner(mss_desc);
2947
2948         /* The own bit must be the latest setting done when preparing the
2949          * descriptor and then barrier is needed to make sure that
2950          * all is coherent before granting the DMA engine.
2951          */
2952         dma_wmb();
2953
2954         if (netif_msg_pktdata(priv)) {
2955                 pr_info("%s: curr=%d dirty=%d f=%d, e=%d, f_p=%p, nfrags %d\n",
2956                         __func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry,
2957                         tx_q->cur_tx, first, nfrags);
2958
2959                 priv->hw->desc->display_ring((void *)tx_q->dma_tx, DMA_TX_SIZE,
2960                                              0);
2961
2962                 pr_info(">>> frame to be transmitted: ");
2963                 print_pkt(skb->data, skb_headlen(skb));
2964         }
2965
2966         netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
2967
2968         priv->hw->dma->set_tx_tail_ptr(priv->ioaddr, tx_q->tx_tail_addr,
2969                                        queue);
2970
2971         return NETDEV_TX_OK;
2972
2973 dma_map_err:
2974         dev_err(priv->device, "Tx DMA map failed\n");
2975         dev_kfree_skb(skb);
2976         priv->dev->stats.tx_dropped++;
2977         return NETDEV_TX_OK;
2978 }
2979
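/* Worked example for the TSO descriptor math above (illustrative values):
 * a GSO skb with skb->len = 14554, proto_hdr_len = 54 (14 + 20 + 20) and
 * mss = 1448 programs the first descriptor with a TCP header length of
 * tcp_hdrlen/4 = 5 and a payload length of 14500; the hardware then emits
 * ten 1448-byte segments plus one 20-byte tail, replicating the headers in
 * front of each segment.
 */
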
2980 /**
2981  *  stmmac_xmit - Tx entry point of the driver
2982  *  @skb : the socket buffer
2983  *  @dev : device pointer
2984  *  Description : this is the tx entry point of the driver.
2985  *  It programs the chain or the ring and supports oversized frames
2986  *  and SG feature.
2987  */
2988 static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
2989 {
2990         struct stmmac_priv *priv = netdev_priv(dev);
2991         unsigned int nopaged_len = skb_headlen(skb);
2992         int i, csum_insertion = 0, is_jumbo = 0;
2993         u32 queue = skb_get_queue_mapping(skb);
2994         int nfrags = skb_shinfo(skb)->nr_frags;
2995         int entry;
2996         unsigned int first_entry;
2997         struct dma_desc *desc, *first;
2998         struct stmmac_tx_queue *tx_q;
2999         unsigned int enh_desc;
3000         unsigned int des;
3001
3002         tx_q = &priv->tx_queue[queue];
3003
3004         /* Manage oversized TCP frames for GMAC4 device */
3005         if (skb_is_gso(skb) && priv->tso) {
3006                 if (skb_shinfo(skb)->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))
3007                         return stmmac_tso_xmit(skb, dev);
3008         }
3009
3010         if (unlikely(stmmac_tx_avail(priv, queue) < nfrags + 1)) {
3011                 if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) {
3012                         netif_tx_stop_queue(netdev_get_tx_queue(priv->dev,
3013                                                                 queue));
3014                         /* This is a hard error, log it. */
3015                         netdev_err(priv->dev,
3016                                    "%s: Tx Ring full when queue awake\n",
3017                                    __func__);
3018                 }
3019                 return NETDEV_TX_BUSY;
3020         }
3021
3022         if (priv->tx_path_in_lpi_mode)
3023                 stmmac_disable_eee_mode(priv);
3024
3025         entry = tx_q->cur_tx;
3026         first_entry = entry;
3027
3028         csum_insertion = (skb->ip_summed == CHECKSUM_PARTIAL);
3029
3030         if (likely(priv->extend_desc))
3031                 desc = (struct dma_desc *)(tx_q->dma_etx + entry);
3032         else
3033                 desc = tx_q->dma_tx + entry;
3034
3035         first = desc;
3036
3037         enh_desc = priv->plat->enh_desc;
3038         /* To program the descriptors according to the size of the frame */
3039         if (enh_desc)
3040                 is_jumbo = priv->hw->mode->is_jumbo_frm(skb->len, enh_desc);
3041
3042         if (unlikely(is_jumbo) && likely(priv->synopsys_id <
3043                                          DWMAC_CORE_4_00)) {
3044                 entry = priv->hw->mode->jumbo_frm(tx_q, skb, csum_insertion);
3045                 if (unlikely(entry < 0))
3046                         goto dma_map_err;
3047         }
3048
3049         for (i = 0; i < nfrags; i++) {
3050                 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
3051                 int len = skb_frag_size(frag);
3052                 bool last_segment = (i == (nfrags - 1));
3053
3054                 entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE);
3055
3056                 if (likely(priv->extend_desc))
3057                         desc = (struct dma_desc *)(tx_q->dma_etx + entry);
3058                 else
3059                         desc = tx_q->dma_tx + entry;
3060
3061                 des = skb_frag_dma_map(priv->device, frag, 0, len,
3062                                        DMA_TO_DEVICE);
3063                 if (dma_mapping_error(priv->device, des))
3064                         goto dma_map_err; /* should reuse desc w/o issues */
3065
3066                 tx_q->tx_skbuff[entry] = NULL;
3067
3068                 tx_q->tx_skbuff_dma[entry].buf = des;
3069                 if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00))
3070                         desc->des0 = cpu_to_le32(des);
3071                 else
3072                         desc->des2 = cpu_to_le32(des);
3073
3074                 tx_q->tx_skbuff_dma[entry].map_as_page = true;
3075                 tx_q->tx_skbuff_dma[entry].len = len;
3076                 tx_q->tx_skbuff_dma[entry].last_segment = last_segment;
3077
3078                 /* Prepare the descriptor and set the own bit too */
3079                 priv->hw->desc->prepare_tx_desc(desc, 0, len, csum_insertion,
3080                                                 priv->mode, 1, last_segment,
3081                                                 skb->len);
3082         }
3083
3084         /* Only the last descriptor gets to point to the skb. */
3085         tx_q->tx_skbuff[entry] = skb;
3086
3087         /* We've used all descriptors we need for this skb, however,
3088          * advance cur_tx so that it references a fresh descriptor.
3089          * ndo_start_xmit will fill this descriptor the next time it's
3090          * called and stmmac_tx_clean may clean up to this descriptor.
3091          */
3092         entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE);
3093         tx_q->cur_tx = entry;
3094
3095         if (netif_msg_pktdata(priv)) {
3096                 void *tx_head;
3097
3098                 netdev_dbg(priv->dev,
3099                            "%s: curr=%d dirty=%d f=%d, e=%d, first=%p, nfrags=%d",
3100                            __func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry,
3101                            entry, first, nfrags);
3102
3103                 if (priv->extend_desc)
3104                         tx_head = (void *)tx_q->dma_etx;
3105                 else
3106                         tx_head = (void *)tx_q->dma_tx;
3107
3108                 priv->hw->desc->display_ring(tx_head, DMA_TX_SIZE, false);
3109
3110                 netdev_dbg(priv->dev, ">>> frame to be transmitted: ");
3111                 print_pkt(skb->data, skb->len);
3112         }
3113
3114         if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) {
3115                 netif_dbg(priv, hw, priv->dev, "%s: stop transmitting packets\n",
3116                           __func__);
3117                 netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
3118         }
3119
3120         dev->stats.tx_bytes += skb->len;
3121
3122         /* According to the coalesce parameter the IC bit for the latest
3123          * segment is reset and the timer re-started to clean the tx status.
3124          * This approach takes care of the fragments: desc is the first
3125          * element in case of no SG.
3126          */
3127         priv->tx_count_frames += nfrags + 1;
3128         if (likely(priv->tx_coal_frames > priv->tx_count_frames)) {
3129                 mod_timer(&priv->txtimer,
3130                           STMMAC_COAL_TIMER(priv->tx_coal_timer));
3131         } else {
3132                 priv->tx_count_frames = 0;
3133                 priv->hw->desc->set_tx_ic(desc);
3134                 priv->xstats.tx_set_ic_bit++;
3135         }
3136
3137         skb_tx_timestamp(skb);
3138
3139         /* Ready to fill the first descriptor and set the OWN bit w/o any
3140          * problems because all the descriptors are actually ready to be
3141          * passed to the DMA engine.
3142          */
3143         if (likely(!is_jumbo)) {
3144                 bool last_segment = (nfrags == 0);
3145
3146                 des = dma_map_single(priv->device, skb->data,
3147                                      nopaged_len, DMA_TO_DEVICE);
3148                 if (dma_mapping_error(priv->device, des))
3149                         goto dma_map_err;
3150
3151                 tx_q->tx_skbuff_dma[first_entry].buf = des;
3152                 if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00))
3153                         first->des0 = cpu_to_le32(des);
3154                 else
3155                         first->des2 = cpu_to_le32(des);
3156
3157                 tx_q->tx_skbuff_dma[first_entry].len = nopaged_len;
3158                 tx_q->tx_skbuff_dma[first_entry].last_segment = last_segment;
3159
3160                 if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
3161                              priv->hwts_tx_en)) {
3162                         /* declare that device is doing timestamping */
3163                         skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
3164                         priv->hw->desc->enable_tx_timestamp(first);
3165                 }
3166
3167                 /* Prepare the first descriptor setting the OWN bit too */
3168                 priv->hw->desc->prepare_tx_desc(first, 1, nopaged_len,
3169                                                 csum_insertion, priv->mode, 1,
3170                                                 last_segment, skb->len);
3171
3172                 /* The own bit must be the latest setting done when preparing the
3173                  * descriptor and then barrier is needed to make sure that
3174                  * all is coherent before granting the DMA engine.
3175                  */
3176                 dma_wmb();
3177         }
3178
3179         netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
3180
3181         if (priv->synopsys_id < DWMAC_CORE_4_00)
3182                 priv->hw->dma->enable_dma_transmission(priv->ioaddr);
3183         else
3184                 priv->hw->dma->set_tx_tail_ptr(priv->ioaddr, tx_q->tx_tail_addr,
3185                                                queue);
3186
3187         return NETDEV_TX_OK;
3188
3189 dma_map_err:
3190         netdev_err(priv->dev, "Tx DMA map failed\n");
3191         dev_kfree_skb(skb);
3192         priv->dev->stats.tx_dropped++;
3193         return NETDEV_TX_OK;
3194 }
3195
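/* A condensed sketch of the Tx mitigation policy shared by stmmac_xmit()
 * and stmmac_tso_xmit() above: the IC (interrupt-on-completion) bit is
 * only set once every tx_coal_frames frames; in between, the txtimer is
 * re-armed so stale completions still get cleaned (an illustrative
 * restatement, not extra driver code):
 */
#if 0
static void example_tx_mitigation(struct stmmac_priv *priv,
				  struct dma_desc *desc, int frames)
{
	priv->tx_count_frames += frames;
	if (priv->tx_coal_frames > priv->tx_count_frames) {
		/* Below threshold: no IC bit, rely on the coalesce timer */
		mod_timer(&priv->txtimer,
			  STMMAC_COAL_TIMER(priv->tx_coal_timer));
	} else {
		/* Threshold reached: request an interrupt for this frame */
		priv->tx_count_frames = 0;
		priv->hw->desc->set_tx_ic(desc);
	}
}
#endif
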
3196 static void stmmac_rx_vlan(struct net_device *dev, struct sk_buff *skb)
3197 {
3198         struct ethhdr *ehdr;
3199         u16 vlanid;
3200
3201         if ((dev->features & NETIF_F_HW_VLAN_CTAG_RX) ==
3202             NETIF_F_HW_VLAN_CTAG_RX &&
3203             !__vlan_get_tag(skb, &vlanid)) {
3204                 /* pop the vlan tag */
3205                 ehdr = (struct ethhdr *)skb->data;
3206                 memmove(skb->data + VLAN_HLEN, ehdr, ETH_ALEN * 2);
3207                 skb_pull(skb, VLAN_HLEN);
3208                 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlanid);
3209         }
3210 }
3211
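/* stmmac_rx_vlan() pops the 802.1Q tag in place: the 12 bytes of
 * destination + source MAC are moved forward over the 4-byte VLAN header
 * and skb_pull() drops the now stale front. Schematically:
 *
 *   before: [dst(6)][src(6)][TPID(2)][TCI(2)][type(2)][payload]
 *   after:  [dst(6)][src(6)][type(2)][payload]  (TCI saved via hwaccel)
 */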
3212
3213 static inline int stmmac_rx_threshold_count(struct stmmac_rx_queue *rx_q)
3214 {
3215         if (rx_q->rx_zeroc_thresh < STMMAC_RX_THRESH)
3216                 return 0;
3217
3218         return 1;
3219 }
3220
3221 /**
3222  * stmmac_rx_refill - refill used skb preallocated buffers
3223  * @priv: driver private structure
3224  * @queue: RX queue index
3225  * Description : this is to reallocate the skb for the reception process
3226  * that is based on zero-copy.
3227  */
3228 static inline void stmmac_rx_refill(struct stmmac_priv *priv, u32 queue)
3229 {
3230         struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
3231         int dirty = stmmac_rx_dirty(priv, queue);
3232         unsigned int entry = rx_q->dirty_rx;
3233
3234         int bfsize = priv->dma_buf_sz;
3235
3236         while (dirty-- > 0) {
3237                 struct dma_desc *p;
3238
3239                 if (priv->extend_desc)
3240                         p = (struct dma_desc *)(rx_q->dma_erx + entry);
3241                 else
3242                         p = rx_q->dma_rx + entry;
3243
3244                 if (likely(!rx_q->rx_skbuff[entry])) {
3245                         struct sk_buff *skb;
3246
3247                         skb = netdev_alloc_skb_ip_align(priv->dev, bfsize);
3248                         if (unlikely(!skb)) {
3249                                 /* so for a while no zero-copy! */
3250                                 rx_q->rx_zeroc_thresh = STMMAC_RX_THRESH;
3251                                 if (unlikely(net_ratelimit()))
3252                                         dev_err(priv->device,
3253                                                 "failed to alloc skb entry %d\n",
3254                                                 entry);
3255                                 break;
3256                         }
3257
3258                         rx_q->rx_skbuff[entry] = skb;
3259                         rx_q->rx_skbuff_dma[entry] =
3260                             dma_map_single(priv->device, skb->data, bfsize,
3261                                            DMA_FROM_DEVICE);
3262                         if (dma_mapping_error(priv->device,
3263                                               rx_q->rx_skbuff_dma[entry])) {
3264                                 netdev_err(priv->dev, "Rx DMA map failed\n");
3265                                 dev_kfree_skb(skb);
3266                                 break;
3267                         }
3268
3269                         if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00)) {
3270                                 p->des0 = cpu_to_le32(rx_q->rx_skbuff_dma[entry]);
3271                                 p->des1 = 0;
3272                         } else {
3273                                 p->des2 = cpu_to_le32(rx_q->rx_skbuff_dma[entry]);
3274                         }
3275                         if (priv->hw->mode->refill_desc3)
3276                                 priv->hw->mode->refill_desc3(rx_q, p);
3277
3278                         if (rx_q->rx_zeroc_thresh > 0)
3279                                 rx_q->rx_zeroc_thresh--;
3280
3281                         netif_dbg(priv, rx_status, priv->dev,
3282                                   "refill entry #%d\n", entry);
3283                 }
3284                 dma_wmb();
3285
3286                 if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00))
3287                         priv->hw->desc->init_rx_desc(p, priv->use_riwt, 0, 0);
3288                 else
3289                         priv->hw->desc->set_rx_owner(p);
3290
3291                 dma_wmb();
3292
3293                 entry = STMMAC_GET_ENTRY(entry, DMA_RX_SIZE);
3294         }
3295         rx_q->dirty_rx = entry;
3296 }
3297
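/* A minimal sketch of the ring accounting behind the refill above:
 * dirty_rx chases cur_rx, and the number of entries to refill is their
 * distance modulo the ring size (an illustrative restatement of what
 * stmmac_rx_dirty() computes earlier in this file):
 */
#if 0
static unsigned int example_rx_dirty(unsigned int dirty_rx,
				     unsigned int cur_rx)
{
	if (dirty_rx <= cur_rx)
		return cur_rx - dirty_rx;

	return DMA_RX_SIZE - dirty_rx + cur_rx;
}
#endif
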
3298 /**
3299  * stmmac_rx - manage the receive process
3300  * @priv: driver private structure
3301  * @limit: napi budget
3302  * @queue: RX queue index.
3303  * Description: this is the function called by the napi poll method.
3304  * It gets all the frames inside the ring.
3305  */
3306 static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
3307 {
3308         struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
3309         unsigned int entry = rx_q->cur_rx;
3310         int coe = priv->hw->rx_csum;
3311         unsigned int next_entry;
3312         unsigned int count = 0;
3313
3314         if (netif_msg_rx_status(priv)) {
3315                 void *rx_head;
3316
3317                 netdev_dbg(priv->dev, "%s: descriptor ring:\n", __func__);
3318                 if (priv->extend_desc)
3319                         rx_head = (void *)rx_q->dma_erx;
3320                 else
3321                         rx_head = (void *)rx_q->dma_rx;
3322
3323                 priv->hw->desc->display_ring(rx_head, DMA_RX_SIZE, true);
3324         }
3325         while (count < limit) {
3326                 int status;
3327                 struct dma_desc *p;
3328                 struct dma_desc *np;
3329
3330                 if (priv->extend_desc)
3331                         p = (struct dma_desc *)(rx_q->dma_erx + entry);
3332                 else
3333                         p = rx_q->dma_rx + entry;
3334
3335                 /* read the status of the incoming frame */
3336                 status = priv->hw->desc->rx_status(&priv->dev->stats,
3337                                                    &priv->xstats, p);
3338                 /* check if managed by the DMA otherwise go ahead */
3339                 if (unlikely(status & dma_own))
3340                         break;
3341
3342                 count++;
3343
3344                 rx_q->cur_rx = STMMAC_GET_ENTRY(rx_q->cur_rx, DMA_RX_SIZE);
3345                 next_entry = rx_q->cur_rx;
3346
3347                 if (priv->extend_desc)
3348                         np = (struct dma_desc *)(rx_q->dma_erx + next_entry);
3349                 else
3350                         np = rx_q->dma_rx + next_entry;
3351
3352                 prefetch(np);
3353
3354                 if ((priv->extend_desc) && (priv->hw->desc->rx_extended_status))
3355                         priv->hw->desc->rx_extended_status(&priv->dev->stats,
3356                                                            &priv->xstats,
3357                                                            rx_q->dma_erx +
3358                                                            entry);
3359                 if (unlikely(status == discard_frame)) {
3360                         priv->dev->stats.rx_errors++;
3361                         if (priv->hwts_rx_en && !priv->extend_desc) {
3362                                 /* DESC2 & DESC3 will be overwritten by device
3363                                  * with timestamp value, hence reinitialize
3364                                  * them in stmmac_rx_refill() function so that
3365                                  * device can reuse it.
3366                                  */
3367                                 dev_kfree_skb_any(rx_q->rx_skbuff[entry]);
3368                                 rx_q->rx_skbuff[entry] = NULL;
3369                                 dma_unmap_single(priv->device,
3370                                                  rx_q->rx_skbuff_dma[entry],
3371                                                  priv->dma_buf_sz,
3372                                                  DMA_FROM_DEVICE);
3373                         }
3374                 } else {
3375                         struct sk_buff *skb;
3376                         int frame_len;
3377                         unsigned int des;
3378
3379                         if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00))
3380                                 des = le32_to_cpu(p->des0);
3381                         else
3382                                 des = le32_to_cpu(p->des2);
3383
3384                         frame_len = priv->hw->desc->get_rx_frame_len(p, coe);
3385
3386                         /*  If frame length is greater than skb buffer size
3387                          *  (preallocated during init) then the packet is
3388                          *  ignored
3389                          */
3390                         if (frame_len > priv->dma_buf_sz) {
3391                                 netdev_err(priv->dev,
3392                                            "len %d larger than size (%d)\n",
3393                                            frame_len, priv->dma_buf_sz);
3394                                 priv->dev->stats.rx_length_errors++;
3395                                 break;
3396                         }
3397
3398                         /* ACS is set; GMAC core strips PAD/FCS for IEEE 802.3
3399                          * Type frames (LLC/LLC-SNAP)
3400                          */
3401                         if (unlikely(status != llc_snap))
3402                                 frame_len -= ETH_FCS_LEN;
3403
3404                         if (netif_msg_rx_status(priv)) {
3405                                 netdev_dbg(priv->dev, "\tdesc: %p [entry %d] buff=0x%x\n",
3406                                            p, entry, des);
3407                                 if (frame_len > ETH_FRAME_LEN)
3408                                         netdev_dbg(priv->dev, "frame size %d, COE: %d\n",
3409                                                    frame_len, status);
3410                         }
3411
3412                         /* The zero-copy path is always used for all
3413                          * sizes in case of GMAC4 because the used
3414                          * descriptors always need to be refilled.
3415                          */
3416                         if (unlikely(!priv->plat->has_gmac4 &&
3417                                      ((frame_len < priv->rx_copybreak) ||
3418                                      stmmac_rx_threshold_count(rx_q)))) {
3419                                 skb = netdev_alloc_skb_ip_align(priv->dev,
3420                                                                 frame_len);
3421                                 if (unlikely(!skb)) {
3422                                         if (net_ratelimit())
3423                                                 dev_warn(priv->device,
3424                                                          "packet dropped\n");
3425                                         priv->dev->stats.rx_dropped++;
3426                                         break;
3427                                 }
3428
3429                                 dma_sync_single_for_cpu(priv->device,
3430                                                         rx_q->rx_skbuff_dma
3431                                                         [entry], frame_len,
3432                                                         DMA_FROM_DEVICE);
3433                                 skb_copy_to_linear_data(skb,
3434                                                         rx_q->
3435                                                         rx_skbuff[entry]->data,
3436                                                         frame_len);
3437
3438                                 skb_put(skb, frame_len);
3439                                 dma_sync_single_for_device(priv->device,
3440                                                            rx_q->rx_skbuff_dma
3441                                                            [entry], frame_len,
3442                                                            DMA_FROM_DEVICE);
3443                         } else {
3444                                 skb = rx_q->rx_skbuff[entry];
3445                                 if (unlikely(!skb)) {
3446                                         netdev_err(priv->dev,
3447                                                    "%s: Inconsistent Rx chain\n",
3448                                                    priv->dev->name);
3449                                         priv->dev->stats.rx_dropped++;
3450                                         break;
3451                                 }
3452                                 prefetch(skb->data - NET_IP_ALIGN);
3453                                 rx_q->rx_skbuff[entry] = NULL;
3454                                 rx_q->rx_zeroc_thresh++;
3455
3456                                 skb_put(skb, frame_len);
3457                                 dma_unmap_single(priv->device,
3458                                                  rx_q->rx_skbuff_dma[entry],
3459                                                  priv->dma_buf_sz,
3460                                                  DMA_FROM_DEVICE);
3461                         }
3462
3463                         if (netif_msg_pktdata(priv)) {
3464                                 netdev_dbg(priv->dev, "frame received (%d bytes)",
3465                                            frame_len);
3466                                 print_pkt(skb->data, frame_len);
3467                         }
3468
3469                         stmmac_get_rx_hwtstamp(priv, p, np, skb);
3470
3471                         stmmac_rx_vlan(priv->dev, skb);
3472
3473                         skb->protocol = eth_type_trans(skb, priv->dev);
3474
3475                         if (unlikely(!coe))
3476                                 skb_checksum_none_assert(skb);
3477                         else
3478                                 skb->ip_summed = CHECKSUM_UNNECESSARY;
3479
3480                         napi_gro_receive(&rx_q->napi, skb);
3481
3482                         priv->dev->stats.rx_packets++;
3483                         priv->dev->stats.rx_bytes += frame_len;
3484                 }
3485                 entry = next_entry;
3486         }
3487
3488         stmmac_rx_refill(priv, queue);
3489
3490         priv->xstats.rx_pkt_n += count;
3491
3492         return count;
3493 }
3494
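/* The rx_copybreak test above trades one memcpy for a cheaper refill:
 * frames shorter than priv->rx_copybreak (STMMAC_RX_COPYBREAK, 256 bytes
 * by default) are copied into a small fresh skb so the original DMA
 * buffer stays on the ring, while larger frames take the zero-copy path
 * and force a new allocation in stmmac_rx_refill().
 */
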
3495 /**
3496  *  stmmac_poll - stmmac poll method (NAPI)
3497  *  @napi : pointer to the napi structure.
3498  *  @budget : maximum number of packets that the current CPU can receive from
3499  *            all interfaces.
3500  *  Description :
3501  *  To look at the incoming frames and clear the tx resources.
3502  */
3503 static int stmmac_poll(struct napi_struct *napi, int budget)
3504 {
3505         struct stmmac_rx_queue *rx_q =
3506                 container_of(napi, struct stmmac_rx_queue, napi);
3507         struct stmmac_priv *priv = rx_q->priv_data;
3508         u32 tx_count = priv->plat->tx_queues_to_use;
3509         u32 chan = rx_q->queue_index;
3510         int work_done = 0;
3511         u32 queue;
3512
3513         priv->xstats.napi_poll++;
3514
3515         /* check all the queues */
3516         for (queue = 0; queue < tx_count; queue++)
3517                 stmmac_tx_clean(priv, queue);
3518
3519         work_done = stmmac_rx(priv, budget, rx_q->queue_index);
3520         if (work_done < budget) {
3521                 napi_complete_done(napi, work_done);
3522                 stmmac_enable_dma_irq(priv, chan);
3523         }
3524         return work_done;
3525 }
3526
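/* stmmac_poll() follows the standard NAPI contract: consume at most
 * @budget RX packets and only re-enable the device interrupt once less
 * than the budget was used. A generic sketch of that contract
 * (consume_rx()/reenable_rx_irq() are hypothetical placeholders):
 */
#if 0
static int example_poll(struct napi_struct *napi, int budget)
{
	int done = consume_rx(budget);

	if (done < budget) {
		/* Ring is drained: back to interrupt mode */
		napi_complete_done(napi, done);
		reenable_rx_irq();
	}

	return done;
}
#endif
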
3527 /**
3528  *  stmmac_tx_timeout
3529  *  @dev : Pointer to net device structure
3530  *  Description: this function is called when a packet transmission fails to
3531  *   complete within a reasonable time. The driver will mark the error in the
3532  *   netdev structure and arrange for the device to be reset to a sane state
3533  *   in order to transmit a new packet.
3534  */
3535 static void stmmac_tx_timeout(struct net_device *dev)
3536 {
3537         struct stmmac_priv *priv = netdev_priv(dev);
3538         u32 tx_count = priv->plat->tx_queues_to_use;
3539         u32 chan;
3540
3541         /* Clear Tx resources and restart transmitting again */
3542         for (chan = 0; chan < tx_count; chan++)
3543                 stmmac_tx_err(priv, chan);
3544 }
3545
3546 /**
3547  *  stmmac_set_rx_mode - entry point for multicast addressing
3548  *  @dev : pointer to the device structure
3549  *  Description:
3550  *  This function is a driver entry point which gets called by the kernel
3551  *  whenever multicast addresses must be enabled/disabled.
3552  *  Return value:
3553  *  void.
3554  */
3555 static void stmmac_set_rx_mode(struct net_device *dev)
3556 {
3557         struct stmmac_priv *priv = netdev_priv(dev);
3558
3559         priv->hw->mac->set_filter(priv->hw, dev);
3560 }
3561
3562 /**
3563  *  stmmac_change_mtu - entry point to change MTU size for the device.
3564  *  @dev : device pointer.
3565  *  @new_mtu : the new MTU size for the device.
3566  *  Description: the Maximum Transfer Unit (MTU) is used by the network layer
3567  *  to drive packet transmission. Ethernet has an MTU of 1500 octets
3568  *  (ETH_DATA_LEN). This value can be changed with ifconfig.
3569  *  Return value:
3570  *  0 on success and an appropriate (-)ve integer as defined in errno.h
3571  *  file on failure.
3572  */
3573 static int stmmac_change_mtu(struct net_device *dev, int new_mtu)
3574 {
3575         struct stmmac_priv *priv = netdev_priv(dev);
3576
3577         if (netif_running(dev)) {
3578                 netdev_err(priv->dev, "must be stopped to change its MTU\n");
3579                 return -EBUSY;
3580         }
3581
3582         dev->mtu = new_mtu;
3583
3584         netdev_update_features(dev);
3585
3586         return 0;
3587 }
3588
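/* Note the -EBUSY above: this driver only accepts MTU changes while the
 * interface is down, e.g. from user space:
 *
 *   # ip link set eth0 down
 *   # ip link set eth0 mtu 2000
 *   # ip link set eth0 up
 */
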
3589 static netdev_features_t stmmac_fix_features(struct net_device *dev,
3590                                              netdev_features_t features)
3591 {
3592         struct stmmac_priv *priv = netdev_priv(dev);
3593
3594         if (priv->plat->rx_coe == STMMAC_RX_COE_NONE)
3595                 features &= ~NETIF_F_RXCSUM;
3596
3597         if (!priv->plat->tx_coe)
3598                 features &= ~NETIF_F_CSUM_MASK;
3599
3600         /* Some GMAC devices have a bugged Jumbo frame support that
3601          * needs to have the Tx COE disabled for oversized frames
3602          * (due to limited buffer sizes). In this case we disable
3603          * the TX csum insertion in the TDES and do not use SF (store-and-forward).
3604          */
3605         if (priv->plat->bugged_jumbo && (dev->mtu > ETH_DATA_LEN))
3606                 features &= ~NETIF_F_CSUM_MASK;
3607
3608         /* Disable tso if asked by ethtool */
3609         if ((priv->plat->tso_en) && (priv->dma_cap.tsoen)) {
3610                 if (features & NETIF_F_TSO)
3611                         priv->tso = true;
3612                 else
3613                         priv->tso = false;
3614         }
3615
3616         return features;
3617 }
3618
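/* ndo_fix_features runs before a feature set is committed, so the masking
 * above silently downgrades what user space requested: e.g. with
 * bugged_jumbo set and an MTU above ETH_DATA_LEN, "ethtool -K eth0 tx on"
 * still leaves Tx checksum offload disabled.
 */
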
3619 static int stmmac_set_features(struct net_device *netdev,
3620                                netdev_features_t features)
3621 {
3622         struct stmmac_priv *priv = netdev_priv(netdev);
3623
3624         /* Keep the COE Type when checksum offload is supported */
3625         if (features & NETIF_F_RXCSUM)
3626                 priv->hw->rx_csum = priv->plat->rx_coe;
3627         else
3628                 priv->hw->rx_csum = 0;
3629         /* No check needed because rx_coe has been set before and it will be
3630          * fixed in case of issue.
3631          */
3632         priv->hw->mac->rx_ipc(priv->hw);
3633
3634         return 0;
3635 }
3636
3637 /**
3638  *  stmmac_interrupt - main ISR
3639  *  @irq: interrupt number.
3640  *  @dev_id: to pass the net device pointer.
3641  *  Description: this is the main driver interrupt service routine.
3642  *  It can call:
3643  *  o DMA service routine (to manage incoming frame reception and transmission
3644  *    status)
3645  *  o Core interrupts to manage: remote wake-up, management counter, LPI
3646  *    interrupts.
3647  */
3648 static irqreturn_t stmmac_interrupt(int irq, void *dev_id)
3649 {
3650         struct net_device *dev = (struct net_device *)dev_id;
3651         struct stmmac_priv *priv = netdev_priv(dev);
3652         u32 rx_cnt = priv->plat->rx_queues_to_use;
3653         u32 tx_cnt = priv->plat->tx_queues_to_use;
3654         u32 queues_count;
3655         u32 queue;
3656
3657         queues_count = (rx_cnt > tx_cnt) ? rx_cnt : tx_cnt;
3658
3659         if (priv->irq_wake)
3660                 pm_wakeup_event(priv->device, 0);
3661
3662         if (unlikely(!dev)) {
3663                 netdev_err(priv->dev, "%s: invalid dev pointer\n", __func__);
3664                 return IRQ_NONE;
3665         }
3666
3667         /* To handle GMAC own interrupts */
3668         if ((priv->plat->has_gmac) || (priv->plat->has_gmac4)) {
3669                 int status = priv->hw->mac->host_irq_status(priv->hw,
3670                                                             &priv->xstats);
3671
3672                 if (unlikely(status)) {
3673                         /* For LPI we need to save the tx status */
3674                         if (status & CORE_IRQ_TX_PATH_IN_LPI_MODE)
3675                                 priv->tx_path_in_lpi_mode = true;
3676                         if (status & CORE_IRQ_TX_PATH_EXIT_LPI_MODE)
3677                                 priv->tx_path_in_lpi_mode = false;
3678                 }
3679
3680                 if (priv->synopsys_id >= DWMAC_CORE_4_00) {
3681                         for (queue = 0; queue < queues_count; queue++) {
3682                                 struct stmmac_rx_queue *rx_q =
3683                                 &priv->rx_queue[queue];
3684
3685                                 status |=
3686                                 priv->hw->mac->host_mtl_irq_status(priv->hw,
3687                                                                    queue);
3688
3689                                 if (status & CORE_IRQ_MTL_RX_OVERFLOW &&
3690                                     priv->hw->dma->set_rx_tail_ptr)
3691                                         priv->hw->dma->set_rx_tail_ptr(priv->ioaddr,
3692                                                                 rx_q->rx_tail_addr,
3693                                                                 queue);
3694                         }
3695                 }
3696
3697                 /* PCS link status */
3698                 if (priv->hw->pcs) {
3699                         if (priv->xstats.pcs_link)
3700                                 netif_carrier_on(dev);
3701                         else
3702                                 netif_carrier_off(dev);
3703                 }
3704         }
3705
3706         /* To handle DMA interrupts */
3707         stmmac_dma_interrupt(priv);
3708
3709         return IRQ_HANDLED;
3710 }
3711
3712 #ifdef CONFIG_NET_POLL_CONTROLLER
3713 /* Polling receive - used by NETCONSOLE and other diagnostic tools
3714  * to allow network I/O with interrupts disabled.
3715  */
3716 static void stmmac_poll_controller(struct net_device *dev)
3717 {
3718         disable_irq(dev->irq);
3719         stmmac_interrupt(dev->irq, dev);
3720         enable_irq(dev->irq);
3721 }
3722 #endif
3723
3724 /**
3725  *  stmmac_ioctl - Entry point for the Ioctl
3726  *  @dev: Device pointer.
3727  *  @rq: An IOCTL-specific structure that can contain a pointer to
3728  *  a proprietary structure used to pass information to the driver.
3729  *  @cmd: IOCTL command
3730  *  Description:
3731  *  Currently it supports the phy_mii_ioctl(...) and HW time stamping.
3732  */
3733 static int stmmac_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
3734 {
3735         int ret = -EOPNOTSUPP;
3736
3737         if (!netif_running(dev))
3738                 return -EINVAL;
3739
3740         switch (cmd) {
3741         case SIOCGMIIPHY:
3742         case SIOCGMIIREG:
3743         case SIOCSMIIREG:
3744                 if (!dev->phydev)
3745                         return -EINVAL;
3746                 ret = phy_mii_ioctl(dev->phydev, rq, cmd);
3747                 break;
3748         case SIOCSHWTSTAMP:
3749                 ret = stmmac_hwtstamp_ioctl(dev, rq);
3750                 break;
3751         default:
3752                 break;
3753         }
3754
3755         return ret;
3756 }
3757
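/* For reference, a minimal user-space sketch of driving the MII ioctls
 * routed to phy_mii_ioctl() above (a hedged example: assumes an AF_INET
 * datagram socket and an up-and-running interface named eth0):
 */
#if 0
#include <string.h>
#include <sys/ioctl.h>
#include <net/if.h>
#include <linux/mii.h>
#include <linux/sockios.h>

static int example_read_phy_reg(int sock, const char *ifname, int reg)
{
	struct ifreq ifr;
	struct mii_ioctl_data *mii = (struct mii_ioctl_data *)&ifr.ifr_data;

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);

	if (ioctl(sock, SIOCGMIIPHY, &ifr) < 0)	/* fills mii->phy_id */
		return -1;

	mii->reg_num = reg;
	if (ioctl(sock, SIOCGMIIREG, &ifr) < 0)
		return -1;

	return mii->val_out;
}
#endif
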
3758 static int stmmac_set_mac_address(struct net_device *ndev, void *addr)
3759 {
3760         struct stmmac_priv *priv = netdev_priv(ndev);
3761         int ret = 0;
3762
3763         ret = eth_mac_addr(ndev, addr);
3764         if (ret)
3765                 return ret;
3766
3767         priv->hw->mac->set_umac_addr(priv->hw, ndev->dev_addr, 0);
3768
3769         return ret;
3770 }
3771
3772 #ifdef CONFIG_DEBUG_FS
3773 static struct dentry *stmmac_fs_dir;
3774
3775 static void sysfs_display_ring(void *head, int size, int extend_desc,
3776                                struct seq_file *seq)
3777 {
3778         int i;
3779         struct dma_extended_desc *ep = (struct dma_extended_desc *)head;
3780         struct dma_desc *p = (struct dma_desc *)head;
3781
3782         for (i = 0; i < size; i++) {
3783                 if (extend_desc) {
3784                         seq_printf(seq, "%d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n",
3785                                    i, (unsigned int)virt_to_phys(ep),
3786                                    le32_to_cpu(ep->basic.des0),
3787                                    le32_to_cpu(ep->basic.des1),
3788                                    le32_to_cpu(ep->basic.des2),
3789                                    le32_to_cpu(ep->basic.des3));
3790                         ep++;
3791                 } else {
3792                         seq_printf(seq, "%d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n",
3793                                    i, (unsigned int)virt_to_phys(p),
3794                                    le32_to_cpu(p->des0), le32_to_cpu(p->des1),
3795                                    le32_to_cpu(p->des2), le32_to_cpu(p->des3));
3796                         p++;
3797                 }
3798                 seq_printf(seq, "\n");
3799         }
3800 }
3801
3802 static int stmmac_sysfs_ring_read(struct seq_file *seq, void *v)
3803 {
3804         struct net_device *dev = seq->private;
3805         struct stmmac_priv *priv = netdev_priv(dev);
3806         u32 rx_count = priv->plat->rx_queues_to_use;
3807         u32 tx_count = priv->plat->tx_queues_to_use;
3808         u32 queue;
3809
3810         for (queue = 0; queue < rx_count; queue++) {
3811                 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
3812
3813                 seq_printf(seq, "RX Queue %d:\n", queue);
3814
3815                 if (priv->extend_desc) {
3816                         seq_printf(seq, "Extended descriptor ring:\n");
3817                         sysfs_display_ring((void *)rx_q->dma_erx,
3818                                            DMA_RX_SIZE, 1, seq);
3819                 } else {
3820                         seq_printf(seq, "Descriptor ring:\n");
3821                         sysfs_display_ring((void *)rx_q->dma_rx,
3822                                            DMA_RX_SIZE, 0, seq);
3823                 }
3824         }
3825
3826         for (queue = 0; queue < tx_count; queue++) {
3827                 struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
3828
3829                 seq_printf(seq, "TX Queue %d:\n", queue);
3830
3831                 if (priv->extend_desc) {
3832                         seq_printf(seq, "Extended descriptor ring:\n");
3833                         sysfs_display_ring((void *)tx_q->dma_etx,
3834                                            DMA_TX_SIZE, 1, seq);
3835                 } else {
3836                         seq_printf(seq, "Descriptor ring:\n");
3837                         sysfs_display_ring((void *)tx_q->dma_tx,
3838                                            DMA_TX_SIZE, 0, seq);
3839                 }
3840         }
3841
3842         return 0;
3843 }
3844
3845 static int stmmac_sysfs_ring_open(struct inode *inode, struct file *file)
3846 {
3847         return single_open(file, stmmac_sysfs_ring_read, inode->i_private);
3848 }
3849
3850 /* Debugfs files, should appear in /sys/kernel/debug/stmmaceth/eth0 */
3851
3852 static const struct file_operations stmmac_rings_status_fops = {
3853         .owner = THIS_MODULE,
3854         .open = stmmac_sysfs_ring_open,
3855         .read = seq_read,
3856         .llseek = seq_lseek,
3857         .release = single_release,
3858 };
3859
3860 static int stmmac_sysfs_dma_cap_read(struct seq_file *seq, void *v)
3861 {
3862         struct net_device *dev = seq->private;
3863         struct stmmac_priv *priv = netdev_priv(dev);
3864
3865         if (!priv->hw_cap_support) {
3866                 seq_printf(seq, "DMA HW features not supported\n");
3867                 return 0;
3868         }
3869
3870         seq_printf(seq, "==============================\n");
3871         seq_printf(seq, "\tDMA HW features\n");
3872         seq_printf(seq, "==============================\n");
3873
3874         seq_printf(seq, "\t10/100 Mbps: %s\n",
3875                    (priv->dma_cap.mbps_10_100) ? "Y" : "N");
3876         seq_printf(seq, "\t1000 Mbps: %s\n",
3877                    (priv->dma_cap.mbps_1000) ? "Y" : "N");
3878         seq_printf(seq, "\tHalf duplex: %s\n",
3879                    (priv->dma_cap.half_duplex) ? "Y" : "N");
3880         seq_printf(seq, "\tHash Filter: %s\n",
3881                    (priv->dma_cap.hash_filter) ? "Y" : "N");
3882         seq_printf(seq, "\tMultiple MAC address registers: %s\n",
3883                    (priv->dma_cap.multi_addr) ? "Y" : "N");
3884         seq_printf(seq, "\tPCS (TBI/SGMII/RTBI PHY interfaces): %s\n",
3885                    (priv->dma_cap.pcs) ? "Y" : "N");
3886         seq_printf(seq, "\tSMA (MDIO) Interface: %s\n",
3887                    (priv->dma_cap.sma_mdio) ? "Y" : "N");
3888         seq_printf(seq, "\tPMT Remote wake up: %s\n",
3889                    (priv->dma_cap.pmt_remote_wake_up) ? "Y" : "N");
3890         seq_printf(seq, "\tPMT Magic Frame: %s\n",
3891                    (priv->dma_cap.pmt_magic_frame) ? "Y" : "N");
3892         seq_printf(seq, "\tRMON module: %s\n",
3893                    (priv->dma_cap.rmon) ? "Y" : "N");
3894         seq_printf(seq, "\tIEEE 1588-2002 Time Stamp: %s\n",
3895                    (priv->dma_cap.time_stamp) ? "Y" : "N");
3896         seq_printf(seq, "\tIEEE 1588-2008 Advanced Time Stamp: %s\n",
3897                    (priv->dma_cap.atime_stamp) ? "Y" : "N");
3898         seq_printf(seq, "\t802.3az - Energy-Efficient Ethernet (EEE): %s\n",
3899                    (priv->dma_cap.eee) ? "Y" : "N");
3900         seq_printf(seq, "\tAV features: %s\n", (priv->dma_cap.av) ? "Y" : "N");
3901         seq_printf(seq, "\tChecksum Offload in TX: %s\n",
3902                    (priv->dma_cap.tx_coe) ? "Y" : "N");
3903         if (priv->synopsys_id >= DWMAC_CORE_4_00) {
3904                 seq_printf(seq, "\tIP Checksum Offload in RX: %s\n",
3905                            (priv->dma_cap.rx_coe) ? "Y" : "N");
3906         } else {
3907                 seq_printf(seq, "\tIP Checksum Offload (type1) in RX: %s\n",
3908                            (priv->dma_cap.rx_coe_type1) ? "Y" : "N");
3909                 seq_printf(seq, "\tIP Checksum Offload (type2) in RX: %s\n",
3910                            (priv->dma_cap.rx_coe_type2) ? "Y" : "N");
3911         }
3912         seq_printf(seq, "\tRXFIFO > 2048bytes: %s\n",
3913                    (priv->dma_cap.rxfifo_over_2048) ? "Y" : "N");
3914         seq_printf(seq, "\tNumber of Additional RX channel: %d\n",
3915                    priv->dma_cap.number_rx_channel);
3916         seq_printf(seq, "\tNumber of Additional TX channel: %d\n",
3917                    priv->dma_cap.number_tx_channel);
3918         seq_printf(seq, "\tEnhanced descriptors: %s\n",
3919                    (priv->dma_cap.enh_desc) ? "Y" : "N");
3920
3921         return 0;
3922 }
3923
3924 static int stmmac_sysfs_dma_cap_open(struct inode *inode, struct file *file)
3925 {
3926         return single_open(file, stmmac_sysfs_dma_cap_read, inode->i_private);
3927 }
3928
3929 static const struct file_operations stmmac_dma_cap_fops = {
3930         .owner = THIS_MODULE,
3931         .open = stmmac_sysfs_dma_cap_open,
3932         .read = seq_read,
3933         .llseek = seq_lseek,
3934         .release = single_release,
3935 };
3936
3937 static int stmmac_init_fs(struct net_device *dev)
3938 {
3939         struct stmmac_priv *priv = netdev_priv(dev);
3940
3941         /* Create per netdev entries */
3942         priv->dbgfs_dir = debugfs_create_dir(dev->name, stmmac_fs_dir);
3943
3944         if (!priv->dbgfs_dir || IS_ERR(priv->dbgfs_dir)) {
3945                 netdev_err(priv->dev, "ERROR failed to create debugfs directory\n");
3946
3947                 return -ENOMEM;
3948         }
3949
3950         /* Entry to report DMA RX/TX rings */
3951         priv->dbgfs_rings_status =
3952                 debugfs_create_file("descriptors_status", S_IRUGO,
3953                                     priv->dbgfs_dir, dev,
3954                                     &stmmac_rings_status_fops);
3955
3956         if (!priv->dbgfs_rings_status || IS_ERR(priv->dbgfs_rings_status)) {
3957                 netdev_err(priv->dev, "ERROR creating stmmac ring debugfs file\n");
3958                 debugfs_remove_recursive(priv->dbgfs_dir);
3959
3960                 return -ENOMEM;
3961         }
3962
3963         /* Entry to report the DMA HW features */
3964         priv->dbgfs_dma_cap = debugfs_create_file("dma_cap", S_IRUGO,
3965                                             priv->dbgfs_dir,
3966                                             dev, &stmmac_dma_cap_fops);
3967
3968         if (!priv->dbgfs_dma_cap || IS_ERR(priv->dbgfs_dma_cap)) {
3969                 netdev_err(priv->dev, "ERROR creating stmmac MMC debugfs file\n");
3970                 debugfs_remove_recursive(priv->dbgfs_dir);
3971
3972                 return -ENOMEM;
3973         }
3974
3975         return 0;
3976 }
3977
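/* With debugfs mounted, the two entries created above can be read from
 * user space (assuming the default mount point and an interface named
 * eth0):
 *
 *   # cat /sys/kernel/debug/stmmaceth/eth0/descriptors_status
 *   # cat /sys/kernel/debug/stmmaceth/eth0/dma_cap
 */
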
3978 static void stmmac_exit_fs(struct net_device *dev)
3979 {
3980         struct stmmac_priv *priv = netdev_priv(dev);
3981
3982         debugfs_remove_recursive(priv->dbgfs_dir);
3983 }
3984 #endif /* CONFIG_DEBUG_FS */
3985
3986 static const struct net_device_ops stmmac_netdev_ops = {
3987         .ndo_open = stmmac_open,
3988         .ndo_start_xmit = stmmac_xmit,
3989         .ndo_stop = stmmac_release,
3990         .ndo_change_mtu = stmmac_change_mtu,
3991         .ndo_fix_features = stmmac_fix_features,
3992         .ndo_set_features = stmmac_set_features,
3993         .ndo_set_rx_mode = stmmac_set_rx_mode,
3994         .ndo_tx_timeout = stmmac_tx_timeout,
3995         .ndo_do_ioctl = stmmac_ioctl,
3996 #ifdef CONFIG_NET_POLL_CONTROLLER
3997         .ndo_poll_controller = stmmac_poll_controller,
3998 #endif
3999         .ndo_set_mac_address = stmmac_set_mac_address,
4000 };
4001
4002 /**
4003  *  stmmac_hw_init - Init the MAC device
4004  *  @priv: driver private structure
4005  *  Description: this function is to configure the MAC device according to
4006  *  some platform parameters or the HW capability register. It prepares the
4007  *  driver to use either ring or chain modes and to setup either enhanced or
4008  *  normal descriptors.
4009  */
4010 static int stmmac_hw_init(struct stmmac_priv *priv)
4011 {
4012         struct mac_device_info *mac;
4013
4014         /* Identify the MAC HW device */
4015         if (priv->plat->setup) {
4016                 mac = priv->plat->setup(priv);
4017         } else if (priv->plat->has_gmac) {
4018                 priv->dev->priv_flags |= IFF_UNICAST_FLT;
4019                 mac = dwmac1000_setup(priv->ioaddr,
4020                                       priv->plat->multicast_filter_bins,
4021                                       priv->plat->unicast_filter_entries,
4022                                       &priv->synopsys_id);
4023         } else if (priv->plat->has_gmac4) {
4024                 priv->dev->priv_flags |= IFF_UNICAST_FLT;
4025                 mac = dwmac4_setup(priv->ioaddr,
4026                                    priv->plat->multicast_filter_bins,
4027                                    priv->plat->unicast_filter_entries,
4028                                    &priv->synopsys_id);
4029         } else {
4030                 mac = dwmac100_setup(priv->ioaddr, &priv->synopsys_id);
4031         }
4032         if (!mac)
4033                 return -ENOMEM;
4034
4035         priv->hw = mac;
4036
4037         /* dwmac-sun8i only works in chain mode */
4038         if (priv->plat->has_sun8i)
4039                 chain_mode = 1;
4040
4041         /* To use the chained or ring mode */
4042         if (priv->synopsys_id >= DWMAC_CORE_4_00) {
4043                 priv->hw->mode = &dwmac4_ring_mode_ops;
4044         } else {
4045                 if (chain_mode) {
4046                         priv->hw->mode = &chain_mode_ops;
4047                         dev_info(priv->device, "Chain mode enabled\n");
4048                         priv->mode = STMMAC_CHAIN_MODE;
4049                 } else {
4050                         priv->hw->mode = &ring_mode_ops;
4051                         dev_info(priv->device, "Ring mode enabled\n");
4052                         priv->mode = STMMAC_RING_MODE;
4053                 }
4054         }
4055
4056         /* Get the HW capability (on GMAC cores newer than 3.50a) */
4057         priv->hw_cap_support = stmmac_get_hw_features(priv);
4058         if (priv->hw_cap_support) {
4059                 dev_info(priv->device, "DMA HW capability register supported\n");
4060
4061                 /* We can override some gmac/dma configuration fields: e.g.
4062                  * enh_desc, tx_coe (e.g. that are passed through the
4063                  * platform) with the values from the HW capability
4064                  * register (if supported).
4065                  */
4066                 priv->plat->enh_desc = priv->dma_cap.enh_desc;
4067                 priv->plat->pmt = priv->dma_cap.pmt_remote_wake_up;
4068                 priv->hw->pmt = priv->plat->pmt;
4069
4070                 /* TX COE doesn't work in threshold DMA mode */
4071                 if (priv->plat->force_thresh_dma_mode)
4072                         priv->plat->tx_coe = 0;
4073                 else
4074                         priv->plat->tx_coe = priv->dma_cap.tx_coe;
4075
4076                 /* For GMAC4, rx_coe comes from the HW capability register. */
4077                 priv->plat->rx_coe = priv->dma_cap.rx_coe;
4078
4079                 if (priv->dma_cap.rx_coe_type2)
4080                         priv->plat->rx_coe = STMMAC_RX_COE_TYPE2;
4081                 else if (priv->dma_cap.rx_coe_type1)
4082                         priv->plat->rx_coe = STMMAC_RX_COE_TYPE1;
4083
4084         } else {
4085                 dev_info(priv->device, "No HW DMA feature register supported\n");
4086         }
4087
4088         /* To use alternate (extended), normal or GMAC4 descriptor structures */
4089         if (priv->synopsys_id >= DWMAC_CORE_4_00)
4090                 priv->hw->desc = &dwmac4_desc_ops;
4091         else
4092                 stmmac_selec_desc_mode(priv);
4093
4094         if (priv->plat->rx_coe) {
4095                 priv->hw->rx_csum = priv->plat->rx_coe;
4096                 dev_info(priv->device, "RX Checksum Offload Engine supported\n");
4097                 if (priv->synopsys_id < DWMAC_CORE_4_00)
4098                         dev_info(priv->device, "COE Type %d\n", priv->hw->rx_csum);
4099         }
4100         if (priv->plat->tx_coe)
4101                 dev_info(priv->device, "TX Checksum insertion supported\n");
4102
4103         if (priv->plat->pmt) {
4104                 dev_info(priv->device, "Wake-up On LAN supported\n");
4105                 device_set_wakeup_capable(priv->device, 1);
4106         }
4107
4108         if (priv->dma_cap.tsoen)
4109                 dev_info(priv->device, "TSO supported\n");
4110
4111         return 0;
4112 }
4113
4114 /**
4115  * stmmac_dvr_probe
4116  * @device: device pointer
4117  * @plat_dat: platform data pointer
4118  * @res: stmmac resource pointer
4119  * Description: this is the main probe function; it calls
4120  * alloc_etherdev and allocates the private structure.
4121  * Return:
4122  * 0 on success, a negative errno otherwise.
4123  */
4124 int stmmac_dvr_probe(struct device *device,
4125                      struct plat_stmmacenet_data *plat_dat,
4126                      struct stmmac_resources *res)
4127 {
4128         struct net_device *ndev = NULL;
4129         struct stmmac_priv *priv;
4130         int ret = 0;
4131         u32 queue;
4132
4133         ndev = alloc_etherdev_mqs(sizeof(struct stmmac_priv),
4134                                   MTL_MAX_TX_QUEUES,
4135                                   MTL_MAX_RX_QUEUES);
4136         if (!ndev)
4137                 return -ENOMEM;
4138
4139         SET_NETDEV_DEV(ndev, device);
4140
4141         priv = netdev_priv(ndev);
4142         priv->device = device;
4143         priv->dev = ndev;
4144
4145         stmmac_set_ethtool_ops(ndev);
4146         priv->pause = pause;
4147         priv->plat = plat_dat;
4148         priv->ioaddr = res->addr;
4149         priv->dev->base_addr = (unsigned long)res->addr;
4150
4151         priv->dev->irq = res->irq;
4152         priv->wol_irq = res->wol_irq;
4153         priv->lpi_irq = res->lpi_irq;
4154
4155         if (res->mac)
4156                 memcpy(priv->dev->dev_addr, res->mac, ETH_ALEN);
4157
4158         dev_set_drvdata(device, priv->dev);
4159
4160         /* Verify driver arguments */
4161         stmmac_verify_args();
4162
4163         /* Override with kernel parameters if supplied XXX CRS XXX
4164          * this needs to have multiple instances
4165          */
4166         if ((phyaddr >= 0) && (phyaddr <= 31))
4167                 priv->plat->phy_addr = phyaddr;
4168
4169         if (priv->plat->stmmac_rst) {
4170                 ret = reset_control_assert(priv->plat->stmmac_rst);
4171                 reset_control_deassert(priv->plat->stmmac_rst);
4172                 /* Some reset controllers have only a reset callback instead
4173                  * of an assert + deassert callback pair.
4174                  */
4175                 if (ret == -ENOTSUPP)
4176                         reset_control_reset(priv->plat->stmmac_rst);
4177         }
4178
4179         /* Init MAC and get the capabilities */
4180         ret = stmmac_hw_init(priv);
4181         if (ret)
4182                 goto error_hw_init;
4183
4184         /* Configure real RX and TX queues */
4185         netif_set_real_num_rx_queues(ndev, priv->plat->rx_queues_to_use);
4186         netif_set_real_num_tx_queues(ndev, priv->plat->tx_queues_to_use);
4187
4188         ndev->netdev_ops = &stmmac_netdev_ops;
4189
4190         ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
4191                             NETIF_F_RXCSUM;
4192
4193         if ((priv->plat->tso_en) && (priv->dma_cap.tsoen)) {
4194                 ndev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6;
4195                 priv->tso = true;
4196                 dev_info(priv->device, "TSO feature enabled\n");
4197         }
4198         ndev->features |= ndev->hw_features | NETIF_F_HIGHDMA;
4199         ndev->watchdog_timeo = msecs_to_jiffies(watchdog);
4200 #ifdef STMMAC_VLAN_TAG_USED
4201         /* Both mac100 and gmac support receive VLAN tag detection */
4202         ndev->features |= NETIF_F_HW_VLAN_CTAG_RX;
4203 #endif
4204         priv->msg_enable = netif_msg_init(debug, default_msg_level);
4205
4206         /* MTU range: 46 - hw-specific max */
4207         ndev->min_mtu = ETH_ZLEN - ETH_HLEN;
4208         if ((priv->plat->enh_desc) || (priv->synopsys_id >= DWMAC_CORE_4_00))
4209                 ndev->max_mtu = JUMBO_LEN;
4210         else
4211                 ndev->max_mtu = SKB_MAX_HEAD(NET_SKB_PAD + NET_IP_ALIGN);
4212         /* Do not overwrite ndev->max_mtu if plat->maxmtu > ndev->max_mtu,
4213          * nor if plat->maxmtu < ndev->min_mtu, which is an invalid range.
4214          */
4215         if ((priv->plat->maxmtu < ndev->max_mtu) &&
4216             (priv->plat->maxmtu >= ndev->min_mtu))
4217                 ndev->max_mtu = priv->plat->maxmtu;
4218         else if (priv->plat->maxmtu < ndev->min_mtu)
4219                 dev_warn(priv->device,
4220                          "%s: warning: maxmtu has an invalid value (%d)\n",
4221                          __func__, priv->plat->maxmtu);
4222
4223         if (flow_ctrl)
4224                 priv->flow_ctrl = FLOW_AUTO;    /* RX/TX pause on */
4225
4226         /* Rx Watchdog is available in cores newer than 3.40.
4227          * In some cases, for example on buggy HW, this feature
4228          * has to be disabled; this can be done by passing the
4229          * riwt_off field from the platform.
4230          */
4231         if ((priv->synopsys_id >= DWMAC_CORE_3_50) && (!priv->plat->riwt_off)) {
4232                 priv->use_riwt = 1;
4233                 dev_info(priv->device,
4234                          "Enable RX Mitigation via HW Watchdog Timer\n");
4235         }
4236
4237         for (queue = 0; queue < priv->plat->rx_queues_to_use; queue++) {
4238                 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
4239
4240                 netif_napi_add(ndev, &rx_q->napi, stmmac_poll,
4241                                (8 * priv->plat->rx_queues_to_use));
4242         }
4243
4244         spin_lock_init(&priv->lock);
4245
4246         /* If a specific clk_csr value is passed from the platform,
4247          * this means that the CSR Clock Range selection cannot be
4248          * changed at run-time and is fixed. Otherwise the driver
4249          * will try to set the MDC clock dynamically according to
4250          * the actual csr clock input.
4251          */
4252         if (!priv->plat->clk_csr)
4253                 stmmac_clk_csr_set(priv);
4254         else
4255                 priv->clk_csr = priv->plat->clk_csr;
4256
4257         stmmac_check_pcs_mode(priv);
4258
4259         if (priv->hw->pcs != STMMAC_PCS_RGMII  &&
4260             priv->hw->pcs != STMMAC_PCS_TBI &&
4261             priv->hw->pcs != STMMAC_PCS_RTBI) {
4262                 /* MDIO bus Registration */
4263                 ret = stmmac_mdio_register(ndev);
4264                 if (ret < 0) {
4265                         dev_err(priv->device,
4266                                 "%s: MDIO bus (id: %d) registration failed\n",
4267                                 __func__, priv->plat->bus_id);
4268                         goto error_mdio_register;
4269                 }
4270         }
4271
4272         ret = register_netdev(ndev);
4273         if (ret) {
4274                 dev_err(priv->device, "%s: ERROR %i registering the device\n",
4275                         __func__, ret);
4276                 goto error_netdev_register;
4277         }
4278
4279         return ret;
4280
4281 error_netdev_register:
4282         if (priv->hw->pcs != STMMAC_PCS_RGMII &&
4283             priv->hw->pcs != STMMAC_PCS_TBI &&
4284             priv->hw->pcs != STMMAC_PCS_RTBI)
4285                 stmmac_mdio_unregister(ndev);
4286 error_mdio_register:
4287         for (queue = 0; queue < priv->plat->rx_queues_to_use; queue++) {
4288                 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
4289
4290                 netif_napi_del(&rx_q->napi);
4291         }
4292 error_hw_init:
4293         free_netdev(ndev);
4294
4295         return ret;
4296 }
4297 EXPORT_SYMBOL_GPL(stmmac_dvr_probe);
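
/* Illustrative sketch (not compiled): a platform glue driver typically
 * collects its resources and then hands off to stmmac_dvr_probe(). The
 * foo_* names below are hypothetical; stmmac_get_platform_resources()
 * and stmmac_probe_config_dt() are declared in stmmac_platform.h.
 */
#if 0
static int foo_dwmac_probe(struct platform_device *pdev)
{
	struct plat_stmmacenet_data *plat_dat;
	struct stmmac_resources stmmac_res;
	int ret;

	/* ioaddr, irqs and (optional) MAC address from the DT/platform */
	ret = stmmac_get_platform_resources(pdev, &stmmac_res);
	if (ret)
		return ret;

	plat_dat = stmmac_probe_config_dt(pdev, &stmmac_res.mac);
	if (IS_ERR(plat_dat))
		return PTR_ERR(plat_dat);

	ret = stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
	if (ret)
		stmmac_remove_config_dt(pdev, plat_dat);

	return ret;
}
#endif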
4298
4299 /**
4300  * stmmac_dvr_remove
4301  * @dev: device pointer
4302  * Description: this function resets the TX/RX processes, disables the MAC
4303  * RX/TX, changes the link status and releases the DMA descriptor rings.
4304  */
4305 int stmmac_dvr_remove(struct device *dev)
4306 {
4307         struct net_device *ndev = dev_get_drvdata(dev);
4308         struct stmmac_priv *priv = netdev_priv(ndev);
4309
4310         netdev_info(priv->dev, "%s: removing driver\n", __func__);
4311
4312         stmmac_stop_all_dma(priv);
4313
4314         priv->hw->mac->set_mac(priv->ioaddr, false);
4315         netif_carrier_off(ndev);
4316         unregister_netdev(ndev);
4317         if (priv->plat->stmmac_rst)
4318                 reset_control_assert(priv->plat->stmmac_rst);
4319         clk_disable_unprepare(priv->plat->pclk);
4320         clk_disable_unprepare(priv->plat->stmmac_clk);
4321         if (priv->hw->pcs != STMMAC_PCS_RGMII &&
4322             priv->hw->pcs != STMMAC_PCS_TBI &&
4323             priv->hw->pcs != STMMAC_PCS_RTBI)
4324                 stmmac_mdio_unregister(ndev);
4325         free_netdev(ndev);
4326
4327         return 0;
4328 }
4329 EXPORT_SYMBOL_GPL(stmmac_dvr_remove);
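
/* Illustrative sketch (not compiled): the matching glue-driver remove
 * callback simply passes the struct device down. foo_* is hypothetical.
 */
#if 0
static int foo_dwmac_remove(struct platform_device *pdev)
{
	return stmmac_dvr_remove(&pdev->dev);
}
#endif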
4330
4331 /**
4332  * stmmac_suspend - suspend callback
4333  * @dev: device pointer
4334  * Description: this function suspends the device; it is called by the
4335  * platform driver to stop the network queues, program the PMT register
4336  * (for WoL) and clean up and release the driver resources.
4337  */
4338 int stmmac_suspend(struct device *dev)
4339 {
4340         struct net_device *ndev = dev_get_drvdata(dev);
4341         struct stmmac_priv *priv = netdev_priv(ndev);
4342         unsigned long flags;
4343
4344         if (!ndev || !netif_running(ndev))
4345                 return 0;
4346
4347         if (ndev->phydev)
4348                 phy_stop(ndev->phydev);
4349
4350         spin_lock_irqsave(&priv->lock, flags);
4351
4352         netif_device_detach(ndev);
4353         stmmac_stop_all_queues(priv);
4354
4355         stmmac_disable_all_queues(priv);
4356
4357         /* Stop TX/RX DMA */
4358         stmmac_stop_all_dma(priv);
4359
4360         /* Enable Power down mode by programming the PMT regs */
4361         if (device_may_wakeup(priv->device)) {
4362                 priv->hw->mac->pmt(priv->hw, priv->wolopts);
4363                 priv->irq_wake = 1;
4364         } else {
4365                 priv->hw->mac->set_mac(priv->ioaddr, false);
4366                 pinctrl_pm_select_sleep_state(priv->device);
4367                 /* Disable the clocks since PMT wakeup is off */
4368                 clk_disable(priv->plat->pclk);
4369                 clk_disable(priv->plat->stmmac_clk);
4370         }
4371         spin_unlock_irqrestore(&priv->lock, flags);
4372
4373         priv->oldlink = false;
4374         priv->speed = SPEED_UNKNOWN;
4375         priv->oldduplex = DUPLEX_UNKNOWN;
4376         return 0;
4377 }
4378 EXPORT_SYMBOL_GPL(stmmac_suspend);
4379
4380 /**
4381  * stmmac_reset_queues_param - reset queue parameters
4382  * @priv: driver private structure
4383  */
4384 static void stmmac_reset_queues_param(struct stmmac_priv *priv)
4385 {
4386         u32 rx_cnt = priv->plat->rx_queues_to_use;
4387         u32 tx_cnt = priv->plat->tx_queues_to_use;
4388         u32 queue;
4389
4390         for (queue = 0; queue < rx_cnt; queue++) {
4391                 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
4392
4393                 rx_q->cur_rx = 0;
4394                 rx_q->dirty_rx = 0;
4395         }
4396
4397         for (queue = 0; queue < tx_cnt; queue++) {
4398                 struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
4399
4400                 tx_q->cur_tx = 0;
4401                 tx_q->dirty_tx = 0;
4402         }
4403 }
4404
4405 /**
4406  * stmmac_resume - resume callback
4407  * @dev: device pointer
4408  * Description: on resume, this function is invoked to set up the DMA and
4409  * CORE in a usable state.
4410  */
4411 int stmmac_resume(struct device *dev)
4412 {
4413         struct net_device *ndev = dev_get_drvdata(dev);
4414         struct stmmac_priv *priv = netdev_priv(ndev);
4415         unsigned long flags;
4416
4417         if (!netif_running(ndev))
4418                 return 0;
4419
4420         /* The Power-Down bit in the PMT register is cleared
4421          * automatically as soon as a magic packet or a Wake-up frame
4422          * is received. It is still better to clear this bit manually
4423          * because it can cause problems while resuming
4424          * from other devices (e.g. a serial console).
4425          */
4426         if (device_may_wakeup(priv->device)) {
4427                 spin_lock_irqsave(&priv->lock, flags);
4428                 priv->hw->mac->pmt(priv->hw, 0);
4429                 spin_unlock_irqrestore(&priv->lock, flags);
4430                 priv->irq_wake = 0;
4431         } else {
4432                 pinctrl_pm_select_default_state(priv->device);
4433                 /* enable the clocks previously disabled */
4434                 clk_enable(priv->plat->stmmac_clk);
4435                 clk_enable(priv->plat->pclk);
4436                 /* reset the phy so that it's ready */
4437                 if (priv->mii)
4438                         stmmac_mdio_reset(priv->mii);
4439         }
4440
4441         netif_device_attach(ndev);
4442
4443         spin_lock_irqsave(&priv->lock, flags);
4444
4445         stmmac_reset_queues_param(priv);
4446
4447         /* Reset the private MSS value to force MSS context settings at
4448          * the next TSO xmit (only used for GMAC4).
4449          */
4450         priv->mss = 0;
4451
4452         stmmac_clear_descriptors(priv);
4453
4454         stmmac_hw_setup(ndev, false);
4455         stmmac_init_tx_coalesce(priv);
4456         stmmac_set_rx_mode(ndev);
4457
4458         stmmac_enable_all_queues(priv);
4459
4460         stmmac_start_all_queues(priv);
4461
4462         spin_unlock_irqrestore(&priv->lock, flags);
4463
4464         if (ndev->phydev)
4465                 phy_start(ndev->phydev);
4466
4467         return 0;
4468 }
4469 EXPORT_SYMBOL_GPL(stmmac_resume);
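
/* Illustrative sketch (not compiled): glue drivers usually bind the two
 * callbacks above into a dev_pm_ops, e.g. via SIMPLE_DEV_PM_OPS; the
 * foo_ name is hypothetical.
 */
#if 0
static SIMPLE_DEV_PM_OPS(foo_dwmac_pm_ops, stmmac_suspend, stmmac_resume);
#endif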
4470
4471 #ifndef MODULE
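/**
 * stmmac_cmdline_opt - parse the built-in command line options
 * @str: comma-separated list of key:value pairs taken from the
 *	 "stmmaceth=" boot parameter, e.g. (illustrative values)
 *	 stmmaceth=debug:16,buf_sz:4096,chain_mode:1
 */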
4472 static int __init stmmac_cmdline_opt(char *str)
4473 {
4474         char *opt;
4475
4476         if (!str || !*str)
4477                 return -EINVAL;
4478         while ((opt = strsep(&str, ",")) != NULL) {
4479                 if (!strncmp(opt, "debug:", 6)) {
4480                         if (kstrtoint(opt + 6, 0, &debug))
4481                                 goto err;
4482                 } else if (!strncmp(opt, "phyaddr:", 8)) {
4483                         if (kstrtoint(opt + 8, 0, &phyaddr))
4484                                 goto err;
4485                 } else if (!strncmp(opt, "buf_sz:", 7)) {
4486                         if (kstrtoint(opt + 7, 0, &buf_sz))
4487                                 goto err;
4488                 } else if (!strncmp(opt, "tc:", 3)) {
4489                         if (kstrtoint(opt + 3, 0, &tc))
4490                                 goto err;
4491                 } else if (!strncmp(opt, "watchdog:", 9)) {
4492                         if (kstrtoint(opt + 9, 0, &watchdog))
4493                                 goto err;
4494                 } else if (!strncmp(opt, "flow_ctrl:", 10)) {
4495                         if (kstrtoint(opt + 10, 0, &flow_ctrl))
4496                                 goto err;
4497                 } else if (!strncmp(opt, "pause:", 6)) {
4498                         if (kstrtoint(opt + 6, 0, &pause))
4499                                 goto err;
4500                 } else if (!strncmp(opt, "eee_timer:", 10)) {
4501                         if (kstrtoint(opt + 10, 0, &eee_timer))
4502                                 goto err;
4503                 } else if (!strncmp(opt, "chain_mode:", 11)) {
4504                         if (kstrtoint(opt + 11, 0, &chain_mode))
4505                                 goto err;
4506                 }
4507         }
4508         return 0;
4509
4510 err:
4511         pr_err("%s: ERROR: broken module parameter conversion\n", __func__);
4512         return -EINVAL;
4513 }
4514
4515 __setup("stmmaceth=", stmmac_cmdline_opt);
4516 #endif /* MODULE */
4517
4518 static int __init stmmac_init(void)
4519 {
4520 #ifdef CONFIG_DEBUG_FS
4521         /* Create debugfs main directory if it doesn't exist yet */
4522         if (!stmmac_fs_dir) {
4523                 stmmac_fs_dir = debugfs_create_dir(STMMAC_RESOURCE_NAME, NULL);
4524
4525                 if (!stmmac_fs_dir || IS_ERR(stmmac_fs_dir)) {
4526                         pr_err("ERROR %s, debugfs create directory failed\n",
4527                                STMMAC_RESOURCE_NAME);
4528
4529                         return -ENOMEM;
4530                 }
4531         }
4532 #endif
4533
4534         return 0;
4535 }
4536
4537 static void __exit stmmac_exit(void)
4538 {
4539 #ifdef CONFIG_DEBUG_FS
4540         debugfs_remove_recursive(stmmac_fs_dir);
4541 #endif
4542 }
4543
4544 module_init(stmmac_init)
4545 module_exit(stmmac_exit)
4546
4547 MODULE_DESCRIPTION("STMMAC 10/100/1000 Ethernet device driver");
4548 MODULE_AUTHOR("Giuseppe Cavallaro <peppe.cavallaro@st.com>");
4549 MODULE_LICENSE("GPL");