/*  SuperH Ethernet device driver
 *
 *  Copyright (C) 2014 Renesas Electronics Corporation
 *  Copyright (C) 2006-2012 Nobuhiro Iwamatsu
 *  Copyright (C) 2008-2014 Renesas Solutions Corp.
 *  Copyright (C) 2013-2017 Cogent Embedded, Inc.
 *  Copyright (C) 2014 Codethink Limited
 *
 *  This program is free software; you can redistribute it and/or modify it
 *  under the terms and conditions of the GNU General Public License,
 *  version 2, as published by the Free Software Foundation.
 *
 *  This program is distributed in the hope it will be useful, but WITHOUT
 *  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 *  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 *  more details.
 *
 *  The full GNU General Public License is included in this distribution in
 *  the file called "COPYING".
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
#include <linux/etherdevice.h>
#include <linux/delay.h>
#include <linux/platform_device.h>
#include <linux/mdio-bitbang.h>
#include <linux/netdevice.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_irq.h>
#include <linux/of_net.h>
#include <linux/phy.h>
#include <linux/cache.h>
#include <linux/io.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/ethtool.h>
#include <linux/if_vlan.h>
#include <linux/clk.h>
#include <linux/sh_eth.h>
#include <linux/of_mdio.h>

#include "sh_eth.h"

#define SH_ETH_DEF_MSG_ENABLE \
		(NETIF_MSG_LINK | \
		NETIF_MSG_TIMER | \
		NETIF_MSG_RX_ERR | \
		NETIF_MSG_TX_ERR)

#define SH_ETH_OFFSET_INVALID	((u16)~0)

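/* Pre-fill each per-SoC register offset table with SH_ETH_OFFSET_INVALID
 * via a designated range initializer (a GCC extension used throughout the
 * kernel); only the registers a given core actually implements are then
 * overridden with real offsets below.
 */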
#define SH_ETH_OFFSET_DEFAULTS			\
	[0 ... SH_ETH_MAX_REGISTER_OFFSET - 1] = SH_ETH_OFFSET_INVALID

static const u16 sh_eth_offset_gigabit[SH_ETH_MAX_REGISTER_OFFSET] = {
	SH_ETH_OFFSET_DEFAULTS,

	[EDSR]		= 0x0000,
	[EDMR]		= 0x0400,
	[EDTRR]		= 0x0408,
	[EDRRR]		= 0x0410,
	[EESR]		= 0x0428,
	[EESIPR]	= 0x0430,
	[TDLAR]		= 0x0010,
	[TDFAR]		= 0x0014,
	[TDFXR]		= 0x0018,
	[TDFFR]		= 0x001c,
	[RDLAR]		= 0x0030,
	[RDFAR]		= 0x0034,
	[RDFXR]		= 0x0038,
	[RDFFR]		= 0x003c,
	[TRSCER]	= 0x0438,
	[RMFCR]		= 0x0440,
	[TFTR]		= 0x0448,
	[FDR]		= 0x0450,
	[RMCR]		= 0x0458,
	[RPADIR]	= 0x0460,
	[FCFTR]		= 0x0468,
	[CSMR]		= 0x04e4,

	[ECMR]		= 0x0500,
	[ECSR]		= 0x0510,
	[ECSIPR]	= 0x0518,
	[PIR]		= 0x0520,
	[PSR]		= 0x0528,
	[PIPR]		= 0x052c,
	[RFLR]		= 0x0508,
	[APR]		= 0x0554,
	[MPR]		= 0x0558,
	[PFTCR]		= 0x055c,
	[PFRCR]		= 0x0560,
	[TPAUSER]	= 0x0564,
	[GECMR]		= 0x05b0,
	[BCULR]		= 0x05b4,
	[MAHR]		= 0x05c0,
	[MALR]		= 0x05c8,
	[TROCR]		= 0x0700,
	[CDCR]		= 0x0708,
	[LCCR]		= 0x0710,
	[CEFCR]		= 0x0740,
	[FRECR]		= 0x0748,
	[TSFRCR]	= 0x0750,
	[TLFRCR]	= 0x0758,
	[RFCR]		= 0x0760,
	[CERCR]		= 0x0768,
	[CEECR]		= 0x0770,
	[MAFCR]		= 0x0778,
	[RMII_MII]	= 0x0790,

	[ARSTR]		= 0x0000,
	[TSU_CTRST]	= 0x0004,
	[TSU_FWEN0]	= 0x0010,
	[TSU_FWEN1]	= 0x0014,
	[TSU_FCM]	= 0x0018,
	[TSU_BSYSL0]	= 0x0020,
	[TSU_BSYSL1]	= 0x0024,
	[TSU_PRISL0]	= 0x0028,
	[TSU_PRISL1]	= 0x002c,
	[TSU_FWSL0]	= 0x0030,
	[TSU_FWSL1]	= 0x0034,
	[TSU_FWSLC]	= 0x0038,
	[TSU_QTAG0]	= 0x0040,
	[TSU_QTAG1]	= 0x0044,
	[TSU_FWSR]	= 0x0050,
	[TSU_FWINMK]	= 0x0054,
	[TSU_ADQT0]	= 0x0048,
	[TSU_ADQT1]	= 0x004c,
	[TSU_VTAG0]	= 0x0058,
	[TSU_VTAG1]	= 0x005c,
	[TSU_ADSBSY]	= 0x0060,
	[TSU_TEN]	= 0x0064,
	[TSU_POST1]	= 0x0070,
	[TSU_POST2]	= 0x0074,
	[TSU_POST3]	= 0x0078,
	[TSU_POST4]	= 0x007c,
	[TSU_ADRH0]	= 0x0100,

	[TXNLCR0]	= 0x0080,
	[TXALCR0]	= 0x0084,
	[RXNLCR0]	= 0x0088,
	[RXALCR0]	= 0x008c,
	[FWNLCR0]	= 0x0090,
	[FWALCR0]	= 0x0094,
	[TXNLCR1]	= 0x00a0,
	[TXALCR1]	= 0x00a4,
	[RXNLCR1]	= 0x00a8,
	[RXALCR1]	= 0x00ac,
	[FWNLCR1]	= 0x00b0,
	[FWALCR1]	= 0x00b4,
};

static const u16 sh_eth_offset_fast_rz[SH_ETH_MAX_REGISTER_OFFSET] = {
	SH_ETH_OFFSET_DEFAULTS,

	[EDSR]		= 0x0000,
	[EDMR]		= 0x0400,
	[EDTRR]		= 0x0408,
	[EDRRR]		= 0x0410,
	[EESR]		= 0x0428,
	[EESIPR]	= 0x0430,
	[TDLAR]		= 0x0010,
	[TDFAR]		= 0x0014,
	[TDFXR]		= 0x0018,
	[TDFFR]		= 0x001c,
	[RDLAR]		= 0x0030,
	[RDFAR]		= 0x0034,
	[RDFXR]		= 0x0038,
	[RDFFR]		= 0x003c,
	[TRSCER]	= 0x0438,
	[RMFCR]		= 0x0440,
	[TFTR]		= 0x0448,
	[FDR]		= 0x0450,
	[RMCR]		= 0x0458,
	[RPADIR]	= 0x0460,
	[FCFTR]		= 0x0468,
	[CSMR]		= 0x04e4,

	[ECMR]		= 0x0500,
	[RFLR]		= 0x0508,
	[ECSR]		= 0x0510,
	[ECSIPR]	= 0x0518,
	[PIR]		= 0x0520,
	[APR]		= 0x0554,
	[MPR]		= 0x0558,
	[PFTCR]		= 0x055c,
	[PFRCR]		= 0x0560,
	[TPAUSER]	= 0x0564,
	[MAHR]		= 0x05c0,
	[MALR]		= 0x05c8,
	[CEFCR]		= 0x0740,
	[FRECR]		= 0x0748,
	[TSFRCR]	= 0x0750,
	[TLFRCR]	= 0x0758,
	[RFCR]		= 0x0760,
	[MAFCR]		= 0x0778,

	[ARSTR]		= 0x0000,
	[TSU_CTRST]	= 0x0004,
	[TSU_FWSLC]	= 0x0038,
	[TSU_VTAG0]	= 0x0058,
	[TSU_ADSBSY]	= 0x0060,
	[TSU_TEN]	= 0x0064,
	[TSU_POST1]	= 0x0070,
	[TSU_POST2]	= 0x0074,
	[TSU_POST3]	= 0x0078,
	[TSU_POST4]	= 0x007c,
	[TSU_ADRH0]	= 0x0100,

	[TXNLCR0]	= 0x0080,
	[TXALCR0]	= 0x0084,
	[RXNLCR0]	= 0x0088,
	[RXALCR0]	= 0x008c,
};

static const u16 sh_eth_offset_fast_rcar[SH_ETH_MAX_REGISTER_OFFSET] = {
	SH_ETH_OFFSET_DEFAULTS,

	[ECMR]		= 0x0300,
	[RFLR]		= 0x0308,
	[ECSR]		= 0x0310,
	[ECSIPR]	= 0x0318,
	[PIR]		= 0x0320,
	[PSR]		= 0x0328,
	[RDMLR]		= 0x0340,
	[IPGR]		= 0x0350,
	[APR]		= 0x0354,
	[MPR]		= 0x0358,
	[RFCF]		= 0x0360,
	[TPAUSER]	= 0x0364,
	[TPAUSECR]	= 0x0368,
	[MAHR]		= 0x03c0,
	[MALR]		= 0x03c8,
	[TROCR]		= 0x03d0,
	[CDCR]		= 0x03d4,
	[LCCR]		= 0x03d8,
	[CNDCR]		= 0x03dc,
	[CEFCR]		= 0x03e4,
	[FRECR]		= 0x03e8,
	[TSFRCR]	= 0x03ec,
	[TLFRCR]	= 0x03f0,
	[RFCR]		= 0x03f4,
	[MAFCR]		= 0x03f8,

	[EDMR]		= 0x0200,
	[EDTRR]		= 0x0208,
	[EDRRR]		= 0x0210,
	[TDLAR]		= 0x0218,
	[RDLAR]		= 0x0220,
	[EESR]		= 0x0228,
	[EESIPR]	= 0x0230,
	[TRSCER]	= 0x0238,
	[RMFCR]		= 0x0240,
	[TFTR]		= 0x0248,
	[FDR]		= 0x0250,
	[RMCR]		= 0x0258,
	[TFUCR]		= 0x0264,
	[RFOCR]		= 0x0268,
	[RMIIMODE]	= 0x026c,
	[FCFTR]		= 0x0270,
	[TRIMD]		= 0x027c,
};

static const u16 sh_eth_offset_fast_sh4[SH_ETH_MAX_REGISTER_OFFSET] = {
	SH_ETH_OFFSET_DEFAULTS,

	[ECMR]		= 0x0100,
	[RFLR]		= 0x0108,
	[ECSR]		= 0x0110,
	[ECSIPR]	= 0x0118,
	[PIR]		= 0x0120,
	[PSR]		= 0x0128,
	[RDMLR]		= 0x0140,
	[IPGR]		= 0x0150,
	[APR]		= 0x0154,
	[MPR]		= 0x0158,
	[TPAUSER]	= 0x0164,
	[RFCF]		= 0x0160,
	[TPAUSECR]	= 0x0168,
	[BCFRR]		= 0x016c,
	[MAHR]		= 0x01c0,
	[MALR]		= 0x01c8,
	[TROCR]		= 0x01d0,
	[CDCR]		= 0x01d4,
	[LCCR]		= 0x01d8,
	[CNDCR]		= 0x01dc,
	[CEFCR]		= 0x01e4,
	[FRECR]		= 0x01e8,
	[TSFRCR]	= 0x01ec,
	[TLFRCR]	= 0x01f0,
	[RFCR]		= 0x01f4,
	[MAFCR]		= 0x01f8,
	[RTRATE]	= 0x01fc,

	[EDMR]		= 0x0000,
	[EDTRR]		= 0x0008,
	[EDRRR]		= 0x0010,
	[TDLAR]		= 0x0018,
	[RDLAR]		= 0x0020,
	[EESR]		= 0x0028,
	[EESIPR]	= 0x0030,
	[TRSCER]	= 0x0038,
	[RMFCR]		= 0x0040,
	[TFTR]		= 0x0048,
	[FDR]		= 0x0050,
	[RMCR]		= 0x0058,
	[TFUCR]		= 0x0064,
	[RFOCR]		= 0x0068,
	[FCFTR]		= 0x0070,
	[RPADIR]	= 0x0078,
	[TRIMD]		= 0x007c,
	[RBWAR]		= 0x00c8,
	[RDFAR]		= 0x00cc,
	[TBRAR]		= 0x00d4,
	[TDFAR]		= 0x00d8,
};

static const u16 sh_eth_offset_fast_sh3_sh2[SH_ETH_MAX_REGISTER_OFFSET] = {
	SH_ETH_OFFSET_DEFAULTS,

	[EDMR]		= 0x0000,
	[EDTRR]		= 0x0004,
	[EDRRR]		= 0x0008,
	[TDLAR]		= 0x000c,
	[RDLAR]		= 0x0010,
	[EESR]		= 0x0014,
	[EESIPR]	= 0x0018,
	[TRSCER]	= 0x001c,
	[RMFCR]		= 0x0020,
	[TFTR]		= 0x0024,
	[FDR]		= 0x0028,
	[RMCR]		= 0x002c,
	[EDOCR]		= 0x0030,
	[FCFTR]		= 0x0034,
	[RPADIR]	= 0x0038,
	[TRIMD]		= 0x003c,
	[RBWAR]		= 0x0040,
	[RDFAR]		= 0x0044,
	[TBRAR]		= 0x004c,
	[TDFAR]		= 0x0050,

	[ECMR]		= 0x0160,
	[ECSR]		= 0x0164,
	[ECSIPR]	= 0x0168,
	[PIR]		= 0x016c,
	[MAHR]		= 0x0170,
	[MALR]		= 0x0174,
	[RFLR]		= 0x0178,
	[PSR]		= 0x017c,
	[TROCR]		= 0x0180,
	[CDCR]		= 0x0184,
	[LCCR]		= 0x0188,
	[CNDCR]		= 0x018c,
	[CEFCR]		= 0x0194,
	[FRECR]		= 0x0198,
	[TSFRCR]	= 0x019c,
	[TLFRCR]	= 0x01a0,
	[RFCR]		= 0x01a4,
	[MAFCR]		= 0x01a8,
	[IPGR]		= 0x01b4,
	[APR]		= 0x01b8,
	[MPR]		= 0x01bc,
	[TPAUSER]	= 0x01c4,
	[BCFR]		= 0x01cc,

	[ARSTR]		= 0x0000,
	[TSU_CTRST]	= 0x0004,
	[TSU_FWEN0]	= 0x0010,
	[TSU_FWEN1]	= 0x0014,
	[TSU_FCM]	= 0x0018,
	[TSU_BSYSL0]	= 0x0020,
	[TSU_BSYSL1]	= 0x0024,
	[TSU_PRISL0]	= 0x0028,
	[TSU_PRISL1]	= 0x002c,
	[TSU_FWSL0]	= 0x0030,
	[TSU_FWSL1]	= 0x0034,
	[TSU_FWSLC]	= 0x0038,
	[TSU_QTAGM0]	= 0x0040,
	[TSU_QTAGM1]	= 0x0044,
	[TSU_ADQT0]	= 0x0048,
	[TSU_ADQT1]	= 0x004c,
	[TSU_FWSR]	= 0x0050,
	[TSU_FWINMK]	= 0x0054,
	[TSU_ADSBSY]	= 0x0060,
	[TSU_TEN]	= 0x0064,
	[TSU_POST1]	= 0x0070,
	[TSU_POST2]	= 0x0074,
	[TSU_POST3]	= 0x0078,
	[TSU_POST4]	= 0x007c,

	[TXNLCR0]	= 0x0080,
	[TXALCR0]	= 0x0084,
	[RXNLCR0]	= 0x0088,
	[RXALCR0]	= 0x008c,
	[FWNLCR0]	= 0x0090,
	[FWALCR0]	= 0x0094,
	[TXNLCR1]	= 0x00a0,
	[TXALCR1]	= 0x00a4,
	[RXNLCR1]	= 0x00a8,
	[RXALCR1]	= 0x00ac,
	[FWNLCR1]	= 0x00b0,
	[FWALCR1]	= 0x00b4,

	[TSU_ADRH0]	= 0x0100,
};

static void sh_eth_rcv_snd_disable(struct net_device *ndev);
static struct net_device_stats *sh_eth_get_stats(struct net_device *ndev);

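/* Register accessors.  Every register is referred to by enum index and
 * translated through the per-SoC offset table selected at probe time, so
 * a single driver body serves all of the register layouts above.  An
 * access to a register a core does not implement trips the
 * SH_ETH_OFFSET_INVALID warning instead of touching a bogus address.
 */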
static void sh_eth_write(struct net_device *ndev, u32 data, int enum_index)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	u16 offset = mdp->reg_offset[enum_index];

	if (WARN_ON(offset == SH_ETH_OFFSET_INVALID))
		return;

	iowrite32(data, mdp->addr + offset);
}

static u32 sh_eth_read(struct net_device *ndev, int enum_index)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	u16 offset = mdp->reg_offset[enum_index];

	if (WARN_ON(offset == SH_ETH_OFFSET_INVALID))
		return ~0U;

	return ioread32(mdp->addr + offset);
}

static void sh_eth_modify(struct net_device *ndev, int enum_index, u32 clear,
			  u32 set)
{
	sh_eth_write(ndev, (sh_eth_read(ndev, enum_index) & ~clear) | set,
		     enum_index);
}

static bool sh_eth_is_gether(struct sh_eth_private *mdp)
{
	return mdp->reg_offset == sh_eth_offset_gigabit;
}

static bool sh_eth_is_rz_fast_ether(struct sh_eth_private *mdp)
{
	return mdp->reg_offset == sh_eth_offset_fast_rz;
}

static void sh_eth_select_mii(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	u32 value;

	switch (mdp->phy_interface) {
	case PHY_INTERFACE_MODE_GMII:
		value = 0x2;
		break;
	case PHY_INTERFACE_MODE_MII:
		value = 0x1;
		break;
	case PHY_INTERFACE_MODE_RMII:
		value = 0x0;
		break;
	default:
		netdev_warn(ndev,
			    "PHY interface mode was not set up. Defaulting to MII.\n");
		value = 0x1;
		break;
	}

	sh_eth_write(ndev, value, RMII_MII);
}

static void sh_eth_set_duplex(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);

	sh_eth_modify(ndev, ECMR, ECMR_DM, mdp->duplex ? ECMR_DM : 0);
}

static void sh_eth_chip_reset(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);

	/* reset device */
	sh_eth_tsu_write(mdp, ARSTR_ARST, ARSTR);
	mdelay(1);
}

static void sh_eth_set_rate_gether(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);

	switch (mdp->speed) {
	case 10: /* 10BASE */
		sh_eth_write(ndev, GECMR_10, GECMR);
		break;
	case 100: /* 100BASE */
		sh_eth_write(ndev, GECMR_100, GECMR);
		break;
	case 1000: /* 1000BASE */
		sh_eth_write(ndev, GECMR_1000, GECMR);
		break;
	}
}

#ifdef CONFIG_OF
/* R7S72100 */
static struct sh_eth_cpu_data r7s72100_data = {
	.chip_reset	= sh_eth_chip_reset,
	.set_duplex	= sh_eth_set_duplex,

	.register_type	= SH_ETH_REG_FAST_RZ,

	.ecsr_value	= ECSR_ICD,
	.ecsipr_value	= ECSIPR_ICDIP,
	.eesipr_value	= EESIPR_TWB1IP | EESIPR_TWBIP | EESIPR_TC1IP |
			  EESIPR_TABTIP | EESIPR_RABTIP | EESIPR_RFCOFIP |
			  EESIPR_ECIIP |
			  EESIPR_FTCIP | EESIPR_TDEIP | EESIPR_TFUFIP |
			  EESIPR_FRIP | EESIPR_RDEIP | EESIPR_RFOFIP |
			  EESIPR_RMAFIP | EESIPR_RRFIP |
			  EESIPR_RTLFIP | EESIPR_RTSFIP |
			  EESIPR_PREIP | EESIPR_CERFIP,

	.tx_check	= EESR_TC1 | EESR_FTC,
	.eesr_err_check	= EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT |
			  EESR_RFE | EESR_RDE | EESR_RFRMER | EESR_TFE |
			  EESR_TDE,
	.fdr_value	= 0x0000070f,

	.no_psr		= 1,
	.apr		= 1,
	.mpr		= 1,
	.tpauser	= 1,
	.hw_swap	= 1,
	.rpadir		= 1,
	.rpadir_value	= 2 << 16,
	.no_trimd	= 1,
	.no_ade		= 1,
	.hw_checksum	= 1,
	.tsu		= 1,
};

static void sh_eth_chip_reset_r8a7740(struct net_device *ndev)
{
	sh_eth_chip_reset(ndev);

	sh_eth_select_mii(ndev);
}

/* R8A7740 */
static struct sh_eth_cpu_data r8a7740_data = {
	.chip_reset	= sh_eth_chip_reset_r8a7740,
	.set_duplex	= sh_eth_set_duplex,
	.set_rate	= sh_eth_set_rate_gether,

	.register_type	= SH_ETH_REG_GIGABIT,

	.ecsr_value	= ECSR_ICD | ECSR_MPD,
	.ecsipr_value	= ECSIPR_LCHNGIP | ECSIPR_ICDIP | ECSIPR_MPDIP,
	.eesipr_value	= EESIPR_RFCOFIP | EESIPR_ECIIP |
			  EESIPR_FTCIP | EESIPR_TDEIP | EESIPR_TFUFIP |
			  EESIPR_FRIP | EESIPR_RDEIP | EESIPR_RFOFIP |
			  0x0000f000 | EESIPR_CNDIP | EESIPR_DLCIP |
			  EESIPR_CDIP | EESIPR_TROIP | EESIPR_RMAFIP |
			  EESIPR_CEEFIP | EESIPR_CELFIP |
			  EESIPR_RRFIP | EESIPR_RTLFIP | EESIPR_RTSFIP |
			  EESIPR_PREIP | EESIPR_CERFIP,

	.tx_check	= EESR_TC1 | EESR_FTC,
	.eesr_err_check	= EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT |
			  EESR_RFE | EESR_RDE | EESR_RFRMER | EESR_TFE |
			  EESR_TDE,
	.fdr_value	= 0x0000070f,

	.apr		= 1,
	.mpr		= 1,
	.tpauser	= 1,
	.bculr		= 1,
	.hw_swap	= 1,
	.rpadir		= 1,
	.rpadir_value	= 2 << 16,
	.no_trimd	= 1,
	.no_ade		= 1,
	.hw_checksum	= 1,
	.tsu		= 1,
	.select_mii	= 1,
	.magic		= 1,
};

/* There is CPU dependent code */
static void sh_eth_set_rate_rcar(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);

	switch (mdp->speed) {
	case 10: /* 10BASE */
		sh_eth_modify(ndev, ECMR, ECMR_ELB, 0);
		break;
	case 100: /* 100BASE */
		sh_eth_modify(ndev, ECMR, ECMR_ELB, ECMR_ELB);
		break;
	}
}

/* R-Car Gen1 */
static struct sh_eth_cpu_data rcar_gen1_data = {
	.set_duplex	= sh_eth_set_duplex,
	.set_rate	= sh_eth_set_rate_rcar,

	.register_type	= SH_ETH_REG_FAST_RCAR,

	.ecsr_value	= ECSR_PSRTO | ECSR_LCHNG | ECSR_ICD,
	.ecsipr_value	= ECSIPR_PSRTOIP | ECSIPR_LCHNGIP | ECSIPR_ICDIP,
	.eesipr_value	= EESIPR_RFCOFIP | EESIPR_ADEIP | EESIPR_ECIIP |
			  EESIPR_FTCIP | EESIPR_TDEIP | EESIPR_TFUFIP |
			  EESIPR_FRIP | EESIPR_RDEIP | EESIPR_RFOFIP |
			  EESIPR_RMAFIP | EESIPR_RRFIP |
			  EESIPR_RTLFIP | EESIPR_RTSFIP |
			  EESIPR_PREIP | EESIPR_CERFIP,

	.tx_check	= EESR_FTC | EESR_CND | EESR_DLC | EESR_CD | EESR_RTO,
	.eesr_err_check	= EESR_TWB | EESR_TABT | EESR_RABT | EESR_RFE |
			  EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE,
	.fdr_value	= 0x00000f0f,

	.apr		= 1,
	.mpr		= 1,
	.tpauser	= 1,
	.hw_swap	= 1,
};

/* R-Car Gen2 and RZ/G1 */
static struct sh_eth_cpu_data rcar_gen2_data = {
	.set_duplex	= sh_eth_set_duplex,
	.set_rate	= sh_eth_set_rate_rcar,

	.register_type	= SH_ETH_REG_FAST_RCAR,

	.ecsr_value	= ECSR_PSRTO | ECSR_LCHNG | ECSR_ICD | ECSR_MPD,
	.ecsipr_value	= ECSIPR_PSRTOIP | ECSIPR_LCHNGIP | ECSIPR_ICDIP |
			  ECSIPR_MPDIP,
	.eesipr_value	= EESIPR_RFCOFIP | EESIPR_ADEIP | EESIPR_ECIIP |
			  EESIPR_FTCIP | EESIPR_TDEIP | EESIPR_TFUFIP |
			  EESIPR_FRIP | EESIPR_RDEIP | EESIPR_RFOFIP |
			  EESIPR_RMAFIP | EESIPR_RRFIP |
			  EESIPR_RTLFIP | EESIPR_RTSFIP |
			  EESIPR_PREIP | EESIPR_CERFIP,

	.tx_check	= EESR_FTC | EESR_CND | EESR_DLC | EESR_CD | EESR_RTO,
	.eesr_err_check	= EESR_TWB | EESR_TABT | EESR_RABT | EESR_RFE |
			  EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE,
	.fdr_value	= 0x00000f0f,

	.trscer_err_mask = DESC_I_RINT8,

	.apr		= 1,
	.mpr		= 1,
	.tpauser	= 1,
	.hw_swap	= 1,
	.rmiimode	= 1,
	.magic		= 1,
};
#endif /* CONFIG_OF */

static void sh_eth_set_rate_sh7724(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);

	switch (mdp->speed) {
	case 10: /* 10BASE */
		sh_eth_modify(ndev, ECMR, ECMR_RTM, 0);
		break;
	case 100: /* 100BASE */
		sh_eth_modify(ndev, ECMR, ECMR_RTM, ECMR_RTM);
		break;
	}
}

/* SH7724 */
static struct sh_eth_cpu_data sh7724_data = {
	.set_duplex	= sh_eth_set_duplex,
	.set_rate	= sh_eth_set_rate_sh7724,

	.register_type	= SH_ETH_REG_FAST_SH4,

	.ecsr_value	= ECSR_PSRTO | ECSR_LCHNG | ECSR_ICD,
	.ecsipr_value	= ECSIPR_PSRTOIP | ECSIPR_LCHNGIP | ECSIPR_ICDIP,
	.eesipr_value	= EESIPR_RFCOFIP | EESIPR_ADEIP | EESIPR_ECIIP |
			  EESIPR_FTCIP | EESIPR_TDEIP | EESIPR_TFUFIP |
			  EESIPR_FRIP | EESIPR_RDEIP | EESIPR_RFOFIP |
			  EESIPR_RMAFIP | EESIPR_RRFIP |
			  EESIPR_RTLFIP | EESIPR_RTSFIP |
			  EESIPR_PREIP | EESIPR_CERFIP,

	.tx_check	= EESR_FTC | EESR_CND | EESR_DLC | EESR_CD | EESR_RTO,
	.eesr_err_check	= EESR_TWB | EESR_TABT | EESR_RABT | EESR_RFE |
			  EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE,

	.apr		= 1,
	.mpr		= 1,
	.tpauser	= 1,
	.hw_swap	= 1,
	.rpadir		= 1,
	.rpadir_value	= 0x00020000, /* NET_IP_ALIGN assumed to be 2 */
};

static void sh_eth_set_rate_sh7757(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);

	switch (mdp->speed) {
	case 10: /* 10BASE */
		sh_eth_write(ndev, 0, RTRATE);
		break;
	case 100: /* 100BASE */
		sh_eth_write(ndev, 1, RTRATE);
		break;
	}
}

/* SH7757 */
static struct sh_eth_cpu_data sh7757_data = {
	.set_duplex	= sh_eth_set_duplex,
	.set_rate	= sh_eth_set_rate_sh7757,

	.register_type	= SH_ETH_REG_FAST_SH4,

	.eesipr_value	= EESIPR_RFCOFIP | EESIPR_ECIIP |
			  EESIPR_FTCIP | EESIPR_TDEIP | EESIPR_TFUFIP |
			  EESIPR_FRIP | EESIPR_RDEIP | EESIPR_RFOFIP |
			  0x0000f000 | EESIPR_CNDIP | EESIPR_DLCIP |
			  EESIPR_CDIP | EESIPR_TROIP | EESIPR_RMAFIP |
			  EESIPR_CEEFIP | EESIPR_CELFIP |
			  EESIPR_RRFIP | EESIPR_RTLFIP | EESIPR_RTSFIP |
			  EESIPR_PREIP | EESIPR_CERFIP,

	.tx_check	= EESR_FTC | EESR_CND | EESR_DLC | EESR_CD | EESR_RTO,
	.eesr_err_check	= EESR_TWB | EESR_TABT | EESR_RABT | EESR_RFE |
			  EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE,

	.irq_flags	= IRQF_SHARED,
	.apr		= 1,
	.mpr		= 1,
	.tpauser	= 1,
	.hw_swap	= 1,
	.no_ade		= 1,
	.rpadir		= 1,
	.rpadir_value	= 2 << 16,
	.rtrate		= 1,
};

#define SH_GIGA_ETH_BASE	0xfee00000UL
#define GIGA_MALR(port)		(SH_GIGA_ETH_BASE + 0x800 * (port) + 0x05c8)
#define GIGA_MAHR(port)		(SH_GIGA_ETH_BASE + 0x800 * (port) + 0x05c0)
static void sh_eth_chip_reset_giga(struct net_device *ndev)
{
	u32 mahr[2], malr[2];
	int i;

	/* save MAHR and MALR */
	for (i = 0; i < 2; i++) {
		malr[i] = ioread32((void *)GIGA_MALR(i));
		mahr[i] = ioread32((void *)GIGA_MAHR(i));
	}

	sh_eth_chip_reset(ndev);

	/* restore MAHR and MALR */
	for (i = 0; i < 2; i++) {
		iowrite32(malr[i], (void *)GIGA_MALR(i));
		iowrite32(mahr[i], (void *)GIGA_MAHR(i));
	}
}

static void sh_eth_set_rate_giga(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);

	switch (mdp->speed) {
	case 10: /* 10BASE */
		sh_eth_write(ndev, 0x00000000, GECMR);
		break;
	case 100: /* 100BASE */
		sh_eth_write(ndev, 0x00000010, GECMR);
		break;
	case 1000: /* 1000BASE */
		sh_eth_write(ndev, 0x00000020, GECMR);
		break;
	}
}

/* SH7757(GETHERC) */
static struct sh_eth_cpu_data sh7757_data_giga = {
	.chip_reset	= sh_eth_chip_reset_giga,
	.set_duplex	= sh_eth_set_duplex,
	.set_rate	= sh_eth_set_rate_giga,

	.register_type	= SH_ETH_REG_GIGABIT,

	.ecsr_value	= ECSR_ICD | ECSR_MPD,
	.ecsipr_value	= ECSIPR_LCHNGIP | ECSIPR_ICDIP | ECSIPR_MPDIP,
	.eesipr_value	= EESIPR_RFCOFIP | EESIPR_ECIIP |
			  EESIPR_FTCIP | EESIPR_TDEIP | EESIPR_TFUFIP |
			  EESIPR_FRIP | EESIPR_RDEIP | EESIPR_RFOFIP |
			  0x0000f000 | EESIPR_CNDIP | EESIPR_DLCIP |
			  EESIPR_CDIP | EESIPR_TROIP | EESIPR_RMAFIP |
			  EESIPR_CEEFIP | EESIPR_CELFIP |
			  EESIPR_RRFIP | EESIPR_RTLFIP | EESIPR_RTSFIP |
			  EESIPR_PREIP | EESIPR_CERFIP,

	.tx_check	= EESR_TC1 | EESR_FTC,
	.eesr_err_check	= EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT |
			  EESR_RFE | EESR_RDE | EESR_RFRMER | EESR_TFE |
			  EESR_TDE,
	.fdr_value	= 0x0000072f,

	.irq_flags	= IRQF_SHARED,
	.apr		= 1,
	.mpr		= 1,
	.tpauser	= 1,
	.bculr		= 1,
	.hw_swap	= 1,
	.rpadir		= 1,
	.rpadir_value	= 2 << 16,
	.no_trimd	= 1,
	.no_ade		= 1,
	.tsu		= 1,
};

/* SH7734 */
static struct sh_eth_cpu_data sh7734_data = {
	.chip_reset	= sh_eth_chip_reset,
	.set_duplex	= sh_eth_set_duplex,
	.set_rate	= sh_eth_set_rate_gether,

	.register_type	= SH_ETH_REG_GIGABIT,

	.ecsr_value	= ECSR_ICD | ECSR_MPD,
	.ecsipr_value	= ECSIPR_LCHNGIP | ECSIPR_ICDIP | ECSIPR_MPDIP,
	.eesipr_value	= EESIPR_RFCOFIP | EESIPR_ECIIP |
			  EESIPR_FTCIP | EESIPR_TDEIP | EESIPR_TFUFIP |
			  EESIPR_FRIP | EESIPR_RDEIP | EESIPR_RFOFIP |
			  EESIPR_DLCIP | EESIPR_CDIP | EESIPR_TROIP |
			  EESIPR_RMAFIP | EESIPR_CEEFIP | EESIPR_CELFIP |
			  EESIPR_RRFIP | EESIPR_RTLFIP | EESIPR_RTSFIP |
			  EESIPR_PREIP | EESIPR_CERFIP,

	.tx_check	= EESR_TC1 | EESR_FTC,
	.eesr_err_check	= EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT |
			  EESR_RFE | EESR_RDE | EESR_RFRMER | EESR_TFE |
			  EESR_TDE,

	.apr		= 1,
	.mpr		= 1,
	.tpauser	= 1,
	.bculr		= 1,
	.hw_swap	= 1,
	.no_trimd	= 1,
	.no_ade		= 1,
	.tsu		= 1,
	.hw_checksum	= 1,
	.select_mii	= 1,
	.magic		= 1,
};

/* SH7763 */
static struct sh_eth_cpu_data sh7763_data = {
	.chip_reset	= sh_eth_chip_reset,
	.set_duplex	= sh_eth_set_duplex,
	.set_rate	= sh_eth_set_rate_gether,

	.register_type	= SH_ETH_REG_GIGABIT,

	.ecsr_value	= ECSR_ICD | ECSR_MPD,
	.ecsipr_value	= ECSIPR_LCHNGIP | ECSIPR_ICDIP | ECSIPR_MPDIP,
	.eesipr_value	= EESIPR_RFCOFIP | EESIPR_ECIIP |
			  EESIPR_FTCIP | EESIPR_TDEIP | EESIPR_TFUFIP |
			  EESIPR_FRIP | EESIPR_RDEIP | EESIPR_RFOFIP |
			  EESIPR_DLCIP | EESIPR_CDIP | EESIPR_TROIP |
			  EESIPR_RMAFIP | EESIPR_CEEFIP | EESIPR_CELFIP |
			  EESIPR_RRFIP | EESIPR_RTLFIP | EESIPR_RTSFIP |
			  EESIPR_PREIP | EESIPR_CERFIP,

	.tx_check	= EESR_TC1 | EESR_FTC,
	.eesr_err_check	= EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT |
			  EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE,

	.apr		= 1,
	.mpr		= 1,
	.tpauser	= 1,
	.bculr		= 1,
	.hw_swap	= 1,
	.no_trimd	= 1,
	.no_ade		= 1,
	.tsu		= 1,
	.irq_flags	= IRQF_SHARED,
	.magic		= 1,
};

static struct sh_eth_cpu_data sh7619_data = {
	.register_type	= SH_ETH_REG_FAST_SH3_SH2,

	.eesipr_value	= EESIPR_RFCOFIP | EESIPR_ECIIP |
			  EESIPR_FTCIP | EESIPR_TDEIP | EESIPR_TFUFIP |
			  EESIPR_FRIP | EESIPR_RDEIP | EESIPR_RFOFIP |
			  0x0000f000 | EESIPR_CNDIP | EESIPR_DLCIP |
			  EESIPR_CDIP | EESIPR_TROIP | EESIPR_RMAFIP |
			  EESIPR_CEEFIP | EESIPR_CELFIP |
			  EESIPR_RRFIP | EESIPR_RTLFIP | EESIPR_RTSFIP |
			  EESIPR_PREIP | EESIPR_CERFIP,

	.apr		= 1,
	.mpr		= 1,
	.tpauser	= 1,
	.hw_swap	= 1,
};

static struct sh_eth_cpu_data sh771x_data = {
	.register_type	= SH_ETH_REG_FAST_SH3_SH2,

	.eesipr_value	= EESIPR_RFCOFIP | EESIPR_ECIIP |
			  EESIPR_FTCIP | EESIPR_TDEIP | EESIPR_TFUFIP |
			  EESIPR_FRIP | EESIPR_RDEIP | EESIPR_RFOFIP |
			  0x0000f000 | EESIPR_CNDIP | EESIPR_DLCIP |
			  EESIPR_CDIP | EESIPR_TROIP | EESIPR_RMAFIP |
			  EESIPR_CEEFIP | EESIPR_CELFIP |
			  EESIPR_RRFIP | EESIPR_RTLFIP | EESIPR_RTSFIP |
			  EESIPR_PREIP | EESIPR_CERFIP,
	.tsu		= 1,
};

static void sh_eth_set_default_cpu_data(struct sh_eth_cpu_data *cd)
{
	if (!cd->ecsr_value)
		cd->ecsr_value = DEFAULT_ECSR_INIT;

	if (!cd->ecsipr_value)
		cd->ecsipr_value = DEFAULT_ECSIPR_INIT;

	if (!cd->fcftr_value)
		cd->fcftr_value = DEFAULT_FIFO_F_D_RFF |
				  DEFAULT_FIFO_F_D_RFD;

	if (!cd->fdr_value)
		cd->fdr_value = DEFAULT_FDR_INIT;

	if (!cd->tx_check)
		cd->tx_check = DEFAULT_TX_CHECK;

	if (!cd->eesr_err_check)
		cd->eesr_err_check = DEFAULT_EESR_ERR_CHECK;

	if (!cd->trscer_err_mask)
		cd->trscer_err_mask = DEFAULT_TRSCER_ERR_MASK;
}

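/* Poll EDMR until the self-clearing GETHER software-reset bit drops,
 * giving up with -ETIMEDOUT after roughly 100 ms.
 */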
static int sh_eth_check_reset(struct net_device *ndev)
{
	int ret = 0;
	int cnt = 100;

	while (cnt > 0) {
		if (!(sh_eth_read(ndev, EDMR) & EDMR_SRST_GETHER))
			break;
		mdelay(1);
		cnt--;
	}
	if (cnt <= 0) {
		netdev_err(ndev, "Device reset failed\n");
		ret = -ETIMEDOUT;
	}
	return ret;
}

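/* Two reset flavours: the GETHER/RZ cores set a self-clearing EDMR bit
 * that must be polled (followed by descriptor-table re-init), while the
 * other cores simply hold the reset bit for a few milliseconds.
 */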
static int sh_eth_reset(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	int ret = 0;

	if (sh_eth_is_gether(mdp) || sh_eth_is_rz_fast_ether(mdp)) {
		sh_eth_write(ndev, EDSR_ENALL, EDSR);
		sh_eth_modify(ndev, EDMR, EDMR_SRST_GETHER, EDMR_SRST_GETHER);

		ret = sh_eth_check_reset(ndev);
		if (ret)
			return ret;

		/* Table Init */
		sh_eth_write(ndev, 0x0, TDLAR);
		sh_eth_write(ndev, 0x0, TDFAR);
		sh_eth_write(ndev, 0x0, TDFXR);
		sh_eth_write(ndev, 0x0, TDFFR);
		sh_eth_write(ndev, 0x0, RDLAR);
		sh_eth_write(ndev, 0x0, RDFAR);
		sh_eth_write(ndev, 0x0, RDFXR);
		sh_eth_write(ndev, 0x0, RDFFR);

		/* Reset HW CRC register */
		if (mdp->cd->hw_checksum)
			sh_eth_write(ndev, 0x0, CSMR);

		/* Select MII mode */
		if (mdp->cd->select_mii)
			sh_eth_select_mii(ndev);
	} else {
		sh_eth_modify(ndev, EDMR, EDMR_SRST_ETHER, EDMR_SRST_ETHER);
		mdelay(3);
		sh_eth_modify(ndev, EDMR, EDMR_SRST_ETHER, 0);
	}

	return ret;
}

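/* Reserve skb headroom so that skb->data, the start of the Rx DMA
 * buffer, falls on an SH_ETH_RX_ALIGN boundary.
 */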
static void sh_eth_set_receive_align(struct sk_buff *skb)
{
	uintptr_t reserve = (uintptr_t)skb->data & (SH_ETH_RX_ALIGN - 1);

	if (reserve)
		skb_reserve(skb, SH_ETH_RX_ALIGN - reserve);
}

/* Program the hardware MAC address from dev->dev_addr. */
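/* Illustrative example: dev_addr 02:11:22:33:44:55 is programmed as
 * MAHR = 0x02112233 and MALR = 0x00004455.
 */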
static void update_mac_address(struct net_device *ndev)
{
	sh_eth_write(ndev,
		     (ndev->dev_addr[0] << 24) | (ndev->dev_addr[1] << 16) |
		     (ndev->dev_addr[2] << 8) | (ndev->dev_addr[3]), MAHR);
	sh_eth_write(ndev,
		     (ndev->dev_addr[4] << 8) | (ndev->dev_addr[5]), MALR);
}

/* Get the MAC address from the SuperH MAC address registers
 *
 * The SuperH Ethernet device has no ROM for its MAC address.
 * This driver picks up whatever address the bootloader (U-Boot or
 * sh-ipl+g) programmed, so the MAC address must be set in the
 * bootloader before the device can be used.
 */
static void read_mac_address(struct net_device *ndev, unsigned char *mac)
{
	if (mac[0] || mac[1] || mac[2] || mac[3] || mac[4] || mac[5]) {
		memcpy(ndev->dev_addr, mac, ETH_ALEN);
	} else {
		u32 mahr = sh_eth_read(ndev, MAHR);
		u32 malr = sh_eth_read(ndev, MALR);

		ndev->dev_addr[0] = (mahr >> 24) & 0xFF;
		ndev->dev_addr[1] = (mahr >> 16) & 0xFF;
		ndev->dev_addr[2] = (mahr >>  8) & 0xFF;
		ndev->dev_addr[3] = (mahr >>  0) & 0xFF;
		ndev->dev_addr[4] = (malr >>  8) & 0xFF;
		ndev->dev_addr[5] = (malr >>  0) & 0xFF;
	}
}

static u32 sh_eth_get_edtrr_trns(struct sh_eth_private *mdp)
{
	if (sh_eth_is_gether(mdp) || sh_eth_is_rz_fast_ether(mdp))
		return EDTRR_TRNS_GETHER;
	else
		return EDTRR_TRNS_ETHER;
}

struct bb_info {
	void (*set_gate)(void *addr);
	struct mdiobb_ctrl ctrl;
	void *addr;
};

static void sh_mdio_ctrl(struct mdiobb_ctrl *ctrl, u32 mask, int set)
{
	struct bb_info *bitbang = container_of(ctrl, struct bb_info, ctrl);
	u32 pir;

	if (bitbang->set_gate)
		bitbang->set_gate(bitbang->addr);

	pir = ioread32(bitbang->addr);
	if (set)
		pir |=  mask;
	else
		pir &= ~mask;
	iowrite32(pir, bitbang->addr);
}

/* Data I/O pin control */
static void sh_mmd_ctrl(struct mdiobb_ctrl *ctrl, int bit)
{
	sh_mdio_ctrl(ctrl, PIR_MMD, bit);
}

/* Set bit data */
static void sh_set_mdio(struct mdiobb_ctrl *ctrl, int bit)
{
	sh_mdio_ctrl(ctrl, PIR_MDO, bit);
}

/* Get bit data */
static int sh_get_mdio(struct mdiobb_ctrl *ctrl)
{
	struct bb_info *bitbang = container_of(ctrl, struct bb_info, ctrl);

	if (bitbang->set_gate)
		bitbang->set_gate(bitbang->addr);

	return (ioread32(bitbang->addr) & PIR_MDI) != 0;
}

/* MDC pin control */
static void sh_mdc_ctrl(struct mdiobb_ctrl *ctrl, int bit)
{
	sh_mdio_ctrl(ctrl, PIR_MDC, bit);
}

/* mdio bus control struct */
static struct mdiobb_ops bb_ops = {
	.owner = THIS_MODULE,
	.set_mdc = sh_mdc_ctrl,
	.set_mdio_dir = sh_mmd_ctrl,
	.set_mdio_data = sh_set_mdio,
	.get_mdio_data = sh_get_mdio,
};

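/* Minimal sketch of how these ops are consumed, assuming the standard
 * mdio-bitbang API (the actual hookup lives in the driver's MDIO init
 * code, outside this excerpt):
 *
 *	bitbang->ctrl.ops = &bb_ops;
 *	mii_bus = alloc_mdio_bitbang(&bitbang->ctrl);
 *
 * The mdio-bitbang core then clocks whole MDIO frames by calling
 * set_mdc/set_mdio_data/get_mdio_data, which toggle the PIR bits above.
 */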
/* free Tx skb function */
static int sh_eth_tx_free(struct net_device *ndev, bool sent_only)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	struct sh_eth_txdesc *txdesc;
	int free_num = 0;
	int entry;
	bool sent;

	for (; mdp->cur_tx - mdp->dirty_tx > 0; mdp->dirty_tx++) {
		entry = mdp->dirty_tx % mdp->num_tx_ring;
		txdesc = &mdp->tx_ring[entry];
		sent = !(txdesc->status & cpu_to_le32(TD_TACT));
		if (sent_only && !sent)
			break;
		/* TACT bit must be checked before all the following reads */
		dma_rmb();
		netif_info(mdp, tx_done, ndev,
			   "tx entry %d status 0x%08x\n",
			   entry, le32_to_cpu(txdesc->status));
		/* Free the original skb. */
		if (mdp->tx_skbuff[entry]) {
			dma_unmap_single(&mdp->pdev->dev,
					 le32_to_cpu(txdesc->addr),
					 le32_to_cpu(txdesc->len) >> 16,
					 DMA_TO_DEVICE);
			dev_kfree_skb_irq(mdp->tx_skbuff[entry]);
			mdp->tx_skbuff[entry] = NULL;
			free_num++;
		}
		txdesc->status = cpu_to_le32(TD_TFP);
		if (entry >= mdp->num_tx_ring - 1)
			txdesc->status |= cpu_to_le32(TD_TDLE);

		if (sent) {
			ndev->stats.tx_packets++;
			ndev->stats.tx_bytes += le32_to_cpu(txdesc->len) >> 16;
		}
	}
	return free_num;
}

/* free skb and descriptor buffer */
static void sh_eth_ring_free(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	int ringsize, i;

	if (mdp->rx_ring) {
		for (i = 0; i < mdp->num_rx_ring; i++) {
			if (mdp->rx_skbuff[i]) {
				struct sh_eth_rxdesc *rxdesc = &mdp->rx_ring[i];

				dma_unmap_single(&mdp->pdev->dev,
						 le32_to_cpu(rxdesc->addr),
						 ALIGN(mdp->rx_buf_sz, 32),
						 DMA_FROM_DEVICE);
			}
		}
		ringsize = sizeof(struct sh_eth_rxdesc) * mdp->num_rx_ring;
		dma_free_coherent(&mdp->pdev->dev, ringsize, mdp->rx_ring,
				  mdp->rx_desc_dma);
		mdp->rx_ring = NULL;
	}

	/* Free Rx skb ringbuffer */
	if (mdp->rx_skbuff) {
		for (i = 0; i < mdp->num_rx_ring; i++)
			dev_kfree_skb(mdp->rx_skbuff[i]);
	}
	kfree(mdp->rx_skbuff);
	mdp->rx_skbuff = NULL;

	if (mdp->tx_ring) {
		sh_eth_tx_free(ndev, false);

		ringsize = sizeof(struct sh_eth_txdesc) * mdp->num_tx_ring;
		dma_free_coherent(&mdp->pdev->dev, ringsize, mdp->tx_ring,
				  mdp->tx_desc_dma);
		mdp->tx_ring = NULL;
	}

	/* Free Tx skb ringbuffer */
	kfree(mdp->tx_skbuff);
	mdp->tx_skbuff = NULL;
}

/* format skb and descriptor buffer */
static void sh_eth_ring_format(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	int i;
	struct sk_buff *skb;
	struct sh_eth_rxdesc *rxdesc = NULL;
	struct sh_eth_txdesc *txdesc = NULL;
	int rx_ringsize = sizeof(*rxdesc) * mdp->num_rx_ring;
	int tx_ringsize = sizeof(*txdesc) * mdp->num_tx_ring;
	int skbuff_size = mdp->rx_buf_sz + SH_ETH_RX_ALIGN + 32 - 1;
	dma_addr_t dma_addr;
	u32 buf_len;

	mdp->cur_rx = 0;
	mdp->cur_tx = 0;
	mdp->dirty_rx = 0;
	mdp->dirty_tx = 0;

	memset(mdp->rx_ring, 0, rx_ringsize);

	/* build Rx ring buffer */
	for (i = 0; i < mdp->num_rx_ring; i++) {
		/* skb */
		mdp->rx_skbuff[i] = NULL;
		skb = netdev_alloc_skb(ndev, skbuff_size);
		if (skb == NULL)
			break;
		sh_eth_set_receive_align(skb);

		/* The size of the buffer is a multiple of 32 bytes. */
		buf_len = ALIGN(mdp->rx_buf_sz, 32);
		dma_addr = dma_map_single(&mdp->pdev->dev, skb->data, buf_len,
					  DMA_FROM_DEVICE);
		if (dma_mapping_error(&mdp->pdev->dev, dma_addr)) {
			kfree_skb(skb);
			break;
		}
		mdp->rx_skbuff[i] = skb;

		/* RX descriptor */
		rxdesc = &mdp->rx_ring[i];
		rxdesc->len = cpu_to_le32(buf_len << 16);
		rxdesc->addr = cpu_to_le32(dma_addr);
		rxdesc->status = cpu_to_le32(RD_RACT | RD_RFP);

		/* Rx descriptor address set */
		if (i == 0) {
			sh_eth_write(ndev, mdp->rx_desc_dma, RDLAR);
			if (sh_eth_is_gether(mdp) ||
			    sh_eth_is_rz_fast_ether(mdp))
				sh_eth_write(ndev, mdp->rx_desc_dma, RDFAR);
		}
	}

	mdp->dirty_rx = (u32) (i - mdp->num_rx_ring);

	/* Mark the last entry as wrapping the ring. */
	if (rxdesc)
		rxdesc->status |= cpu_to_le32(RD_RDLE);

	memset(mdp->tx_ring, 0, tx_ringsize);

	/* build Tx ring buffer */
	for (i = 0; i < mdp->num_tx_ring; i++) {
		mdp->tx_skbuff[i] = NULL;
		txdesc = &mdp->tx_ring[i];
		txdesc->status = cpu_to_le32(TD_TFP);
		txdesc->len = cpu_to_le32(0);
		if (i == 0) {
			/* Tx descriptor address set */
			sh_eth_write(ndev, mdp->tx_desc_dma, TDLAR);
			if (sh_eth_is_gether(mdp) ||
			    sh_eth_is_rz_fast_ether(mdp))
				sh_eth_write(ndev, mdp->tx_desc_dma, TDFAR);
		}
	}

	txdesc->status |= cpu_to_le32(TD_TDLE);
}

/* Get skb and descriptor buffer */
static int sh_eth_ring_init(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	int rx_ringsize, tx_ringsize;

	/* +26 gets the maximum ethernet encapsulation, +7 & ~7 because the
	 * card needs room to do 8 byte alignment, +2 so we can reserve
	 * the first 2 bytes, and +16 gets room for the status word from the
	 * card.
	 */
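	/* Worked example: with an MTU of 1500 this gives
	 * ((1500 + 26 + 7) & ~7) + 2 + 16 = 1528 + 18 = 1546 bytes;
	 * MTUs up to 1492 simply use PKT_BUF_SZ.
	 */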
	mdp->rx_buf_sz = (ndev->mtu <= 1492 ? PKT_BUF_SZ :
			  (((ndev->mtu + 26 + 7) & ~7) + 2 + 16));
	if (mdp->cd->rpadir)
		mdp->rx_buf_sz += NET_IP_ALIGN;

	/* Allocate RX and TX skb rings */
	mdp->rx_skbuff = kcalloc(mdp->num_rx_ring, sizeof(*mdp->rx_skbuff),
				 GFP_KERNEL);
	if (!mdp->rx_skbuff)
		return -ENOMEM;

	mdp->tx_skbuff = kcalloc(mdp->num_tx_ring, sizeof(*mdp->tx_skbuff),
				 GFP_KERNEL);
	if (!mdp->tx_skbuff)
		goto ring_free;

	/* Allocate all Rx descriptors. */
	rx_ringsize = sizeof(struct sh_eth_rxdesc) * mdp->num_rx_ring;
	mdp->rx_ring = dma_alloc_coherent(&mdp->pdev->dev, rx_ringsize,
					  &mdp->rx_desc_dma, GFP_KERNEL);
	if (!mdp->rx_ring)
		goto ring_free;

	mdp->dirty_rx = 0;

	/* Allocate all Tx descriptors. */
	tx_ringsize = sizeof(struct sh_eth_txdesc) * mdp->num_tx_ring;
	mdp->tx_ring = dma_alloc_coherent(&mdp->pdev->dev, tx_ringsize,
					  &mdp->tx_desc_dma, GFP_KERNEL);
	if (!mdp->tx_ring)
		goto ring_free;
	return 0;

ring_free:
	/* Free Rx and Tx skb ring buffer and DMA buffer */
	sh_eth_ring_free(ndev);

	return -ENOMEM;
}

static int sh_eth_dev_init(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	int ret;

	/* Soft Reset */
	ret = sh_eth_reset(ndev);
	if (ret)
		return ret;

	if (mdp->cd->rmiimode)
		sh_eth_write(ndev, 0x1, RMIIMODE);

	/* Descriptor format */
	sh_eth_ring_format(ndev);
	if (mdp->cd->rpadir)
		sh_eth_write(ndev, mdp->cd->rpadir_value, RPADIR);

	/* all sh_eth int mask */
	sh_eth_write(ndev, 0, EESIPR);

#if defined(__LITTLE_ENDIAN)
	if (mdp->cd->hw_swap)
		sh_eth_write(ndev, EDMR_EL, EDMR);
	else
#endif
		sh_eth_write(ndev, 0, EDMR);

	/* FIFO size set */
	sh_eth_write(ndev, mdp->cd->fdr_value, FDR);
	sh_eth_write(ndev, 0, TFTR);

	/* Frame recv control (enable multiple-packets per rx irq) */
	sh_eth_write(ndev, RMCR_RNC, RMCR);

	sh_eth_write(ndev, mdp->cd->trscer_err_mask, TRSCER);

	if (mdp->cd->bculr)
		sh_eth_write(ndev, 0x800, BCULR);	/* Burst cycle set */

	sh_eth_write(ndev, mdp->cd->fcftr_value, FCFTR);

	if (!mdp->cd->no_trimd)
		sh_eth_write(ndev, 0, TRIMD);

	/* Recv frame limit set register */
	sh_eth_write(ndev, ndev->mtu + ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN,
		     RFLR);

	sh_eth_modify(ndev, EESR, 0, 0);
	mdp->irq_enabled = true;
	sh_eth_write(ndev, mdp->cd->eesipr_value, EESIPR);

	/* PAUSE Prohibition */
	sh_eth_write(ndev, ECMR_ZPF | (mdp->duplex ? ECMR_DM : 0) |
		     ECMR_TE | ECMR_RE, ECMR);

	if (mdp->cd->set_rate)
		mdp->cd->set_rate(ndev);

	/* E-MAC Status Register clear */
	sh_eth_write(ndev, mdp->cd->ecsr_value, ECSR);

	/* E-MAC Interrupt Enable register */
	sh_eth_write(ndev, mdp->cd->ecsipr_value, ECSIPR);

	/* Set MAC address */
	update_mac_address(ndev);

	/* mask reset */
	if (mdp->cd->apr)
		sh_eth_write(ndev, APR_AP, APR);
	if (mdp->cd->mpr)
		sh_eth_write(ndev, MPR_MP, MPR);
	if (mdp->cd->tpauser)
		sh_eth_write(ndev, TPAUSER_UNLIMITED, TPAUSER);

	/* Setting the Rx mode will start the Rx process. */
	sh_eth_write(ndev, EDRRR_R, EDRRR);

	return ret;
}

static void sh_eth_dev_exit(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	int i;

	/* Deactivate all TX descriptors, so DMA should stop at next
	 * packet boundary if it's currently running
	 */
	for (i = 0; i < mdp->num_tx_ring; i++)
		mdp->tx_ring[i].status &= ~cpu_to_le32(TD_TACT);

	/* Disable TX FIFO egress to MAC */
	sh_eth_rcv_snd_disable(ndev);

	/* Stop RX DMA at next packet boundary */
	sh_eth_write(ndev, 0, EDRRR);

	/* Aside from TX DMA, we can't tell when the hardware is
	 * really stopped, so we need to reset to make sure.
	 * Before doing that, wait for long enough to *probably*
	 * finish transmitting the last packet and poll stats.
	 */
	msleep(2); /* max frame time at 10 Mbps < 1250 us */
	sh_eth_get_stats(ndev);
	sh_eth_reset(ndev);

	/* Set MAC address again */
	update_mac_address(ndev);
}

/* Packet receive function */
static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	struct sh_eth_rxdesc *rxdesc;

	int entry = mdp->cur_rx % mdp->num_rx_ring;
	int boguscnt = (mdp->dirty_rx + mdp->num_rx_ring) - mdp->cur_rx;
	int limit;
	struct sk_buff *skb;
	u32 desc_status;
	int skbuff_size = mdp->rx_buf_sz + SH_ETH_RX_ALIGN + 32 - 1;
	dma_addr_t dma_addr;
	u16 pkt_len;
	u32 buf_len;

	boguscnt = min(boguscnt, *quota);
	limit = boguscnt;
	rxdesc = &mdp->rx_ring[entry];
	while (!(rxdesc->status & cpu_to_le32(RD_RACT))) {
		/* RACT bit must be checked before all the following reads */
		dma_rmb();
		desc_status = le32_to_cpu(rxdesc->status);
		pkt_len = le32_to_cpu(rxdesc->len) & RD_RFL;

		if (--boguscnt < 0)
			break;

		netif_info(mdp, rx_status, ndev,
			   "rx entry %d status 0x%08x len %d\n",
			   entry, desc_status, pkt_len);

		if (!(desc_status & RDFEND))
			ndev->stats.rx_length_errors++;

1497                 /* On almost all GETHER/ETHER variants, the Receive Frame
1498                  * State (RFS) bits in Receive Descriptor 0 occupy bits 9
1499                  * to 0.  However, on the R8A7740 and the R7S72100 the
1500                  * RFS bits occupy bits 25 to 16, so the driver needs to
1501                  * shift the status right by 16.
1502                  */
1503                 if (mdp->cd->hw_checksum)
1504                         desc_status >>= 16;
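                /* I.e. on those two SoCs a flag latched at raw status bit
                 * (16 + n) is tested below against the same RD_RFS* mask as
                 * a flag at bit n on the other variants.
                 */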
1505
1506                 skb = mdp->rx_skbuff[entry];
1507                 if (desc_status & (RD_RFS1 | RD_RFS2 | RD_RFS3 | RD_RFS4 |
1508                                    RD_RFS5 | RD_RFS6 | RD_RFS10)) {
1509                         ndev->stats.rx_errors++;
1510                         if (desc_status & RD_RFS1)
1511                                 ndev->stats.rx_crc_errors++;
1512                         if (desc_status & RD_RFS2)
1513                                 ndev->stats.rx_frame_errors++;
1514                         if (desc_status & RD_RFS3)
1515                                 ndev->stats.rx_length_errors++;
1516                         if (desc_status & RD_RFS4)
1517                                 ndev->stats.rx_length_errors++;
1518                         if (desc_status & RD_RFS6)
1519                                 ndev->stats.rx_missed_errors++;
1520                         if (desc_status & RD_RFS10)
1521                                 ndev->stats.rx_over_errors++;
1522                 } else if (skb) {
1523                         dma_addr = le32_to_cpu(rxdesc->addr);
1524                         if (!mdp->cd->hw_swap)
1525                                 sh_eth_soft_swap(
1526                                         phys_to_virt(ALIGN(dma_addr, 4)),
1527                                         pkt_len + 2);
1528                         mdp->rx_skbuff[entry] = NULL;
1529                         if (mdp->cd->rpadir)
1530                                 skb_reserve(skb, NET_IP_ALIGN);
1531                         dma_unmap_single(&mdp->pdev->dev, dma_addr,
1532                                          ALIGN(mdp->rx_buf_sz, 32),
1533                                          DMA_FROM_DEVICE);
1534                         skb_put(skb, pkt_len);
1535                         skb->protocol = eth_type_trans(skb, ndev);
1536                         netif_receive_skb(skb);
1537                         ndev->stats.rx_packets++;
1538                         ndev->stats.rx_bytes += pkt_len;
1539                         if (desc_status & RD_RFS8)
1540                                 ndev->stats.multicast++;
1541                 }
1542                 entry = (++mdp->cur_rx) % mdp->num_rx_ring;
1543                 rxdesc = &mdp->rx_ring[entry];
1544         }
1545
1546         /* Refill the Rx ring buffers. */
1547         for (; mdp->cur_rx - mdp->dirty_rx > 0; mdp->dirty_rx++) {
1548                 entry = mdp->dirty_rx % mdp->num_rx_ring;
1549                 rxdesc = &mdp->rx_ring[entry];
1550                 /* The buffer size is rounded up to a 32-byte boundary. */
1551                 buf_len = ALIGN(mdp->rx_buf_sz, 32);
1552                 rxdesc->len = cpu_to_le32(buf_len << 16);
1553
1554                 if (mdp->rx_skbuff[entry] == NULL) {
1555                         skb = netdev_alloc_skb(ndev, skbuff_size);
1556                         if (skb == NULL)
1557                                 break;  /* Better luck next round. */
1558                         sh_eth_set_receive_align(skb);
1559                         dma_addr = dma_map_single(&mdp->pdev->dev, skb->data,
1560                                                   buf_len, DMA_FROM_DEVICE);
1561                         if (dma_mapping_error(&mdp->pdev->dev, dma_addr)) {
1562                                 kfree_skb(skb);
1563                                 break;
1564                         }
1565                         mdp->rx_skbuff[entry] = skb;
1566
1567                         skb_checksum_none_assert(skb);
1568                         rxdesc->addr = cpu_to_le32(dma_addr);
1569                 }
1570                 dma_wmb(); /* RACT bit must be set after all the above writes */
1571                 if (entry >= mdp->num_rx_ring - 1)
1572                         rxdesc->status |=
1573                                 cpu_to_le32(RD_RACT | RD_RFP | RD_RDLE);
1574                 else
1575                         rxdesc->status |= cpu_to_le32(RD_RACT | RD_RFP);
1576         }
1577
1578         /* Restart Rx engine if stopped. */
1579         /* If we don't need to check status, don't. -KDU */
1580         if (!(sh_eth_read(ndev, EDRRR) & EDRRR_R)) {
1581                 /* fix the values for the next receiving if RDE is set */
1582                 if (intr_status & EESR_RDE &&
1583                     mdp->reg_offset[RDFAR] != SH_ETH_OFFSET_INVALID) {
1584                         u32 count = (sh_eth_read(ndev, RDFAR) -
1585                                      sh_eth_read(ndev, RDLAR)) >> 4;
1586
1587                         mdp->cur_rx = count;
1588                         mdp->dirty_rx = count;
1589                 }
1590                 sh_eth_write(ndev, EDRRR_R, EDRRR);
1591         }
1592
1593         *quota -= limit - boguscnt - 1;
1594
1595         return *quota <= 0;
1596 }
1597
1598 static void sh_eth_rcv_snd_disable(struct net_device *ndev)
1599 {
1600         /* disable tx and rx */
1601         sh_eth_modify(ndev, ECMR, ECMR_RE | ECMR_TE, 0);
1602 }
1603
1604 static void sh_eth_rcv_snd_enable(struct net_device *ndev)
1605 {
1606         /* enable tx and rx */
1607         sh_eth_modify(ndev, ECMR, ECMR_RE | ECMR_TE, ECMR_RE | ECMR_TE);
1608 }
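
/* Both helpers above go through sh_eth_modify(), a read-modify-write on
 * the named register; a call sh_eth_modify(ndev, ECMR, clear, set) is
 * effectively
 *
 *	sh_eth_write(ndev, (sh_eth_read(ndev, ECMR) & ~clear) | set, ECMR);
 *
 * so (mask, 0) clears the masked bits and (mask, mask) sets them.
 */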
1609
1610 /* E-MAC interrupt handler */
1611 static void sh_eth_emac_interrupt(struct net_device *ndev)
1612 {
1613         struct sh_eth_private *mdp = netdev_priv(ndev);
1614         u32 felic_stat;
1615         u32 link_stat;
1616
1617         felic_stat = sh_eth_read(ndev, ECSR) & sh_eth_read(ndev, ECSIPR);
1618         sh_eth_write(ndev, felic_stat, ECSR);   /* clear int */
1619         if (felic_stat & ECSR_ICD)
1620                 ndev->stats.tx_carrier_errors++;
1621         if (felic_stat & ECSR_MPD)
1622                 pm_wakeup_event(&mdp->pdev->dev, 0);
1623         if (felic_stat & ECSR_LCHNG) {
1624                 /* Link Changed */
1625                 if (mdp->cd->no_psr || mdp->no_ether_link)
1626                         return;
1627                 link_stat = sh_eth_read(ndev, PSR);
1628                 if (mdp->ether_link_active_low)
1629                         link_stat = ~link_stat;
1630                 if (!(link_stat & PHY_ST_LINK)) {
1631                         sh_eth_rcv_snd_disable(ndev);
1632                 } else {
1633                         /* Link Up */
1634                         sh_eth_modify(ndev, EESIPR, EESIPR_ECIIP, 0);
1635                         /* clear int */
1636                         sh_eth_modify(ndev, ECSR, 0, 0);
1637                         sh_eth_modify(ndev, EESIPR, EESIPR_ECIIP, EESIPR_ECIIP);
1638                         /* enable tx and rx */
1639                         sh_eth_rcv_snd_enable(ndev);
1640                 }
1641         }
1642 }
1643
1644 /* error control function */
1645 static void sh_eth_error(struct net_device *ndev, u32 intr_status)
1646 {
1647         struct sh_eth_private *mdp = netdev_priv(ndev);
1648         u32 mask;
1649
1650         if (intr_status & EESR_TWB) {
1651                 /* Unused write back interrupt */
1652                 if (intr_status & EESR_TABT) {  /* Transmit Abort int */
1653                         ndev->stats.tx_aborted_errors++;
1654                         netif_err(mdp, tx_err, ndev, "Transmit Abort\n");
1655                 }
1656         }
1657
1658         if (intr_status & EESR_RABT) {
1659                 /* Receive Abort int */
1660                 if (intr_status & EESR_RFRMER) {
1661                         /* Receive Frame Overflow int */
1662                         ndev->stats.rx_frame_errors++;
1663                 }
1664         }
1665
1666         if (intr_status & EESR_TDE) {
1667                 /* Transmit Descriptor Empty int */
1668                 ndev->stats.tx_fifo_errors++;
1669                 netif_err(mdp, tx_err, ndev, "Transmit Descriptor Empty\n");
1670         }
1671
1672         if (intr_status & EESR_TFE) {
1673                 /* FIFO under flow */
1674                 ndev->stats.tx_fifo_errors++;
1675                 netif_err(mdp, tx_err, ndev, "Transmit FIFO Under flow\n");
1676         }
1677
1678         if (intr_status & EESR_RDE) {
1679                 /* Receive Descriptor Empty int */
1680                 ndev->stats.rx_over_errors++;
1681         }
1682
1683         if (intr_status & EESR_RFE) {
1684                 /* Receive FIFO Overflow int */
1685                 ndev->stats.rx_fifo_errors++;
1686         }
1687
1688         if (!mdp->cd->no_ade && (intr_status & EESR_ADE)) {
1689                 /* Address Error */
1690                 ndev->stats.tx_fifo_errors++;
1691                 netif_err(mdp, tx_err, ndev, "Address Error\n");
1692         }
1693
1694         mask = EESR_TWB | EESR_TABT | EESR_ADE | EESR_TDE | EESR_TFE;
1695         if (mdp->cd->no_ade)
1696                 mask &= ~EESR_ADE;
1697         if (intr_status & mask) {
1698                 /* Tx error */
1699                 u32 edtrr = sh_eth_read(ndev, EDTRR);
1700
1701                 /* dmesg */
1702                 netdev_err(ndev, "TX error. status=%8.8x cur_tx=%8.8x dirty_tx=%8.8x state=%8.8x EDTRR=%8.8x.\n",
1703                            intr_status, mdp->cur_tx, mdp->dirty_tx,
1704                            (u32)ndev->state, edtrr);
1705                 /* dirty buffer free */
1706                 sh_eth_tx_free(ndev, true);
1707
1708                 /* SH7712 BUG */
1709                 if (edtrr ^ sh_eth_get_edtrr_trns(mdp)) {
1710                         /* tx dma start */
1711                         sh_eth_write(ndev, sh_eth_get_edtrr_trns(mdp), EDTRR);
1712                 }
1713                 /* wakeup */
1714                 netif_wake_queue(ndev);
1715         }
1716 }
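
/* A note on the workaround above: the XOR test fires when EDTRR no longer
 * holds the transmit-request value, in which case writing
 * sh_eth_get_edtrr_trns(mdp) back restarts TX DMA after the error has been
 * handled (the "SH7712 BUG" tag names the affected silicon).
 */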
1717
1718 static irqreturn_t sh_eth_interrupt(int irq, void *netdev)
1719 {
1720         struct net_device *ndev = netdev;
1721         struct sh_eth_private *mdp = netdev_priv(ndev);
1722         struct sh_eth_cpu_data *cd = mdp->cd;
1723         irqreturn_t ret = IRQ_NONE;
1724         u32 intr_status, intr_enable;
1725
1726         spin_lock(&mdp->lock);
1727
1728         /* Get interrupt status */
1729         intr_status = sh_eth_read(ndev, EESR);
1730         /* Mask it with the interrupt mask, forcing the ECI interrupt to be
1731          * always enabled since it's the one that comes through regardless of
1732          * the mask, and we need to fully handle it in sh_eth_emac_interrupt()
1733          * in order to quench it, as it doesn't get cleared by just writing 1
1734          * to the ECI bit...
1735          */
1736         intr_enable = sh_eth_read(ndev, EESIPR);
1737         intr_status &= intr_enable | EESIPR_ECIIP;
1738         if (intr_status & (EESR_RX_CHECK | cd->tx_check | EESR_ECI |
1739                            cd->eesr_err_check))
1740                 ret = IRQ_HANDLED;
1741         else
1742                 goto out;
1743
1744         if (unlikely(!mdp->irq_enabled)) {
1745                 sh_eth_write(ndev, 0, EESIPR);
1746                 goto out;
1747         }
1748
1749         if (intr_status & EESR_RX_CHECK) {
1750                 if (napi_schedule_prep(&mdp->napi)) {
1751                         /* Mask Rx interrupts */
1752                         sh_eth_write(ndev, intr_enable & ~EESR_RX_CHECK,
1753                                      EESIPR);
1754                         __napi_schedule(&mdp->napi);
1755                 } else {
1756                         netdev_warn(ndev,
1757                                     "ignoring interrupt, status 0x%08x, mask 0x%08x.\n",
1758                                     intr_status, intr_enable);
1759                 }
1760         }
1761
1762         /* Tx Check */
1763         if (intr_status & cd->tx_check) {
1764                 /* Clear Tx interrupts */
1765                 sh_eth_write(ndev, intr_status & cd->tx_check, EESR);
1766
1767                 sh_eth_tx_free(ndev, true);
1768                 netif_wake_queue(ndev);
1769         }
1770
1771         /* E-MAC interrupt */
1772         if (intr_status & EESR_ECI)
1773                 sh_eth_emac_interrupt(ndev);
1774
1775         if (intr_status & cd->eesr_err_check) {
1776                 /* Clear error interrupts */
1777                 sh_eth_write(ndev, intr_status & cd->eesr_err_check, EESR);
1778
1779                 sh_eth_error(ndev, intr_status);
1780         }
1781
1782 out:
1783         spin_unlock(&mdp->lock);
1784
1785         return ret;
1786 }
1787
1788 static int sh_eth_poll(struct napi_struct *napi, int budget)
1789 {
1790         struct sh_eth_private *mdp = container_of(napi, struct sh_eth_private,
1791                                                   napi);
1792         struct net_device *ndev = napi->dev;
1793         int quota = budget;
1794         u32 intr_status;
1795
1796         for (;;) {
1797                 intr_status = sh_eth_read(ndev, EESR);
1798                 if (!(intr_status & EESR_RX_CHECK))
1799                         break;
1800                 /* Clear Rx interrupts */
1801                 sh_eth_write(ndev, intr_status & EESR_RX_CHECK, EESR);
1802
1803                 if (sh_eth_rx(ndev, intr_status, &quota))
1804                         goto out;
1805         }
1806
1807         napi_complete(napi);
1808
1809         /* Reenable Rx interrupts */
1810         if (mdp->irq_enabled)
1811                 sh_eth_write(ndev, mdp->cd->eesipr_value, EESIPR);
1812 out:
1813         return budget - quota;
1814 }
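
/* NAPI accounting sketch: with budget = 64 and more than 64 frames
 * pending, sh_eth_rx() consumes the whole quota and returns true, so this
 * poller reports the full budget and the NAPI core polls again; once the
 * ring drains, the loop above falls through to napi_complete() and Rx
 * interrupts are re-enabled (provided irq_enabled is still set).
 */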
1815
1816 /* PHY state control function */
1817 static void sh_eth_adjust_link(struct net_device *ndev)
1818 {
1819         struct sh_eth_private *mdp = netdev_priv(ndev);
1820         struct phy_device *phydev = ndev->phydev;
1821         int new_state = 0;
1822
1823         if (phydev->link) {
1824                 if (phydev->duplex != mdp->duplex) {
1825                         new_state = 1;
1826                         mdp->duplex = phydev->duplex;
1827                         if (mdp->cd->set_duplex)
1828                                 mdp->cd->set_duplex(ndev);
1829                 }
1830
1831                 if (phydev->speed != mdp->speed) {
1832                         new_state = 1;
1833                         mdp->speed = phydev->speed;
1834                         if (mdp->cd->set_rate)
1835                                 mdp->cd->set_rate(ndev);
1836                 }
1837                 if (!mdp->link) {
1838                         sh_eth_modify(ndev, ECMR, ECMR_TXF, 0);
1839                         new_state = 1;
1840                         mdp->link = phydev->link;
1841                         if (mdp->cd->no_psr || mdp->no_ether_link)
1842                                 sh_eth_rcv_snd_enable(ndev);
1843                 }
1844         } else if (mdp->link) {
1845                 new_state = 1;
1846                 mdp->link = 0;
1847                 mdp->speed = 0;
1848                 mdp->duplex = -1;
1849                 if (mdp->cd->no_psr || mdp->no_ether_link)
1850                         sh_eth_rcv_snd_disable(ndev);
1851         }
1852
1853         if (new_state && netif_msg_link(mdp))
1854                 phy_print_status(phydev);
1855 }
1856
1857 /* PHY init function */
1858 static int sh_eth_phy_init(struct net_device *ndev)
1859 {
1860         struct device_node *np = ndev->dev.parent->of_node;
1861         struct sh_eth_private *mdp = netdev_priv(ndev);
1862         struct phy_device *phydev;
1863
1864         mdp->link = 0;
1865         mdp->speed = 0;
1866         mdp->duplex = -1;
1867
1868         /* Try connect to PHY */
1869         if (np) {
1870                 struct device_node *pn;
1871
1872                 pn = of_parse_phandle(np, "phy-handle", 0);
1873                 phydev = of_phy_connect(ndev, pn,
1874                                         sh_eth_adjust_link, 0,
1875                                         mdp->phy_interface);
1876
1877                 of_node_put(pn);
1878                 if (!phydev)
1879                         phydev = ERR_PTR(-ENOENT);
1880         } else {
1881                 char phy_id[MII_BUS_ID_SIZE + 3];
1882
1883                 snprintf(phy_id, sizeof(phy_id), PHY_ID_FMT,
1884                          mdp->mii_bus->id, mdp->phy_id);
1885
1886                 phydev = phy_connect(ndev, phy_id, sh_eth_adjust_link,
1887                                      mdp->phy_interface);
1888         }
1889
1890         if (IS_ERR(phydev)) {
1891                 netdev_err(ndev, "failed to connect PHY\n");
1892                 return PTR_ERR(phydev);
1893         }
1894
1895         /* mask with MAC supported features */
1896         if (mdp->cd->register_type != SH_ETH_REG_GIGABIT) {
1897                 int err = phy_set_max_speed(phydev, SPEED_100);
1898                 if (err) {
1899                         netdev_err(ndev, "failed to limit PHY to 100 Mbit/s\n");
1900                         phy_disconnect(phydev);
1901                         return err;
1902                 }
1903         }
1904
1905         phy_attached_info(phydev);
1906
1907         return 0;
1908 }
1909
1910 /* PHY control start function */
1911 static int sh_eth_phy_start(struct net_device *ndev)
1912 {
1913         int ret;
1914
1915         ret = sh_eth_phy_init(ndev);
1916         if (ret)
1917                 return ret;
1918
1919         phy_start(ndev->phydev);
1920
1921         return 0;
1922 }
1923
1924 static int sh_eth_get_link_ksettings(struct net_device *ndev,
1925                                      struct ethtool_link_ksettings *cmd)
1926 {
1927         struct sh_eth_private *mdp = netdev_priv(ndev);
1928         unsigned long flags;
1929
1930         if (!ndev->phydev)
1931                 return -ENODEV;
1932
1933         spin_lock_irqsave(&mdp->lock, flags);
1934         phy_ethtool_ksettings_get(ndev->phydev, cmd);
1935         spin_unlock_irqrestore(&mdp->lock, flags);
1936
1937         return 0;
1938 }
1939
1940 static int sh_eth_set_link_ksettings(struct net_device *ndev,
1941                                      const struct ethtool_link_ksettings *cmd)
1942 {
1943         struct sh_eth_private *mdp = netdev_priv(ndev);
1944         unsigned long flags;
1945         int ret;
1946
1947         if (!ndev->phydev)
1948                 return -ENODEV;
1949
1950         spin_lock_irqsave(&mdp->lock, flags);
1951
1952         /* disable tx and rx */
1953         sh_eth_rcv_snd_disable(ndev);
1954
1955         ret = phy_ethtool_ksettings_set(ndev->phydev, cmd);
1956         if (ret)
1957                 goto error_exit;
1958
1959         if (cmd->base.duplex == DUPLEX_FULL)
1960                 mdp->duplex = 1;
1961         else
1962                 mdp->duplex = 0;
1963
1964         if (mdp->cd->set_duplex)
1965                 mdp->cd->set_duplex(ndev);
1966
1967 error_exit:
1968         mdelay(1);
1969
1970         /* enable tx and rx */
1971         sh_eth_rcv_snd_enable(ndev);
1972
1973         spin_unlock_irqrestore(&mdp->lock, flags);
1974
1975         return ret;
1976 }
1977
1978 /* If it is ever necessary to increase SH_ETH_REG_DUMP_MAX_REGS, the
1979  * version must be bumped as well.  Just adding registers up to that
1980  * limit is fine, as long as the existing register indices don't
1981  * change.
1982  */
1983 #define SH_ETH_REG_DUMP_VERSION         1
1984 #define SH_ETH_REG_DUMP_MAX_REGS        256
1985
1986 static size_t __sh_eth_get_regs(struct net_device *ndev, u32 *buf)
1987 {
1988         struct sh_eth_private *mdp = netdev_priv(ndev);
1989         struct sh_eth_cpu_data *cd = mdp->cd;
1990         u32 *valid_map;
1991         size_t len;
1992
1993         BUILD_BUG_ON(SH_ETH_MAX_REGISTER_OFFSET > SH_ETH_REG_DUMP_MAX_REGS);
1994
1995         /* Dump starts with a bitmap that tells ethtool which
1996          * registers are defined for this chip.
1997          */
1998         len = DIV_ROUND_UP(SH_ETH_REG_DUMP_MAX_REGS, 32);
1999         if (buf) {
2000                 valid_map = buf;
2001                 buf += len;
2002         } else {
2003                 valid_map = NULL;
2004         }
2005
2006         /* Add a register to the dump, if it has a defined offset.
2007          * This automatically skips most undefined registers, but for
2008          * some it is also necessary to check a capability flag in
2009          * struct sh_eth_cpu_data.
2010          */
2011 #define mark_reg_valid(reg) valid_map[reg / 32] |= 1U << (reg % 32)
2012 #define add_reg_from(reg, read_expr) do {                               \
2013                 if (mdp->reg_offset[reg] != SH_ETH_OFFSET_INVALID) {    \
2014                         if (buf) {                                      \
2015                                 mark_reg_valid(reg);                    \
2016                                 *buf++ = read_expr;                     \
2017                         }                                               \
2018                         ++len;                                          \
2019                 }                                                       \
2020         } while (0)
2021 #define add_reg(reg) add_reg_from(reg, sh_eth_read(ndev, reg))
2022 #define add_tsu_reg(reg) add_reg_from(reg, sh_eth_tsu_read(mdp, reg))
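
/* Usage sketch: add_reg(EDSR) expands, via the macros above, to
 *
 *	if (mdp->reg_offset[EDSR] != SH_ETH_OFFSET_INVALID) {
 *		if (buf) {
 *			mark_reg_valid(EDSR);
 *			*buf++ = sh_eth_read(ndev, EDSR);
 *		}
 *		++len;
 *	}
 *
 * so calling with buf == NULL walks the same list purely to compute len.
 */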
2023
2024         add_reg(EDSR);
2025         add_reg(EDMR);
2026         add_reg(EDTRR);
2027         add_reg(EDRRR);
2028         add_reg(EESR);
2029         add_reg(EESIPR);
2030         add_reg(TDLAR);
2031         add_reg(TDFAR);
2032         add_reg(TDFXR);
2033         add_reg(TDFFR);
2034         add_reg(RDLAR);
2035         add_reg(RDFAR);
2036         add_reg(RDFXR);
2037         add_reg(RDFFR);
2038         add_reg(TRSCER);
2039         add_reg(RMFCR);
2040         add_reg(TFTR);
2041         add_reg(FDR);
2042         add_reg(RMCR);
2043         add_reg(TFUCR);
2044         add_reg(RFOCR);
2045         if (cd->rmiimode)
2046                 add_reg(RMIIMODE);
2047         add_reg(FCFTR);
2048         if (cd->rpadir)
2049                 add_reg(RPADIR);
2050         if (!cd->no_trimd)
2051                 add_reg(TRIMD);
2052         add_reg(ECMR);
2053         add_reg(ECSR);
2054         add_reg(ECSIPR);
2055         add_reg(PIR);
2056         if (!cd->no_psr)
2057                 add_reg(PSR);
2058         add_reg(RDMLR);
2059         add_reg(RFLR);
2060         add_reg(IPGR);
2061         if (cd->apr)
2062                 add_reg(APR);
2063         if (cd->mpr)
2064                 add_reg(MPR);
2065         add_reg(RFCR);
2066         add_reg(RFCF);
2067         if (cd->tpauser)
2068                 add_reg(TPAUSER);
2069         add_reg(TPAUSECR);
2070         add_reg(GECMR);
2071         if (cd->bculr)
2072                 add_reg(BCULR);
2073         add_reg(MAHR);
2074         add_reg(MALR);
2075         add_reg(TROCR);
2076         add_reg(CDCR);
2077         add_reg(LCCR);
2078         add_reg(CNDCR);
2079         add_reg(CEFCR);
2080         add_reg(FRECR);
2081         add_reg(TSFRCR);
2082         add_reg(TLFRCR);
2083         add_reg(CERCR);
2084         add_reg(CEECR);
2085         add_reg(MAFCR);
2086         if (cd->rtrate)
2087                 add_reg(RTRATE);
2088         if (cd->hw_checksum)
2089                 add_reg(CSMR);
2090         if (cd->select_mii)
2091                 add_reg(RMII_MII);
2092         add_reg(ARSTR);
2093         if (cd->tsu) {
2094                 add_tsu_reg(TSU_CTRST);
2095                 add_tsu_reg(TSU_FWEN0);
2096                 add_tsu_reg(TSU_FWEN1);
2097                 add_tsu_reg(TSU_FCM);
2098                 add_tsu_reg(TSU_BSYSL0);
2099                 add_tsu_reg(TSU_BSYSL1);
2100                 add_tsu_reg(TSU_PRISL0);
2101                 add_tsu_reg(TSU_PRISL1);
2102                 add_tsu_reg(TSU_FWSL0);
2103                 add_tsu_reg(TSU_FWSL1);
2104                 add_tsu_reg(TSU_FWSLC);
2105                 add_tsu_reg(TSU_QTAG0);
2106                 add_tsu_reg(TSU_QTAG1);
2107                 add_tsu_reg(TSU_QTAGM0);
2108                 add_tsu_reg(TSU_QTAGM1);
2109                 add_tsu_reg(TSU_FWSR);
2110                 add_tsu_reg(TSU_FWINMK);
2111                 add_tsu_reg(TSU_ADQT0);
2112                 add_tsu_reg(TSU_ADQT1);
2113                 add_tsu_reg(TSU_VTAG0);
2114                 add_tsu_reg(TSU_VTAG1);
2115                 add_tsu_reg(TSU_ADSBSY);
2116                 add_tsu_reg(TSU_TEN);
2117                 add_tsu_reg(TSU_POST1);
2118                 add_tsu_reg(TSU_POST2);
2119                 add_tsu_reg(TSU_POST3);
2120                 add_tsu_reg(TSU_POST4);
2121                 if (mdp->reg_offset[TSU_ADRH0] != SH_ETH_OFFSET_INVALID) {
2122                         /* This is the start of a table, not just a single
2123                          * register.
2124                          */
2125                         if (buf) {
2126                                 unsigned int i;
2127
2128                                 mark_reg_valid(TSU_ADRH0);
2129                                 for (i = 0; i < SH_ETH_TSU_CAM_ENTRIES * 2; i++)
2130                                         *buf++ = ioread32(
2131                                                 mdp->tsu_addr +
2132                                                 mdp->reg_offset[TSU_ADRH0] +
2133                                                 i * 4);
2134                         }
2135                         len += SH_ETH_TSU_CAM_ENTRIES * 2;
2136                 }
2137         }
2138
2139 #undef mark_reg_valid
2140 #undef add_reg_from
2141 #undef add_reg
2142 #undef add_tsu_reg
2143
2144         return len * 4;
2145 }
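
/* Resulting layout of the dump: the first DIV_ROUND_UP(256, 32) = 8 words
 * hold the validity bitmap, followed by one word per register marked in
 * it; returning len * 4 (bytes) from both the sizing pass and the filling
 * pass keeps sh_eth_get_regs_len() and sh_eth_get_regs() consistent.
 */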
2146
2147 static int sh_eth_get_regs_len(struct net_device *ndev)
2148 {
2149         return __sh_eth_get_regs(ndev, NULL);
2150 }
2151
2152 static void sh_eth_get_regs(struct net_device *ndev, struct ethtool_regs *regs,
2153                             void *buf)
2154 {
2155         struct sh_eth_private *mdp = netdev_priv(ndev);
2156
2157         regs->version = SH_ETH_REG_DUMP_VERSION;
2158
2159         pm_runtime_get_sync(&mdp->pdev->dev);
2160         __sh_eth_get_regs(ndev, buf);
2161         pm_runtime_put_sync(&mdp->pdev->dev);
2162 }
2163
2164 static int sh_eth_nway_reset(struct net_device *ndev)
2165 {
2166         struct sh_eth_private *mdp = netdev_priv(ndev);
2167         unsigned long flags;
2168         int ret;
2169
2170         if (!ndev->phydev)
2171                 return -ENODEV;
2172
2173         spin_lock_irqsave(&mdp->lock, flags);
2174         ret = phy_start_aneg(ndev->phydev);
2175         spin_unlock_irqrestore(&mdp->lock, flags);
2176
2177         return ret;
2178 }
2179
2180 static u32 sh_eth_get_msglevel(struct net_device *ndev)
2181 {
2182         struct sh_eth_private *mdp = netdev_priv(ndev);
2183         return mdp->msg_enable;
2184 }
2185
2186 static void sh_eth_set_msglevel(struct net_device *ndev, u32 value)
2187 {
2188         struct sh_eth_private *mdp = netdev_priv(ndev);
2189         mdp->msg_enable = value;
2190 }
2191
2192 static const char sh_eth_gstrings_stats[][ETH_GSTRING_LEN] = {
2193         "rx_current", "tx_current",
2194         "rx_dirty", "tx_dirty",
2195 };
2196 #define SH_ETH_STATS_LEN  ARRAY_SIZE(sh_eth_gstrings_stats)
2197
2198 static int sh_eth_get_sset_count(struct net_device *netdev, int sset)
2199 {
2200         switch (sset) {
2201         case ETH_SS_STATS:
2202                 return SH_ETH_STATS_LEN;
2203         default:
2204                 return -EOPNOTSUPP;
2205         }
2206 }
2207
2208 static void sh_eth_get_ethtool_stats(struct net_device *ndev,
2209                                      struct ethtool_stats *stats, u64 *data)
2210 {
2211         struct sh_eth_private *mdp = netdev_priv(ndev);
2212         int i = 0;
2213
2214         /* device-specific stats */
2215         data[i++] = mdp->cur_rx;
2216         data[i++] = mdp->cur_tx;
2217         data[i++] = mdp->dirty_rx;
2218         data[i++] = mdp->dirty_tx;
2219 }
2220
2221 static void sh_eth_get_strings(struct net_device *ndev, u32 stringset, u8 *data)
2222 {
2223         switch (stringset) {
2224         case ETH_SS_STATS:
2225                 memcpy(data, *sh_eth_gstrings_stats,
2226                        sizeof(sh_eth_gstrings_stats));
2227                 break;
2228         }
2229 }
2230
2231 static void sh_eth_get_ringparam(struct net_device *ndev,
2232                                  struct ethtool_ringparam *ring)
2233 {
2234         struct sh_eth_private *mdp = netdev_priv(ndev);
2235
2236         ring->rx_max_pending = RX_RING_MAX;
2237         ring->tx_max_pending = TX_RING_MAX;
2238         ring->rx_pending = mdp->num_rx_ring;
2239         ring->tx_pending = mdp->num_tx_ring;
2240 }
2241
2242 static int sh_eth_set_ringparam(struct net_device *ndev,
2243                                 struct ethtool_ringparam *ring)
2244 {
2245         struct sh_eth_private *mdp = netdev_priv(ndev);
2246         int ret;
2247
2248         if (ring->tx_pending > TX_RING_MAX ||
2249             ring->rx_pending > RX_RING_MAX ||
2250             ring->tx_pending < TX_RING_MIN ||
2251             ring->rx_pending < RX_RING_MIN)
2252                 return -EINVAL;
2253         if (ring->rx_mini_pending || ring->rx_jumbo_pending)
2254                 return -EINVAL;
2255
2256         if (netif_running(ndev)) {
2257                 netif_device_detach(ndev);
2258                 netif_tx_disable(ndev);
2259
2260                 /* Serialise with the interrupt handler and NAPI, then
2261                  * disable interrupts.  We have to clear the
2262                  * irq_enabled flag first to ensure that interrupts
2263                  * won't be re-enabled.
2264                  */
2265                 mdp->irq_enabled = false;
2266                 synchronize_irq(ndev->irq);
2267                 napi_synchronize(&mdp->napi);
2268                 sh_eth_write(ndev, 0x0000, EESIPR);
2269
2270                 sh_eth_dev_exit(ndev);
2271
2272                 /* Free all the skbuffs in the Rx queue and the DMA buffers. */
2273                 sh_eth_ring_free(ndev);
2274         }
2275
2276         /* Set new parameters */
2277         mdp->num_rx_ring = ring->rx_pending;
2278         mdp->num_tx_ring = ring->tx_pending;
2279
2280         if (netif_running(ndev)) {
2281                 ret = sh_eth_ring_init(ndev);
2282                 if (ret < 0) {
2283                         netdev_err(ndev, "%s: sh_eth_ring_init failed.\n",
2284                                    __func__);
2285                         return ret;
2286                 }
2287                 ret = sh_eth_dev_init(ndev);
2288                 if (ret < 0) {
2289                         netdev_err(ndev, "%s: sh_eth_dev_init failed.\n",
2290                                    __func__);
2291                         return ret;
2292                 }
2293
2294                 netif_device_attach(ndev);
2295         }
2296
2297         return 0;
2298 }
2299
2300 static void sh_eth_get_wol(struct net_device *ndev, struct ethtool_wolinfo *wol)
2301 {
2302         struct sh_eth_private *mdp = netdev_priv(ndev);
2303
2304         wol->supported = 0;
2305         wol->wolopts = 0;
2306
2307         if (mdp->cd->magic && mdp->clk) {
2308                 wol->supported = WAKE_MAGIC;
2309                 wol->wolopts = mdp->wol_enabled ? WAKE_MAGIC : 0;
2310         }
2311 }
2312
2313 static int sh_eth_set_wol(struct net_device *ndev, struct ethtool_wolinfo *wol)
2314 {
2315         struct sh_eth_private *mdp = netdev_priv(ndev);
2316
2317         if (!mdp->cd->magic || !mdp->clk || wol->wolopts & ~WAKE_MAGIC)
2318                 return -EOPNOTSUPP;
2319
2320         mdp->wol_enabled = !!(wol->wolopts & WAKE_MAGIC);
2321
2322         device_set_wakeup_enable(&mdp->pdev->dev, mdp->wol_enabled);
2323
2324         return 0;
2325 }
2326
2327 static const struct ethtool_ops sh_eth_ethtool_ops = {
2328         .get_regs_len   = sh_eth_get_regs_len,
2329         .get_regs       = sh_eth_get_regs,
2330         .nway_reset     = sh_eth_nway_reset,
2331         .get_msglevel   = sh_eth_get_msglevel,
2332         .set_msglevel   = sh_eth_set_msglevel,
2333         .get_link       = ethtool_op_get_link,
2334         .get_strings    = sh_eth_get_strings,
2335         .get_ethtool_stats  = sh_eth_get_ethtool_stats,
2336         .get_sset_count     = sh_eth_get_sset_count,
2337         .get_ringparam  = sh_eth_get_ringparam,
2338         .set_ringparam  = sh_eth_set_ringparam,
2339         .get_link_ksettings = sh_eth_get_link_ksettings,
2340         .set_link_ksettings = sh_eth_set_link_ksettings,
2341         .get_wol        = sh_eth_get_wol,
2342         .set_wol        = sh_eth_set_wol,
2343 };
2344
2345 /* network device open function */
2346 static int sh_eth_open(struct net_device *ndev)
2347 {
2348         struct sh_eth_private *mdp = netdev_priv(ndev);
2349         int ret;
2350
2351         pm_runtime_get_sync(&mdp->pdev->dev);
2352
2353         napi_enable(&mdp->napi);
2354
2355         ret = request_irq(ndev->irq, sh_eth_interrupt,
2356                           mdp->cd->irq_flags, ndev->name, ndev);
2357         if (ret) {
2358                 netdev_err(ndev, "Cannot assign IRQ number\n");
2359                 goto out_napi_off;
2360         }
2361
2362         /* Descriptor set */
2363         ret = sh_eth_ring_init(ndev);
2364         if (ret)
2365                 goto out_free_irq;
2366
2367         /* device init */
2368         ret = sh_eth_dev_init(ndev);
2369         if (ret)
2370                 goto out_free_irq;
2371
2372         /* PHY control start */
2373         ret = sh_eth_phy_start(ndev);
2374         if (ret)
2375                 goto out_free_irq;
2376
2377         netif_start_queue(ndev);
2378
2379         mdp->is_opened = 1;
2380
2381         return ret;
2382
2383 out_free_irq:
2384         free_irq(ndev->irq, ndev);
2385 out_napi_off:
2386         napi_disable(&mdp->napi);
2387         pm_runtime_put_sync(&mdp->pdev->dev);
2388         return ret;
2389 }
2390
2391 /* Timeout function */
2392 static void sh_eth_tx_timeout(struct net_device *ndev)
2393 {
2394         struct sh_eth_private *mdp = netdev_priv(ndev);
2395         struct sh_eth_rxdesc *rxdesc;
2396         int i;
2397
2398         netif_stop_queue(ndev);
2399
2400         netif_err(mdp, timer, ndev,
2401                   "transmit timed out, status %8.8x, resetting...\n",
2402                   sh_eth_read(ndev, EESR));
2403
2404         /* tx_errors count up */
2405         ndev->stats.tx_errors++;
2406
2407         /* Free all the skbuffs in the Rx queue. */
2408         for (i = 0; i < mdp->num_rx_ring; i++) {
2409                 rxdesc = &mdp->rx_ring[i];
2410                 rxdesc->status = cpu_to_le32(0);
2411                 rxdesc->addr = cpu_to_le32(0xBADF00D0);
2412                 dev_kfree_skb(mdp->rx_skbuff[i]);
2413                 mdp->rx_skbuff[i] = NULL;
2414         }
2415         for (i = 0; i < mdp->num_tx_ring; i++) {
2416                 dev_kfree_skb(mdp->tx_skbuff[i]);
2417                 mdp->tx_skbuff[i] = NULL;
2418         }
2419
2420         /* device init */
2421         sh_eth_dev_init(ndev);
2422
2423         netif_start_queue(ndev);
2424 }
2425
2426 /* Packet transmit function */
2427 static int sh_eth_start_xmit(struct sk_buff *skb, struct net_device *ndev)
2428 {
2429         struct sh_eth_private *mdp = netdev_priv(ndev);
2430         struct sh_eth_txdesc *txdesc;
2431         dma_addr_t dma_addr;
2432         u32 entry;
2433         unsigned long flags;
2434
2435         spin_lock_irqsave(&mdp->lock, flags);
2436         if ((mdp->cur_tx - mdp->dirty_tx) >= (mdp->num_tx_ring - 4)) {
2437                 if (!sh_eth_tx_free(ndev, true)) {
2438                         netif_warn(mdp, tx_queued, ndev, "TxFD exhausted.\n");
2439                         netif_stop_queue(ndev);
2440                         spin_unlock_irqrestore(&mdp->lock, flags);
2441                         return NETDEV_TX_BUSY;
2442                 }
2443         }
2444         spin_unlock_irqrestore(&mdp->lock, flags);
2445
2446         if (skb_put_padto(skb, ETH_ZLEN))
2447                 return NETDEV_TX_OK;
2448
2449         entry = mdp->cur_tx % mdp->num_tx_ring;
2450         mdp->tx_skbuff[entry] = skb;
2451         txdesc = &mdp->tx_ring[entry];
2452         /* soft swap. */
2453         if (!mdp->cd->hw_swap)
2454                 sh_eth_soft_swap(PTR_ALIGN(skb->data, 4), skb->len + 2);
2455         dma_addr = dma_map_single(&mdp->pdev->dev, skb->data, skb->len,
2456                                   DMA_TO_DEVICE);
2457         if (dma_mapping_error(&mdp->pdev->dev, dma_addr)) {
2458                 kfree_skb(skb);
2459                 return NETDEV_TX_OK;
2460         }
2461         txdesc->addr = cpu_to_le32(dma_addr);
2462         txdesc->len  = cpu_to_le32(skb->len << 16);
2463
2464         dma_wmb(); /* TACT bit must be set after all the above writes */
2465         if (entry >= mdp->num_tx_ring - 1)
2466                 txdesc->status |= cpu_to_le32(TD_TACT | TD_TDLE);
2467         else
2468                 txdesc->status |= cpu_to_le32(TD_TACT);
2469
2470         mdp->cur_tx++;
2471
2472         if (!(sh_eth_read(ndev, EDTRR) & sh_eth_get_edtrr_trns(mdp)))
2473                 sh_eth_write(ndev, sh_eth_get_edtrr_trns(mdp), EDTRR);
2474
2475         return NETDEV_TX_OK;
2476 }
2477
2478 /* The statistics registers have write-clear behaviour, which means we
2479  * will lose any increment between the read and write.  We mitigate
2480  * this by only clearing when we read a non-zero value, so we will
2481  * never falsely report a total of zero.
2482  */
2483 static void
2484 sh_eth_update_stat(struct net_device *ndev, unsigned long *stat, int reg)
2485 {
2486         u32 delta = sh_eth_read(ndev, reg);
2487
2488         if (delta) {
2489                 *stat += delta;
2490                 sh_eth_write(ndev, 0, reg);
2491         }
2492 }
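
/* Worked example of the scheme described above: if TROCR reads back 3,
 * the 3 is accumulated and 0 is written to rearm the counter; if it reads
 * back 0, nothing is written, so the total can never be clobbered to a
 * false zero.  An increment landing between a non-zero read and the clear
 * is still lost, as noted above.
 */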
2493
2494 static struct net_device_stats *sh_eth_get_stats(struct net_device *ndev)
2495 {
2496         struct sh_eth_private *mdp = netdev_priv(ndev);
2497
2498         if (sh_eth_is_rz_fast_ether(mdp))
2499                 return &ndev->stats;
2500
2501         if (!mdp->is_opened)
2502                 return &ndev->stats;
2503
2504         sh_eth_update_stat(ndev, &ndev->stats.tx_dropped, TROCR);
2505         sh_eth_update_stat(ndev, &ndev->stats.collisions, CDCR);
2506         sh_eth_update_stat(ndev, &ndev->stats.tx_carrier_errors, LCCR);
2507
2508         if (sh_eth_is_gether(mdp)) {
2509                 sh_eth_update_stat(ndev, &ndev->stats.tx_carrier_errors,
2510                                    CERCR);
2511                 sh_eth_update_stat(ndev, &ndev->stats.tx_carrier_errors,
2512                                    CEECR);
2513         } else {
2514                 sh_eth_update_stat(ndev, &ndev->stats.tx_carrier_errors,
2515                                    CNDCR);
2516         }
2517
2518         return &ndev->stats;
2519 }
2520
2521 /* device close function */
2522 static int sh_eth_close(struct net_device *ndev)
2523 {
2524         struct sh_eth_private *mdp = netdev_priv(ndev);
2525
2526         netif_stop_queue(ndev);
2527
2528         /* Serialise with the interrupt handler and NAPI, then disable
2529          * interrupts.  We have to clear the irq_enabled flag first to
2530          * ensure that interrupts won't be re-enabled.
2531          */
2532         mdp->irq_enabled = false;
2533         synchronize_irq(ndev->irq);
2534         napi_disable(&mdp->napi);
2535         sh_eth_write(ndev, 0x0000, EESIPR);
2536
2537         sh_eth_dev_exit(ndev);
2538
2539         /* PHY Disconnect */
2540         if (ndev->phydev) {
2541                 phy_stop(ndev->phydev);
2542                 phy_disconnect(ndev->phydev);
2543         }
2544
2545         free_irq(ndev->irq, ndev);
2546
2547         /* Free all the skbuffs in the Rx queue and the DMA buffer. */
2548         sh_eth_ring_free(ndev);
2549
2550         pm_runtime_put_sync(&mdp->pdev->dev);
2551
2552         mdp->is_opened = 0;
2553
2554         return 0;
2555 }
2556
2557 /* Device ioctl function */
2558 static int sh_eth_do_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd)
2559 {
2560         struct phy_device *phydev = ndev->phydev;
2561
2562         if (!netif_running(ndev))
2563                 return -EINVAL;
2564
2565         if (!phydev)
2566                 return -ENODEV;
2567
2568         return phy_mii_ioctl(phydev, rq, cmd);
2569 }
2570
2571 static int sh_eth_change_mtu(struct net_device *ndev, int new_mtu)
2572 {
2573         if (netif_running(ndev))
2574                 return -EBUSY;
2575
2576         ndev->mtu = new_mtu;
2577         netdev_update_features(ndev);
2578
2579         return 0;
2580 }
2581
2582 /* For TSU_POSTn. Please refer to the manual about these (strange) bitfields */
2583 static void *sh_eth_tsu_get_post_reg_offset(struct sh_eth_private *mdp,
2584                                             int entry)
2585 {
2586         return sh_eth_tsu_get_offset(mdp, TSU_POST1) + (entry / 8 * 4);
2587 }
2588
2589 static u32 sh_eth_tsu_get_post_mask(int entry)
2590 {
2591         return 0x0f << (28 - ((entry % 8) * 4));
2592 }
2593
2594 static u32 sh_eth_tsu_get_post_bit(struct sh_eth_private *mdp, int entry)
2595 {
2596         return (0x08 >> (mdp->port << 1)) << (28 - ((entry % 8) * 4));
2597 }
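
/* Worked example of the layout these helpers encode: each TSU_POSTn word
 * holds eight 4-bit fields, entry 0 in bits 31..28 down to entry 7 in
 * bits 3..0.  For CAM entry 3 the field sits at shift 28 - 3 * 4 = 16,
 * so sh_eth_tsu_get_post_mask(3) is 0xf << 16, and get_post_bit() yields
 * 0x8 << 16 for port 0 or 0x2 << 16 for port 1.
 */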
2598
2599 static void sh_eth_tsu_enable_cam_entry_post(struct net_device *ndev,
2600                                              int entry)
2601 {
2602         struct sh_eth_private *mdp = netdev_priv(ndev);
2603         u32 tmp;
2604         void *reg_offset;
2605
2606         reg_offset = sh_eth_tsu_get_post_reg_offset(mdp, entry);
2607         tmp = ioread32(reg_offset);
2608         iowrite32(tmp | sh_eth_tsu_get_post_bit(mdp, entry), reg_offset);
2609 }
2610
2611 static bool sh_eth_tsu_disable_cam_entry_post(struct net_device *ndev,
2612                                               int entry)
2613 {
2614         struct sh_eth_private *mdp = netdev_priv(ndev);
2615         u32 post_mask, ref_mask, tmp;
2616         void *reg_offset;
2617
2618         reg_offset = sh_eth_tsu_get_post_reg_offset(mdp, entry);
2619         post_mask = sh_eth_tsu_get_post_mask(entry);
2620         ref_mask = sh_eth_tsu_get_post_bit(mdp, entry) & ~post_mask;
2621
2622         tmp = ioread32(reg_offset);
2623         iowrite32(tmp & ~post_mask, reg_offset);
2624
2625         /* If the other port still has the entry enabled, return "true" */
2626         return tmp & ref_mask;
2627 }
2628
2629 static int sh_eth_tsu_busy(struct net_device *ndev)
2630 {
2631         int timeout = SH_ETH_TSU_TIMEOUT_MS * 100;
2632         struct sh_eth_private *mdp = netdev_priv(ndev);
2633
2634         while ((sh_eth_tsu_read(mdp, TSU_ADSBSY) & TSU_ADSBSY_0)) {
2635                 udelay(10);
2636                 timeout--;
2637                 if (timeout <= 0) {
2638                         netdev_err(ndev, "%s: timeout\n", __func__);
2639                         return -ETIMEDOUT;
2640                 }
2641         }
2642
2643         return 0;
2644 }
2645
2646 static int sh_eth_tsu_write_entry(struct net_device *ndev, void *reg,
2647                                   const u8 *addr)
2648 {
2649         u32 val;
2650
2651         val = addr[0] << 24 | addr[1] << 16 | addr[2] << 8 | addr[3];
2652         iowrite32(val, reg);
2653         if (sh_eth_tsu_busy(ndev) < 0)
2654                 return -EBUSY;
2655
2656         val = addr[4] << 8 | addr[5];
2657         iowrite32(val, reg + 4);
2658         if (sh_eth_tsu_busy(ndev) < 0)
2659                 return -EBUSY;
2660
2661         return 0;
2662 }
2663
2664 static void sh_eth_tsu_read_entry(void *reg, u8 *addr)
2665 {
2666         u32 val;
2667
2668         val = ioread32(reg);
2669         addr[0] = (val >> 24) & 0xff;
2670         addr[1] = (val >> 16) & 0xff;
2671         addr[2] = (val >> 8) & 0xff;
2672         addr[3] = val & 0xff;
2673         val = ioread32(reg + 4);
2674         addr[4] = (val >> 8) & 0xff;
2675         addr[5] = val & 0xff;
2676 }
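
/* Packing sketch: for the address 00:11:22:33:44:55 the two words hold
 * 0x00112233 (first four octets) and 0x00004455 (last two octets),
 * matching the shifts in the write/read helpers above.
 */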
2677
2679 static int sh_eth_tsu_find_entry(struct net_device *ndev, const u8 *addr)
2680 {
2681         struct sh_eth_private *mdp = netdev_priv(ndev);
2682         void *reg_offset = sh_eth_tsu_get_offset(mdp, TSU_ADRH0);
2683         int i;
2684         u8 c_addr[ETH_ALEN];
2685
2686         for (i = 0; i < SH_ETH_TSU_CAM_ENTRIES; i++, reg_offset += 8) {
2687                 sh_eth_tsu_read_entry(reg_offset, c_addr);
2688                 if (ether_addr_equal(addr, c_addr))
2689                         return i;
2690         }
2691
2692         return -ENOENT;
2693 }
2694
2695 static int sh_eth_tsu_find_empty(struct net_device *ndev)
2696 {
2697         u8 blank[ETH_ALEN];
2698         int entry;
2699
2700         memset(blank, 0, sizeof(blank));
2701         entry = sh_eth_tsu_find_entry(ndev, blank);
2702         return (entry < 0) ? -ENOMEM : entry;
2703 }
2704
2705 static int sh_eth_tsu_disable_cam_entry_table(struct net_device *ndev,
2706                                               int entry)
2707 {
2708         struct sh_eth_private *mdp = netdev_priv(ndev);
2709         void *reg_offset = sh_eth_tsu_get_offset(mdp, TSU_ADRH0);
2710         int ret;
2711         u8 blank[ETH_ALEN];
2712
2713         sh_eth_tsu_write(mdp, sh_eth_tsu_read(mdp, TSU_TEN) &
2714                          ~(1 << (31 - entry)), TSU_TEN);
2715
2716         memset(blank, 0, sizeof(blank));
2717         ret = sh_eth_tsu_write_entry(ndev, reg_offset + entry * 8, blank);
2718         if (ret < 0)
2719                 return ret;
2720         return 0;
2721 }
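
/* Note the bit order used above: CAM entry i corresponds to TSU_TEN bit
 * (31 - i), so entry 0 is enabled by bit 31 and entry 31 by bit 0.  The
 * entry is disabled before its address is blanked, which avoids matching
 * on a half-cleared address.
 */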
2722
2723 static int sh_eth_tsu_add_entry(struct net_device *ndev, const u8 *addr)
2724 {
2725         struct sh_eth_private *mdp = netdev_priv(ndev);
2726         void *reg_offset = sh_eth_tsu_get_offset(mdp, TSU_ADRH0);
2727         int i, ret;
2728
2729         if (!mdp->cd->tsu)
2730                 return 0;
2731
2732         i = sh_eth_tsu_find_entry(ndev, addr);
2733         if (i < 0) {
2734                 /* No entry found, create one */
2735                 i = sh_eth_tsu_find_empty(ndev);
2736                 if (i < 0)
2737                         return -ENOMEM;
2738                 ret = sh_eth_tsu_write_entry(ndev, reg_offset + i * 8, addr);
2739                 if (ret < 0)
2740                         return ret;
2741
2742                 /* Enable the entry */
2743                 sh_eth_tsu_write(mdp, sh_eth_tsu_read(mdp, TSU_TEN) |
2744                                  (1 << (31 - i)), TSU_TEN);
2745         }
2746
2747         /* Entry found or created, enable POST */
2748         sh_eth_tsu_enable_cam_entry_post(ndev, i);
2749
2750         return 0;
2751 }
2752
2753 static int sh_eth_tsu_del_entry(struct net_device *ndev, const u8 *addr)
2754 {
2755         struct sh_eth_private *mdp = netdev_priv(ndev);
2756         int i, ret;
2757
2758         if (!mdp->cd->tsu)
2759                 return 0;
2760
2761         i = sh_eth_tsu_find_entry(ndev, addr);
2762         if (i >= 0) {
2763                 /* Entry found */
2764                 if (sh_eth_tsu_disable_cam_entry_post(ndev, i))
2765                         goto done;
2766
2767                 /* Disable the entry if both ports were disabled */
2768                 ret = sh_eth_tsu_disable_cam_entry_table(ndev, i);
2769                 if (ret < 0)
2770                         return ret;
2771         }
2772 done:
2773         return 0;
2774 }
2775
2776 static int sh_eth_tsu_purge_all(struct net_device *ndev)
2777 {
2778         struct sh_eth_private *mdp = netdev_priv(ndev);
2779         int i, ret;
2780
2781         if (!mdp->cd->tsu)
2782                 return 0;
2783
2784         for (i = 0; i < SH_ETH_TSU_CAM_ENTRIES; i++) {
2785                 if (sh_eth_tsu_disable_cam_entry_post(ndev, i))
2786                         continue;
2787
2788                 /* Disable the entry if both ports were disabled */
2789                 ret = sh_eth_tsu_disable_cam_entry_table(ndev, i);
2790                 if (ret < 0)
2791                         return ret;
2792         }
2793
2794         return 0;
2795 }
2796
2797 static void sh_eth_tsu_purge_mcast(struct net_device *ndev)
2798 {
2799         struct sh_eth_private *mdp = netdev_priv(ndev);
2800         u8 addr[ETH_ALEN];
2801         void *reg_offset = sh_eth_tsu_get_offset(mdp, TSU_ADRH0);
2802         int i;
2803
2804         if (!mdp->cd->tsu)
2805                 return;
2806
2807         for (i = 0; i < SH_ETH_TSU_CAM_ENTRIES; i++, reg_offset += 8) {
2808                 sh_eth_tsu_read_entry(reg_offset, addr);
2809                 if (is_multicast_ether_addr(addr))
2810                         sh_eth_tsu_del_entry(ndev, addr);
2811         }
2812 }
2813
2814 /* Update promiscuous flag and multicast filter */
2815 static void sh_eth_set_rx_mode(struct net_device *ndev)
2816 {
2817         struct sh_eth_private *mdp = netdev_priv(ndev);
2818         u32 ecmr_bits;
2819         int mcast_all = 0;
2820         unsigned long flags;
2821
2822         spin_lock_irqsave(&mdp->lock, flags);
2823         /* Initial condition is MCT = 1, PRM = 0.
2824          * Depending on ndev->flags, set PRM or clear MCT
2825          */
2826         ecmr_bits = sh_eth_read(ndev, ECMR) & ~ECMR_PRM;
2827         if (mdp->cd->tsu)
2828                 ecmr_bits |= ECMR_MCT;
2829
2830         if (!(ndev->flags & IFF_MULTICAST)) {
2831                 sh_eth_tsu_purge_mcast(ndev);
2832                 mcast_all = 1;
2833         }
2834         if (ndev->flags & IFF_ALLMULTI) {
2835                 sh_eth_tsu_purge_mcast(ndev);
2836                 ecmr_bits &= ~ECMR_MCT;
2837                 mcast_all = 1;
2838         }
2839
2840         if (ndev->flags & IFF_PROMISC) {
2841                 sh_eth_tsu_purge_all(ndev);
2842                 ecmr_bits = (ecmr_bits & ~ECMR_MCT) | ECMR_PRM;
2843         } else if (mdp->cd->tsu) {
2844                 struct netdev_hw_addr *ha;
2845                 netdev_for_each_mc_addr(ha, ndev) {
2846                         if (mcast_all && is_multicast_ether_addr(ha->addr))
2847                                 continue;
2848
2849                         if (sh_eth_tsu_add_entry(ndev, ha->addr) < 0) {
2850                                 if (!mcast_all) {
2851                                         sh_eth_tsu_purge_mcast(ndev);
2852                                         ecmr_bits &= ~ECMR_MCT;
2853                                         mcast_all = 1;
2854                                 }
2855                         }
2856                 }
2857         }
2858
2859         /* update the ethernet mode */
2860         sh_eth_write(ndev, ecmr_bits, ECMR);
2861
2862         spin_unlock_irqrestore(&mdp->lock, flags);
2863 }
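
/* Summary sketch of the ECMR settings chosen above on TSU-equipped parts:
 *
 *	default:      MCT = 1, PRM = 0  (multicast filtered via CAM/POST)
 *	IFF_ALLMULTI: MCT = 0, PRM = 0  (all multicast accepted)
 *	IFF_PROMISC:  MCT = 0, PRM = 1  (all frames accepted)
 *
 * Parts without a TSU never set MCT and thus accept all multicast.
 */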
2864
2865 static int sh_eth_get_vtag_index(struct sh_eth_private *mdp)
2866 {
2867         if (!mdp->port)
2868                 return TSU_VTAG0;
2869         else
2870                 return TSU_VTAG1;
2871 }
2872
2873 static int sh_eth_vlan_rx_add_vid(struct net_device *ndev,
2874                                   __be16 proto, u16 vid)
2875 {
2876         struct sh_eth_private *mdp = netdev_priv(ndev);
2877         int vtag_reg_index = sh_eth_get_vtag_index(mdp);
2878
2879         if (unlikely(!mdp->cd->tsu))
2880                 return -EPERM;
2881
2882         /* No filtering if vid = 0 */
2883         if (!vid)
2884                 return 0;
2885
2886         mdp->vlan_num_ids++;
2887
2888         /* The controller has only one VLAN tag HW filter. So, if the filter
2889          * is already enabled, the driver disables it and every VLAN ID is
2890          * then accepted unfiltered. */
2891         if (mdp->vlan_num_ids > 1) {
2892                 /* disable VLAN filter */
2893                 sh_eth_tsu_write(mdp, 0, vtag_reg_index);
2894                 return 0;
2895         }
2896
2897         sh_eth_tsu_write(mdp, TSU_VTAG_ENABLE | (vid & TSU_VTAG_VID_MASK),
2898                          vtag_reg_index);
2899
2900         return 0;
2901 }
2902
2903 static int sh_eth_vlan_rx_kill_vid(struct net_device *ndev,
2904                                    __be16 proto, u16 vid)
2905 {
2906         struct sh_eth_private *mdp = netdev_priv(ndev);
2907         int vtag_reg_index = sh_eth_get_vtag_index(mdp);
2908
2909         if (unlikely(!mdp->cd->tsu))
2910                 return -EPERM;
2911
2912         /* No filtering if vid = 0 */
2913         if (!vid)
2914                 return 0;
2915
2916         mdp->vlan_num_ids--;
2917         sh_eth_tsu_write(mdp, 0, vtag_reg_index);
2918
2919         return 0;
2920 }
2921
2922 /* SuperH's TSU register init function */
2923 static void sh_eth_tsu_init(struct sh_eth_private *mdp)
2924 {
2925         if (sh_eth_is_rz_fast_ether(mdp)) {
2926                 sh_eth_tsu_write(mdp, 0, TSU_TEN); /* Disable all CAM entry */
2927                 sh_eth_tsu_write(mdp, TSU_FWSLC_POSTENU | TSU_FWSLC_POSTENL,
2928                                  TSU_FWSLC);    /* Enable POST registers */
2929                 return;
2930         }
2931
2932         sh_eth_tsu_write(mdp, 0, TSU_FWEN0);    /* Disable forward(0->1) */
2933         sh_eth_tsu_write(mdp, 0, TSU_FWEN1);    /* Disable forward(1->0) */
2934         sh_eth_tsu_write(mdp, 0, TSU_FCM);      /* forward fifo 3k-3k */
2935         sh_eth_tsu_write(mdp, 0xc, TSU_BSYSL0);
2936         sh_eth_tsu_write(mdp, 0xc, TSU_BSYSL1);
2937         sh_eth_tsu_write(mdp, 0, TSU_PRISL0);
2938         sh_eth_tsu_write(mdp, 0, TSU_PRISL1);
2939         sh_eth_tsu_write(mdp, 0, TSU_FWSL0);
2940         sh_eth_tsu_write(mdp, 0, TSU_FWSL1);
2941         sh_eth_tsu_write(mdp, TSU_FWSLC_POSTENU | TSU_FWSLC_POSTENL, TSU_FWSLC);
2942         if (sh_eth_is_gether(mdp)) {
2943                 sh_eth_tsu_write(mdp, 0, TSU_QTAG0);    /* Disable QTAG(0->1) */
2944                 sh_eth_tsu_write(mdp, 0, TSU_QTAG1);    /* Disable QTAG(1->0) */
2945         } else {
2946                 sh_eth_tsu_write(mdp, 0, TSU_QTAGM0);   /* Disable QTAG(0->1) */
2947                 sh_eth_tsu_write(mdp, 0, TSU_QTAGM1);   /* Disable QTAG(1->0) */
2948         }
2949         sh_eth_tsu_write(mdp, 0, TSU_FWSR);     /* all interrupt status clear */
2950         sh_eth_tsu_write(mdp, 0, TSU_FWINMK);   /* Disable all interrupt */
2951         sh_eth_tsu_write(mdp, 0, TSU_TEN);      /* Disable all CAM entry */
2952         sh_eth_tsu_write(mdp, 0, TSU_POST1);    /* Disable CAM entry [ 0- 7] */
2953         sh_eth_tsu_write(mdp, 0, TSU_POST2);    /* Disable CAM entry [ 8-15] */
2954         sh_eth_tsu_write(mdp, 0, TSU_POST3);    /* Disable CAM entry [16-23] */
2955         sh_eth_tsu_write(mdp, 0, TSU_POST4);    /* Disable CAM entry [24-31] */
2956 }
2957
2958 /* MDIO bus release function */
2959 static int sh_mdio_release(struct sh_eth_private *mdp)
2960 {
2961         /* unregister mdio bus */
2962         mdiobus_unregister(mdp->mii_bus);
2963
2964         /* free bitbang info */
2965         free_mdio_bitbang(mdp->mii_bus);
2966
2967         return 0;
2968 }
2969
2970 /* MDIO bus init function */
2971 static int sh_mdio_init(struct sh_eth_private *mdp,
2972                         struct sh_eth_plat_data *pd)
2973 {
2974         int ret;
2975         struct bb_info *bitbang;
2976         struct platform_device *pdev = mdp->pdev;
2977         struct device *dev = &mdp->pdev->dev;
2978
2979         /* create bit control struct for PHY */
2980         bitbang = devm_kzalloc(dev, sizeof(struct bb_info), GFP_KERNEL);
2981         if (!bitbang)
2982                 return -ENOMEM;
2983
2984         /* bitbang init */
2985         bitbang->addr = mdp->addr + mdp->reg_offset[PIR];
2986         bitbang->set_gate = pd->set_mdio_gate;
2987         bitbang->ctrl.ops = &bb_ops;
2988
2989         /* MII controller setting */
2990         mdp->mii_bus = alloc_mdio_bitbang(&bitbang->ctrl);
2991         if (!mdp->mii_bus)
2992                 return -ENOMEM;
2993
2994         /* Hook up MII support for ethtool */
2995         mdp->mii_bus->name = "sh_mii";
2996         mdp->mii_bus->parent = dev;
2997         snprintf(mdp->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x",
2998                  pdev->name, pdev->id);
2999
3000         /* register MDIO bus */
3001         if (dev->of_node) {
3002                 ret = of_mdiobus_register(mdp->mii_bus, dev->of_node);
3003         } else {
3004                 if (pd->phy_irq > 0)
3005                         mdp->mii_bus->irq[pd->phy] = pd->phy_irq;
3006
3007                 ret = mdiobus_register(mdp->mii_bus);
3008         }
3009
3010         if (ret)
3011                 goto out_free_bus;
3012
3013         return 0;
3014
3015 out_free_bus:
3016         free_mdio_bitbang(mdp->mii_bus);
3017         return ret;
3018 }
3019
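/* Map a register layout type to its offset table; returns NULL for an
 * unknown type so the caller can fail the probe.
 */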
3020 static const u16 *sh_eth_get_register_offset(int register_type)
3021 {
3022         const u16 *reg_offset = NULL;
3023
3024         switch (register_type) {
3025         case SH_ETH_REG_GIGABIT:
3026                 reg_offset = sh_eth_offset_gigabit;
3027                 break;
3028         case SH_ETH_REG_FAST_RZ:
3029                 reg_offset = sh_eth_offset_fast_rz;
3030                 break;
3031         case SH_ETH_REG_FAST_RCAR:
3032                 reg_offset = sh_eth_offset_fast_rcar;
3033                 break;
3034         case SH_ETH_REG_FAST_SH4:
3035                 reg_offset = sh_eth_offset_fast_sh4;
3036                 break;
3037         case SH_ETH_REG_FAST_SH3_SH2:
3038                 reg_offset = sh_eth_offset_fast_sh3_sh2;
3039                 break;
3040         }
3041
3042         return reg_offset;
3043 }
3044
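/* net_device operations for controllers without a TSU */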
3045 static const struct net_device_ops sh_eth_netdev_ops = {
3046         .ndo_open               = sh_eth_open,
3047         .ndo_stop               = sh_eth_close,
3048         .ndo_start_xmit         = sh_eth_start_xmit,
3049         .ndo_get_stats          = sh_eth_get_stats,
3050         .ndo_set_rx_mode        = sh_eth_set_rx_mode,
3051         .ndo_tx_timeout         = sh_eth_tx_timeout,
3052         .ndo_do_ioctl           = sh_eth_do_ioctl,
3053         .ndo_change_mtu         = sh_eth_change_mtu,
3054         .ndo_validate_addr      = eth_validate_addr,
3055         .ndo_set_mac_address    = eth_mac_addr,
3056 };
3057
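/* net_device operations for controllers with a TSU; identical to the
 * set above plus VLAN filter offload.
 */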
3058 static const struct net_device_ops sh_eth_netdev_ops_tsu = {
3059         .ndo_open               = sh_eth_open,
3060         .ndo_stop               = sh_eth_close,
3061         .ndo_start_xmit         = sh_eth_start_xmit,
3062         .ndo_get_stats          = sh_eth_get_stats,
3063         .ndo_set_rx_mode        = sh_eth_set_rx_mode,
3064         .ndo_vlan_rx_add_vid    = sh_eth_vlan_rx_add_vid,
3065         .ndo_vlan_rx_kill_vid   = sh_eth_vlan_rx_kill_vid,
3066         .ndo_tx_timeout         = sh_eth_tx_timeout,
3067         .ndo_do_ioctl           = sh_eth_do_ioctl,
3068         .ndo_change_mtu         = sh_eth_change_mtu,
3069         .ndo_validate_addr      = eth_validate_addr,
3070         .ndo_set_mac_address    = eth_mac_addr,
3071 };
3072
3073 #ifdef CONFIG_OF
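/* Build platform data from the device tree; returns NULL on allocation
 * failure.
 */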
3074 static struct sh_eth_plat_data *sh_eth_parse_dt(struct device *dev)
3075 {
3076         struct device_node *np = dev->of_node;
3077         struct sh_eth_plat_data *pdata;
3078         const char *mac_addr;
3079
3080         pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
3081         if (!pdata)
3082                 return NULL;
3083
3084         pdata->phy_interface = of_get_phy_mode(np);
3085
3086         mac_addr = of_get_mac_address(np);
3087         if (mac_addr)
3088                 memcpy(pdata->mac_addr, mac_addr, ETH_ALEN);
3089
3090         pdata->no_ether_link =
3091                 of_property_read_bool(np, "renesas,no-ether-link");
3092         pdata->ether_link_active_low =
3093                 of_property_read_bool(np, "renesas,ether-link-active-low");
3094
3095         return pdata;
3096 }
3097
3098 static const struct of_device_id sh_eth_match_table[] = {
3099         { .compatible = "renesas,gether-r8a7740", .data = &r8a7740_data },
3100         { .compatible = "renesas,ether-r8a7743", .data = &rcar_gen2_data },
3101         { .compatible = "renesas,ether-r8a7745", .data = &rcar_gen2_data },
3102         { .compatible = "renesas,ether-r8a7778", .data = &rcar_gen1_data },
3103         { .compatible = "renesas,ether-r8a7779", .data = &rcar_gen1_data },
3104         { .compatible = "renesas,ether-r8a7790", .data = &rcar_gen2_data },
3105         { .compatible = "renesas,ether-r8a7791", .data = &rcar_gen2_data },
3106         { .compatible = "renesas,ether-r8a7793", .data = &rcar_gen2_data },
3107         { .compatible = "renesas,ether-r8a7794", .data = &rcar_gen2_data },
3108         { .compatible = "renesas,ether-r7s72100", .data = &r7s72100_data },
3109         { .compatible = "renesas,rcar-gen1-ether", .data = &rcar_gen1_data },
3110         { .compatible = "renesas,rcar-gen2-ether", .data = &rcar_gen2_data },
3111         { }
3112 };
3113 MODULE_DEVICE_TABLE(of, sh_eth_match_table);
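/* For reference, an illustrative device tree node matching this table
 * (register address, interrupt and PHY values are placeholders, not
 * board-accurate; see the DT bindings for real examples):
 *
 *      ethernet@ee700000 {
 *              compatible = "renesas,ether-r8a7791",
 *                           "renesas,rcar-gen2-ether";
 *              reg = <0 0xee700000 0 0x400>;
 *              interrupts = <GIC_SPI 162 IRQ_TYPE_LEVEL_HIGH>;
 *              phy-mode = "rmii";
 *              phy-handle = <&phy1>;
 *      };
 */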
3114 #else
3115 static inline struct sh_eth_plat_data *sh_eth_parse_dt(struct device *dev)
3116 {
3117         return NULL;
3118 }
3119 #endif
3120
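/* Probe: map the controller registers, resolve the per-SoC data and
 * register layout, set up the MDIO bus and register the net device.
 */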
3121 static int sh_eth_drv_probe(struct platform_device *pdev)
3122 {
3123         struct resource *res;
3124         struct sh_eth_plat_data *pd = dev_get_platdata(&pdev->dev);
3125         const struct platform_device_id *id = platform_get_device_id(pdev);
3126         struct sh_eth_private *mdp;
3127         struct net_device *ndev;
3128         int ret, devno;
3129
3130         /* get the base address; devm_ioremap_resource() validates it below */
3131         res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
3132
3133         ndev = alloc_etherdev(sizeof(struct sh_eth_private));
3134         if (!ndev)
3135                 return -ENOMEM;
3136
3137         pm_runtime_enable(&pdev->dev);
3138         pm_runtime_get_sync(&pdev->dev);
3139
3140         devno = pdev->id;
3141         if (devno < 0)
3142                 devno = 0;
3143
3144         ret = platform_get_irq(pdev, 0);
3145         if (ret < 0)
3146                 goto out_release;
3147         ndev->irq = ret;
3148
3149         SET_NETDEV_DEV(ndev, &pdev->dev);
3150
3151         mdp = netdev_priv(ndev);
3152         mdp->num_tx_ring = TX_RING_SIZE;
3153         mdp->num_rx_ring = RX_RING_SIZE;
3154         mdp->addr = devm_ioremap_resource(&pdev->dev, res);
3155         if (IS_ERR(mdp->addr)) {
3156                 ret = PTR_ERR(mdp->addr);
3157                 goto out_release;
3158         }
3159
3160         /* Get clock, if not found that's OK but Wake-on-LAN is unavailable */
3161         mdp->clk = devm_clk_get(&pdev->dev, NULL);
3162         if (IS_ERR(mdp->clk))
3163                 mdp->clk = NULL;
3164
3165         ndev->base_addr = res->start;
3166
3167         spin_lock_init(&mdp->lock);
3168         mdp->pdev = pdev;
3169
3170         if (pdev->dev.of_node)
3171                 pd = sh_eth_parse_dt(&pdev->dev);
3172         if (!pd) {
3173                 dev_err(&pdev->dev, "no platform data\n");
3174                 ret = -EINVAL;
3175                 goto out_release;
3176         }
3177
3178         /* get PHY ID */
3179         mdp->phy_id = pd->phy;
3180         mdp->phy_interface = pd->phy_interface;
3181         mdp->no_ether_link = pd->no_ether_link;
3182         mdp->ether_link_active_low = pd->ether_link_active_low;
3183
3184         /* set cpu data */
3185         if (id)
3186                 mdp->cd = (struct sh_eth_cpu_data *)id->driver_data;
3187         else
3188                 mdp->cd = (struct sh_eth_cpu_data *)of_device_get_match_data(&pdev->dev);
3189
3190         mdp->reg_offset = sh_eth_get_register_offset(mdp->cd->register_type);
3191         if (!mdp->reg_offset) {
3192                 dev_err(&pdev->dev, "Unknown register type (%d)\n",
3193                         mdp->cd->register_type);
3194                 ret = -EINVAL;
3195                 goto out_release;
3196         }
3197         sh_eth_set_default_cpu_data(mdp->cd);
3198
3199         /* The user's manual states the max MTU should be 2048, but due to
3200          * the alignment calculations in sh_eth_ring_init() the practical
3201          * MTU is a bit less. Maybe this can be optimized some more.
3202          */
3203         ndev->max_mtu = 2000 - (ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN);
3204         ndev->min_mtu = ETH_MIN_MTU;
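        /* With ETH_HLEN = 14, VLAN_HLEN = 4 and ETH_FCS_LEN = 4 this
         * works out to a maximum MTU of 2000 - 22 = 1978 bytes.
         */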
3205
3206         /* set function */
3207         if (mdp->cd->tsu)
3208                 ndev->netdev_ops = &sh_eth_netdev_ops_tsu;
3209         else
3210                 ndev->netdev_ops = &sh_eth_netdev_ops;
3211         ndev->ethtool_ops = &sh_eth_ethtool_ops;
3212         ndev->watchdog_timeo = TX_TIMEOUT;
3213
3214         /* debug message level */
3215         mdp->msg_enable = SH_ETH_DEF_MSG_ENABLE;
3216
3217         /* read and set MAC address */
3218         read_mac_address(ndev, pd->mac_addr);
3219         if (!is_valid_ether_addr(ndev->dev_addr)) {
3220                 dev_warn(&pdev->dev,
3221                          "no valid MAC address supplied, using a random one.\n");
3222                 eth_hw_addr_random(ndev);
3223         }
3224
3225         /* ioremap the TSU registers */
3226         if (mdp->cd->tsu) {
3227                 struct resource *rtsu;
3228                 rtsu = platform_get_resource(pdev, IORESOURCE_MEM, 1);
3229                 mdp->tsu_addr = devm_ioremap_resource(&pdev->dev, rtsu);
3230                 if (IS_ERR(mdp->tsu_addr)) {
3231                         ret = PTR_ERR(mdp->tsu_addr);
3232                         goto out_release;
3233                 }
3234                 mdp->port = devno % 2;
3235                 ndev->features = NETIF_F_HW_VLAN_CTAG_FILTER;
3236         }
3237
3238         /* initialize the first device, or any device flagged as needing init */
3239         if (!devno || pd->needs_init) {
3240                 if (mdp->cd->chip_reset)
3241                         mdp->cd->chip_reset(ndev);
3242
3243                 if (mdp->cd->tsu) {
3244                 /* TSU init (init only) */
3245                         sh_eth_tsu_init(mdp);
3246                 }
3247         }
3248
3249         if (mdp->cd->rmiimode)
3250                 sh_eth_write(ndev, 0x1, RMIIMODE);
3251
3252         /* MDIO bus init */
3253         ret = sh_mdio_init(mdp, pd);
3254         if (ret) {
3255                 if (ret != -EPROBE_DEFER)
3256                         dev_err(&pdev->dev, "MDIO init failed: %d\n", ret);
3257                 goto out_release;
3258         }
3259
3260         netif_napi_add(ndev, &mdp->napi, sh_eth_poll, 64);
3261
3262         /* network device register */
3263         ret = register_netdev(ndev);
3264         if (ret)
3265                 goto out_napi_del;
3266
3267         if (mdp->cd->magic && mdp->clk)
3268                 device_set_wakeup_capable(&pdev->dev, 1);
3269
3270         /* print device information */
3271         netdev_info(ndev, "Base address at 0x%x, %pM, IRQ %d.\n",
3272                     (u32)ndev->base_addr, ndev->dev_addr, ndev->irq);
3273
3274         pm_runtime_put(&pdev->dev);
3275         platform_set_drvdata(pdev, ndev);
3276
3277         return ret;
3278
3279 out_napi_del:
3280         netif_napi_del(&mdp->napi);
3281         sh_mdio_release(mdp);
3282
3283 out_release:
3284         /* ndev cannot be NULL here, so free it unconditionally */
3285         free_netdev(ndev);
3287
3288         pm_runtime_put(&pdev->dev);
3289         pm_runtime_disable(&pdev->dev);
3290         return ret;
3291 }
3292
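/* Remove: tear the device down in the reverse order of probe */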
3293 static int sh_eth_drv_remove(struct platform_device *pdev)
3294 {
3295         struct net_device *ndev = platform_get_drvdata(pdev);
3296         struct sh_eth_private *mdp = netdev_priv(ndev);
3297
3298         unregister_netdev(ndev);
3299         netif_napi_del(&mdp->napi);
3300         sh_mdio_release(mdp);
3301         pm_runtime_disable(&pdev->dev);
3302         free_netdev(ndev);
3303
3304         return 0;
3305 }
3306
3307 #ifdef CONFIG_PM
3308 #ifdef CONFIG_PM_SLEEP
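/* Arm the controller for Wake-on-LAN: allow only the ECI interrupt
 * (which reports MagicPacket detection), enable MagicPacket reception
 * and keep the clock running across suspend.
 */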
3309 static int sh_eth_wol_setup(struct net_device *ndev)
3310 {
3311         struct sh_eth_private *mdp = netdev_priv(ndev);
3312
3313         /* Only allow ECI interrupts */
3314         synchronize_irq(ndev->irq);
3315         napi_disable(&mdp->napi);
3316         sh_eth_write(ndev, EESIPR_ECIIP, EESIPR);
3317
3318         /* Enable MagicPacket */
3319         sh_eth_modify(ndev, ECMR, ECMR_MPDE, ECMR_MPDE);
3320
3321         /* Increase the clock usage count so the clock is not disabled in suspend */
3322         clk_enable(mdp->clk);
3323
3324         return enable_irq_wake(ndev->irq);
3325 }
3326
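/* Undo the WoL setup and return the device to normal operation */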
3327 static int sh_eth_wol_restore(struct net_device *ndev)
3328 {
3329         struct sh_eth_private *mdp = netdev_priv(ndev);
3330         int ret;
3331
3332         napi_enable(&mdp->napi);
3333
3334         /* Disable MagicPacket */
3335         sh_eth_modify(ndev, ECMR, ECMR_MPDE, 0);
3336
3337         /* The device needs to be reset to restore MagicPacket logic
3338          * for the next wakeup. Closing and reopening the device both
3339          * resets it and restores all registers, which is also what
3340          * happens during suspend and resume without WoL enabled.
3341          */
3342         ret = sh_eth_close(ndev);
3343         if (ret < 0)
3344                 return ret;
3345         ret = sh_eth_open(ndev);
3346         if (ret < 0)
3347                 return ret;
3348
3349         /* Restore clock usage count */
3350         clk_disable(mdp->clk);
3351
3352         return disable_irq_wake(ndev->irq);
3353 }
3354
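/* System suspend: detach the interface, then either arm Wake-on-LAN or
 * close the device entirely.
 */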
3355 static int sh_eth_suspend(struct device *dev)
3356 {
3357         struct net_device *ndev = dev_get_drvdata(dev);
3358         struct sh_eth_private *mdp = netdev_priv(ndev);
3359         int ret = 0;
3360
3361         if (!netif_running(ndev))
3362                 return 0;
3363
3364         netif_device_detach(ndev);
3365
3366         if (mdp->wol_enabled)
3367                 ret = sh_eth_wol_setup(ndev);
3368         else
3369                 ret = sh_eth_close(ndev);
3370
3371         return ret;
3372 }
3373
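/* System resume: restore from Wake-on-LAN or reopen the device, then
 * reattach the interface.
 */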
3374 static int sh_eth_resume(struct device *dev)
3375 {
3376         struct net_device *ndev = dev_get_drvdata(dev);
3377         struct sh_eth_private *mdp = netdev_priv(ndev);
3378         int ret = 0;
3379
3380         if (!netif_running(ndev))
3381                 return 0;
3382
3383         if (mdp->wol_enabled)
3384                 ret = sh_eth_wol_restore(ndev);
3385         else
3386                 ret = sh_eth_open(ndev);
3387
3388         if (ret < 0)
3389                 return ret;
3390
3391         netif_device_attach(ndev);
3392
3393         return ret;
3394 }
3395 #endif
3396
3397 static int sh_eth_runtime_nop(struct device *dev)
3398 {
3399         /* Runtime PM callback shared between ->runtime_suspend()
3400          * and ->runtime_resume(). Simply returns success.
3401          *
3402          * This driver re-initializes all registers after
3403          * pm_runtime_get_sync() anyway so there is no need
3404          * to save and restore registers here.
3405          */
3406         return 0;
3407 }
3408
3409 static const struct dev_pm_ops sh_eth_dev_pm_ops = {
3410         SET_SYSTEM_SLEEP_PM_OPS(sh_eth_suspend, sh_eth_resume)
3411         SET_RUNTIME_PM_OPS(sh_eth_runtime_nop, sh_eth_runtime_nop, NULL)
3412 };
3413 #define SH_ETH_PM_OPS (&sh_eth_dev_pm_ops)
3414 #else
3415 #define SH_ETH_PM_OPS NULL
3416 #endif
3417
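/* Legacy (non-DT) platform device names, mapped to their per-SoC data */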
3418 static const struct platform_device_id sh_eth_id_table[] = {
3419         { "sh7619-ether", (kernel_ulong_t)&sh7619_data },
3420         { "sh771x-ether", (kernel_ulong_t)&sh771x_data },
3421         { "sh7724-ether", (kernel_ulong_t)&sh7724_data },
3422         { "sh7734-gether", (kernel_ulong_t)&sh7734_data },
3423         { "sh7757-ether", (kernel_ulong_t)&sh7757_data },
3424         { "sh7757-gether", (kernel_ulong_t)&sh7757_data_giga },
3425         { "sh7763-gether", (kernel_ulong_t)&sh7763_data },
3426         { }
3427 };
3428 MODULE_DEVICE_TABLE(platform, sh_eth_id_table);
3429
3430 static struct platform_driver sh_eth_driver = {
3431         .probe = sh_eth_drv_probe,
3432         .remove = sh_eth_drv_remove,
3433         .id_table = sh_eth_id_table,
3434         .driver = {
3435                    .name = CARDNAME,
3436                    .pm = SH_ETH_PM_OPS,
3437                    .of_match_table = of_match_ptr(sh_eth_match_table),
3438         },
3439 };
3440
3441 module_platform_driver(sh_eth_driver);
3442
3443 MODULE_AUTHOR("Nobuhiro Iwamatsu, Yoshihiro Shimoda");
3444 MODULE_DESCRIPTION("Renesas SuperH Ethernet driver");
3445 MODULE_LICENSE("GPL v2");