Merge branch 'for-linus' of git://android.kernel.org/kernel/tegra
[sfrench/cifs-2.6.git] / drivers / net / ibm_newemac / core.c
1 /*
2  * drivers/net/ibm_newemac/core.c
3  *
4  * Driver for PowerPC 4xx on-chip ethernet controller.
5  *
6  * Copyright 2007 Benjamin Herrenschmidt, IBM Corp.
7  *                <benh@kernel.crashing.org>
8  *
9  * Based on the arch/ppc version of the driver:
10  *
11  * Copyright (c) 2004, 2005 Zultys Technologies.
12  * Eugene Surovegin <eugene.surovegin@zultys.com> or <ebs@ebshome.net>
13  *
14  * Based on original work by
15  *      Matt Porter <mporter@kernel.crashing.org>
16  *      (c) 2003 Benjamin Herrenschmidt <benh@kernel.crashing.org>
17  *      Armin Kuster <akuster@mvista.com>
18  *      Johnnie Peters <jpeters@mvista.com>
19  *
20  * This program is free software; you can redistribute  it and/or modify it
21  * under  the terms of  the GNU General  Public License as published by the
22  * Free Software Foundation;  either version 2 of the  License, or (at your
23  * option) any later version.
24  *
25  */
26
27 #include <linux/module.h>
28 #include <linux/sched.h>
29 #include <linux/string.h>
30 #include <linux/errno.h>
31 #include <linux/delay.h>
32 #include <linux/types.h>
33 #include <linux/pci.h>
34 #include <linux/etherdevice.h>
35 #include <linux/skbuff.h>
36 #include <linux/crc32.h>
37 #include <linux/ethtool.h>
38 #include <linux/mii.h>
39 #include <linux/bitops.h>
40 #include <linux/workqueue.h>
41 #include <linux/of.h>
42 #include <linux/slab.h>
43
44 #include <asm/processor.h>
45 #include <asm/io.h>
46 #include <asm/dma.h>
47 #include <asm/uaccess.h>
48 #include <asm/dcr.h>
49 #include <asm/dcr-regs.h>
50
51 #include "core.h"
52
53 /*
54  * Lack of dma_unmap_???? calls is intentional.
55  *
56  * API-correct usage requires additional support state information to be
57  * maintained for every RX and TX buffer descriptor (BD). Unfortunately, due to
58  * EMAC design (e.g. TX buffer passed from network stack can be split into
59  * several BDs, dma_map_single/dma_map_page can be used to map particular BD),
60  * maintaining such information will add additional overhead.
61  * Current DMA API implementation for 4xx processors only ensures cache coherency
62  * and dma_unmap_???? routines are empty and are likely to stay this way.
63  * I decided to omit dma_unmap_??? calls because I don't want to add additional
64  * complexity just for the sake of following some abstract API, when it doesn't
65  * add any real benefit to the driver. I understand that this decision maybe
66  * controversial, but I really tried to make code API-correct and efficient
67  * at the same time and didn't come up with code I liked :(.                --ebs
68  */
69
70 #define DRV_NAME        "emac"
71 #define DRV_VERSION     "3.54"
72 #define DRV_DESC        "PPC 4xx OCP EMAC driver"
73
74 MODULE_DESCRIPTION(DRV_DESC);
75 MODULE_AUTHOR
76     ("Eugene Surovegin <eugene.surovegin@zultys.com> or <ebs@ebshome.net>");
77 MODULE_LICENSE("GPL");
78
79 /*
80  * PPC64 doesn't (yet) have a cacheable_memcpy
81  */
82 #ifdef CONFIG_PPC64
83 #define cacheable_memcpy(d,s,n) memcpy((d),(s),(n))
84 #endif
85
86 /* minimum number of free TX descriptors required to wake up TX process */
87 #define EMAC_TX_WAKEUP_THRESH           (NUM_TX_BUFF / 4)
88
89 /* If packet size is less than this number, we allocate small skb and copy packet
90  * contents into it instead of just sending original big skb up
91  */
92 #define EMAC_RX_COPY_THRESH             CONFIG_IBM_NEW_EMAC_RX_COPY_THRESHOLD
93
94 /* Since multiple EMACs share MDIO lines in various ways, we need
95  * to avoid re-using the same PHY ID in cases where the arch didn't
96  * setup precise phy_map entries
97  *
98  * XXX This is something that needs to be reworked as we can have multiple
99  * EMAC "sets" (multiple ASICs containing several EMACs) though we can
100  * probably require in that case to have explicit PHY IDs in the device-tree
101  */
102 static u32 busy_phy_map;
103 static DEFINE_MUTEX(emac_phy_map_lock);
104
105 /* This is the wait queue used to wait on any event related to probe, that
106  * is discovery of MALs, other EMACs, ZMII/RGMIIs, etc...
107  */
108 static DECLARE_WAIT_QUEUE_HEAD(emac_probe_wait);
109
110 /* Having stable interface names is a doomed idea. However, it would be nice
111  * if we didn't have completely random interface names at boot too :-) It's
112  * just a matter of making everybody's life easier. Since we are doing
113  * threaded probing, it's a bit harder though. The base idea here is that
114  * we make up a list of all emacs in the device-tree before we register the
115  * driver. Every emac will then wait for the previous one in the list to
116  * initialize before itself. We should also keep that list ordered by
117  * cell_index.
118  * That list is only 4 entries long, meaning that additional EMACs don't
119  * get ordering guarantees unless EMAC_BOOT_LIST_SIZE is increased.
120  */
121
122 #define EMAC_BOOT_LIST_SIZE     4
123 static struct device_node *emac_boot_list[EMAC_BOOT_LIST_SIZE];
124
125 /* How long should I wait for dependent devices ? */
126 #define EMAC_PROBE_DEP_TIMEOUT  (HZ * 5)
127
128 /* I don't want to litter system log with timeout errors
129  * when we have brain-damaged PHY.
130  */
131 static inline void emac_report_timeout_error(struct emac_instance *dev,
132                                              const char *error)
133 {
134         if (emac_has_feature(dev, EMAC_FTR_440GX_PHY_CLK_FIX |
135                                   EMAC_FTR_460EX_PHY_CLK_FIX |
136                                   EMAC_FTR_440EP_PHY_CLK_FIX))
137                 DBG(dev, "%s" NL, error);
138         else if (net_ratelimit())
139                 printk(KERN_ERR "%s: %s\n", dev->ofdev->dev.of_node->full_name,
140                         error);
141 }
142
143 /* EMAC PHY clock workaround:
144  * 440EP/440GR has more sane SDR0_MFR register implementation than 440GX,
145  * which allows controlling each EMAC clock
146  */
/* 440EP PHY clock workaround: set this EMAC's clock-select bit in
 * SDR0_MFR (per-EMAC on 440EP/440GR, unlike 440GX).
 */
static inline void emac_rx_clk_tx(struct emac_instance *dev)
{
#ifdef CONFIG_PPC_DCR_NATIVE
	if (!emac_has_feature(dev, EMAC_FTR_440EP_PHY_CLK_FIX))
		return;

	dcri_clrset(SDR0, SDR0_MFR, 0, SDR0_MFR_ECS >> dev->cell_index);
#endif
}
155
/* 440EP PHY clock workaround: clear this EMAC's clock-select bit in
 * SDR0_MFR, restoring the default clock source.
 */
static inline void emac_rx_clk_default(struct emac_instance *dev)
{
#ifdef CONFIG_PPC_DCR_NATIVE
	if (!emac_has_feature(dev, EMAC_FTR_440EP_PHY_CLK_FIX))
		return;

	dcri_clrset(SDR0, SDR0_MFR, SDR0_MFR_ECS >> dev->cell_index, 0);
#endif
}
164
165 /* PHY polling intervals */
166 #define PHY_POLL_LINK_ON        HZ
167 #define PHY_POLL_LINK_OFF       (HZ / 5)
168
169 /* Graceful stop timeouts in us.
170  * We should allow up to 1 frame time (full-duplex, ignoring collisions)
171  */
172 #define STOP_TIMEOUT_10         1230
173 #define STOP_TIMEOUT_100        124
174 #define STOP_TIMEOUT_1000       13
175 #define STOP_TIMEOUT_1000_JUMBO 73
176
/* IEEE 802.3x MAC Control PAUSE destination address (01-80-C2-00-00-01) */
static unsigned char default_mcast_addr[] = {
        0x01, 0x80, 0xC2, 0x00, 0x00, 0x01
};
180
181 /* Please, keep in sync with struct ibm_emac_stats/ibm_emac_error_stats */
182 static const char emac_stats_keys[EMAC_ETHTOOL_STATS_COUNT][ETH_GSTRING_LEN] = {
183         "rx_packets", "rx_bytes", "tx_packets", "tx_bytes", "rx_packets_csum",
184         "tx_packets_csum", "tx_undo", "rx_dropped_stack", "rx_dropped_oom",
185         "rx_dropped_error", "rx_dropped_resize", "rx_dropped_mtu",
186         "rx_stopped", "rx_bd_errors", "rx_bd_overrun", "rx_bd_bad_packet",
187         "rx_bd_runt_packet", "rx_bd_short_event", "rx_bd_alignment_error",
188         "rx_bd_bad_fcs", "rx_bd_packet_too_long", "rx_bd_out_of_range",
189         "rx_bd_in_range", "rx_parity", "rx_fifo_overrun", "rx_overrun",
190         "rx_bad_packet", "rx_runt_packet", "rx_short_event",
191         "rx_alignment_error", "rx_bad_fcs", "rx_packet_too_long",
192         "rx_out_of_range", "rx_in_range", "tx_dropped", "tx_bd_errors",
193         "tx_bd_bad_fcs", "tx_bd_carrier_loss", "tx_bd_excessive_deferral",
194         "tx_bd_excessive_collisions", "tx_bd_late_collision",
195         "tx_bd_multple_collisions", "tx_bd_single_collision",
196         "tx_bd_underrun", "tx_bd_sqe", "tx_parity", "tx_underrun", "tx_sqe",
197         "tx_errors"
198 };
199
200 static irqreturn_t emac_irq(int irq, void *dev_instance);
201 static void emac_clean_tx_ring(struct emac_instance *dev);
202 static void __emac_set_multicast_list(struct emac_instance *dev);
203
204 static inline int emac_phy_supports_gige(int phy_mode)
205 {
206         return  phy_mode == PHY_MODE_GMII ||
207                 phy_mode == PHY_MODE_RGMII ||
208                 phy_mode == PHY_MODE_SGMII ||
209                 phy_mode == PHY_MODE_TBI ||
210                 phy_mode == PHY_MODE_RTBI;
211 }
212
213 static inline int emac_phy_gpcs(int phy_mode)
214 {
215         return  phy_mode == PHY_MODE_SGMII ||
216                 phy_mode == PHY_MODE_TBI ||
217                 phy_mode == PHY_MODE_RTBI;
218 }
219
220 static inline void emac_tx_enable(struct emac_instance *dev)
221 {
222         struct emac_regs __iomem *p = dev->emacp;
223         u32 r;
224
225         DBG(dev, "tx_enable" NL);
226
227         r = in_be32(&p->mr0);
228         if (!(r & EMAC_MR0_TXE))
229                 out_be32(&p->mr0, r | EMAC_MR0_TXE);
230 }
231
/* Stop the transmitter: clear MR0_TXE and busy-wait (up to
 * dev->stop_timeout microseconds) for the controller to acknowledge by
 * raising MR0_TXI.  A timeout is reported but not returned to the caller.
 */
static void emac_tx_disable(struct emac_instance *dev)
{
	struct emac_regs __iomem *p = dev->emacp;
	u32 r;

	DBG(dev, "tx_disable" NL);

	r = in_be32(&p->mr0);
	if (r & EMAC_MR0_TXE) {
		/* stop_timeout is in us, chosen per link speed in emac_configure() */
		int n = dev->stop_timeout;
		out_be32(&p->mr0, r & ~EMAC_MR0_TXE);
		/* MR0_TXI signals that the TX channel has become idle */
		while (!(in_be32(&p->mr0) & EMAC_MR0_TXI) && n) {
			udelay(1);
			--n;
		}
		if (unlikely(!n))
			emac_report_timeout_error(dev, "TX disable timeout");
	}
}
251
252 static void emac_rx_enable(struct emac_instance *dev)
253 {
254         struct emac_regs __iomem *p = dev->emacp;
255         u32 r;
256
257         if (unlikely(test_bit(MAL_COMMAC_RX_STOPPED, &dev->commac.flags)))
258                 goto out;
259
260         DBG(dev, "rx_enable" NL);
261
262         r = in_be32(&p->mr0);
263         if (!(r & EMAC_MR0_RXE)) {
264                 if (unlikely(!(r & EMAC_MR0_RXI))) {
265                         /* Wait if previous async disable is still in progress */
266                         int n = dev->stop_timeout;
267                         while (!(r = in_be32(&p->mr0) & EMAC_MR0_RXI) && n) {
268                                 udelay(1);
269                                 --n;
270                         }
271                         if (unlikely(!n))
272                                 emac_report_timeout_error(dev,
273                                                           "RX disable timeout");
274                 }
275                 out_be32(&p->mr0, r | EMAC_MR0_RXE);
276         }
277  out:
278         ;
279 }
280
/* Stop the receiver: clear MR0_RXE and busy-wait (up to
 * dev->stop_timeout microseconds) for the controller to acknowledge by
 * raising MR0_RXI.  A timeout is reported but not returned to the caller.
 */
static void emac_rx_disable(struct emac_instance *dev)
{
	struct emac_regs __iomem *p = dev->emacp;
	u32 r;

	DBG(dev, "rx_disable" NL);

	r = in_be32(&p->mr0);
	if (r & EMAC_MR0_RXE) {
		/* stop_timeout is in us, chosen per link speed in emac_configure() */
		int n = dev->stop_timeout;
		out_be32(&p->mr0, r & ~EMAC_MR0_RXE);
		/* MR0_RXI signals that the RX channel has become idle */
		while (!(in_be32(&p->mr0) & EMAC_MR0_RXI) && n) {
			udelay(1);
			--n;
		}
		if (unlikely(!n))
			emac_report_timeout_error(dev, "RX disable timeout");
	}
}
300
/* Quiesce the network interface: block multicast list updates, disable
 * NAPI polling and freeze the TX queue.  Counterpart of
 * emac_netif_start().
 */
static inline void emac_netif_stop(struct emac_instance *dev)
{
	netif_tx_lock_bh(dev->ndev);
	netif_addr_lock(dev->ndev);
	/* Defer multicast list changes until emac_netif_start() */
	dev->no_mcast = 1;
	netif_addr_unlock(dev->ndev);
	netif_tx_unlock_bh(dev->ndev);
	dev->ndev->trans_start = jiffies;	/* prevent tx timeout */
	mal_poll_disable(dev->mal, &dev->commac);
	netif_tx_disable(dev->ndev);
}
312
/* Re-enable the network interface after emac_netif_stop(): allow
 * multicast updates again (applying any update deferred while stopped),
 * wake the TX queue and resume NAPI polling.
 */
static inline void emac_netif_start(struct emac_instance *dev)
{
	netif_tx_lock_bh(dev->ndev);
	netif_addr_lock(dev->ndev);
	dev->no_mcast = 0;
	/* Flush a multicast list change that arrived while we were stopped */
	if (dev->mcast_pending && netif_running(dev->ndev))
		__emac_set_multicast_list(dev);
	netif_addr_unlock(dev->ndev);
	netif_tx_unlock_bh(dev->ndev);

	netif_wake_queue(dev->ndev);

	/* NOTE: unconditional netif_wake_queue is only appropriate
	 * so long as all callers are assured to have free tx slots
	 * (taken from tg3... though the case where that is wrong is
	 *  not terribly harmful)
	 */
	mal_poll_enable(dev->mal, &dev->commac);
}
332
333 static inline void emac_rx_disable_async(struct emac_instance *dev)
334 {
335         struct emac_regs __iomem *p = dev->emacp;
336         u32 r;
337
338         DBG(dev, "rx_disable_async" NL);
339
340         r = in_be32(&p->mr0);
341         if (r & EMAC_MR0_RXE)
342                 out_be32(&p->mr0, r & ~EMAC_MR0_RXE);
343 }
344
/* Soft-reset the EMAC core via MR0_SRST, stopping the RX/TX channels
 * first (40x erratum).  Returns 0 on success, -ETIMEDOUT if SRST never
 * self-clears; a failure is latched in dev->reset_failed so that the
 * next reset attempt skips the channel-stop step.
 */
static int emac_reset(struct emac_instance *dev)
{
	struct emac_regs __iomem *p = dev->emacp;
	int n = 20;	/* busy-wait iterations for SRST to self-clear */

	DBG(dev, "reset" NL);

	if (!dev->reset_failed) {
		/* 40x erratum suggests stopping RX channel before reset,
		 * we stop TX as well
		 */
		emac_rx_disable(dev);
		emac_tx_disable(dev);
	}

#ifdef CONFIG_PPC_DCR_NATIVE
	/* Enable internal clock source */
	if (emac_has_feature(dev, EMAC_FTR_460EX_PHY_CLK_FIX))
		dcri_clrset(SDR0, SDR0_ETH_CFG,
			    0, SDR0_ETH_CFG_ECS << dev->cell_index);
#endif

	out_be32(&p->mr0, EMAC_MR0_SRST);
	while ((in_be32(&p->mr0) & EMAC_MR0_SRST) && n)
		--n;

#ifdef CONFIG_PPC_DCR_NATIVE
	/* Enable external clock source */
	if (emac_has_feature(dev, EMAC_FTR_460EX_PHY_CLK_FIX))
		dcri_clrset(SDR0, SDR0_ETH_CFG,
			    SDR0_ETH_CFG_ECS << dev->cell_index, 0);
#endif

	if (n) {
		dev->reset_failed = 0;
		return 0;
	} else {
		emac_report_timeout_error(dev, "reset timeout");
		dev->reset_failed = 1;
		return -ETIMEDOUT;
	}
}
387
/* Program the EMAC group address hash table (GAHT) from the netdev
 * multicast list.  The table is accumulated in a local copy so the
 * hardware registers are written in a single pass.
 */
static void emac_hash_mc(struct emac_instance *dev)
{
	const int regs = EMAC_XAHT_REGS(dev);
	u32 *gaht_base = emac_gaht_base(dev);
	/* NOTE(review): variable-length array on the kernel stack;
	 * presumably EMAC_XAHT_REGS() is small — confirm its maximum in
	 * core.h, a fixed-size array would avoid the VLA.
	 */
	u32 gaht_temp[regs];
	struct netdev_hw_addr *ha;
	int i;

	DBG(dev, "hash_mc %d" NL, netdev_mc_count(dev->ndev));

	memset(gaht_temp, 0, sizeof (gaht_temp));

	netdev_for_each_mc_addr(ha, dev->ndev) {
		int slot, reg, mask;
		DBG2(dev, "mc %pM" NL, ha->addr);

		/* CRC of the MAC address selects one bit (slot) in the table */
		slot = EMAC_XAHT_CRC_TO_SLOT(dev,
					     ether_crc(ETH_ALEN, ha->addr));
		reg = EMAC_XAHT_SLOT_TO_REG(dev, slot);
		mask = EMAC_XAHT_SLOT_TO_MASK(dev, slot);

		gaht_temp[reg] |= mask;
	}

	for (i = 0; i < regs; i++)
		out_be32(gaht_base + i, gaht_temp[i]);
}
415
416 static inline u32 emac_iff2rmr(struct net_device *ndev)
417 {
418         struct emac_instance *dev = netdev_priv(ndev);
419         u32 r;
420
421         r = EMAC_RMR_SP | EMAC_RMR_SFCS | EMAC_RMR_IAE | EMAC_RMR_BAE;
422
423         if (emac_has_feature(dev, EMAC_FTR_EMAC4))
424             r |= EMAC4_RMR_BASE;
425         else
426             r |= EMAC_RMR_BASE;
427
428         if (ndev->flags & IFF_PROMISC)
429                 r |= EMAC_RMR_PME;
430         else if (ndev->flags & IFF_ALLMULTI ||
431                          (netdev_mc_count(ndev) > EMAC_XAHT_SLOTS(dev)))
432                 r |= EMAC_RMR_PMME;
433         else if (!netdev_mc_empty(ndev))
434                 r |= EMAC_RMR_MAE;
435
436         return r;
437 }
438
439 static u32 __emac_calc_base_mr1(struct emac_instance *dev, int tx_size, int rx_size)
440 {
441         u32 ret = EMAC_MR1_VLE | EMAC_MR1_IST | EMAC_MR1_TR0_MULT;
442
443         DBG2(dev, "__emac_calc_base_mr1" NL);
444
445         switch(tx_size) {
446         case 2048:
447                 ret |= EMAC_MR1_TFS_2K;
448                 break;
449         default:
450                 printk(KERN_WARNING "%s: Unknown Tx FIFO size %d\n",
451                        dev->ndev->name, tx_size);
452         }
453
454         switch(rx_size) {
455         case 16384:
456                 ret |= EMAC_MR1_RFS_16K;
457                 break;
458         case 4096:
459                 ret |= EMAC_MR1_RFS_4K;
460                 break;
461         default:
462                 printk(KERN_WARNING "%s: Unknown Rx FIFO size %d\n",
463                        dev->ndev->name, rx_size);
464         }
465
466         return ret;
467 }
468
469 static u32 __emac4_calc_base_mr1(struct emac_instance *dev, int tx_size, int rx_size)
470 {
471         u32 ret = EMAC_MR1_VLE | EMAC_MR1_IST | EMAC4_MR1_TR |
472                 EMAC4_MR1_OBCI(dev->opb_bus_freq / 1000000);
473
474         DBG2(dev, "__emac4_calc_base_mr1" NL);
475
476         switch(tx_size) {
477         case 16384:
478                 ret |= EMAC4_MR1_TFS_16K;
479                 break;
480         case 4096:
481                 ret |= EMAC4_MR1_TFS_4K;
482                 break;
483         case 2048:
484                 ret |= EMAC4_MR1_TFS_2K;
485                 break;
486         default:
487                 printk(KERN_WARNING "%s: Unknown Tx FIFO size %d\n",
488                        dev->ndev->name, tx_size);
489         }
490
491         switch(rx_size) {
492         case 16384:
493                 ret |= EMAC4_MR1_RFS_16K;
494                 break;
495         case 4096:
496                 ret |= EMAC4_MR1_RFS_4K;
497                 break;
498         case 2048:
499                 ret |= EMAC4_MR1_RFS_2K;
500                 break;
501         default:
502                 printk(KERN_WARNING "%s: Unknown Rx FIFO size %d\n",
503                        dev->ndev->name, rx_size);
504         }
505
506         return ret;
507 }
508
509 static u32 emac_calc_base_mr1(struct emac_instance *dev, int tx_size, int rx_size)
510 {
511         return emac_has_feature(dev, EMAC_FTR_EMAC4) ?
512                 __emac4_calc_base_mr1(dev, tx_size, rx_size) :
513                 __emac_calc_base_mr1(dev, tx_size, rx_size);
514 }
515
516 static inline u32 emac_calc_trtr(struct emac_instance *dev, unsigned int size)
517 {
518         if (emac_has_feature(dev, EMAC_FTR_EMAC4))
519                 return ((size >> 6) - 1) << EMAC_TRTR_SHIFT_EMAC4;
520         else
521                 return ((size >> 6) - 1) << EMAC_TRTR_SHIFT;
522 }
523
524 static inline u32 emac_calc_rwmr(struct emac_instance *dev,
525                                  unsigned int low, unsigned int high)
526 {
527         if (emac_has_feature(dev, EMAC_FTR_EMAC4))
528                 return (low << 22) | ( (high & 0x3ff) << 6);
529         else
530                 return (low << 23) | ( (high & 0x1ff) << 7);
531 }
532
/* Program the whole MAC for the current PHY state (speed/duplex/pause),
 * MTU and filtering configuration.  With no link the MAC is placed in
 * internal loopback instead of being reset.  Returns 0 on success or
 * -ETIMEDOUT if the soft reset times out.  Caller is expected to have
 * quiesced the MAC (TX/RX disabled).
 */
static int emac_configure(struct emac_instance *dev)
{
	struct emac_regs __iomem *p = dev->emacp;
	struct net_device *ndev = dev->ndev;
	int tx_size, rx_size, link = netif_carrier_ok(dev->ndev);
	u32 r, mr1 = 0;

	DBG(dev, "configure" NL);

	if (!link) {
		/* No carrier: force full-duplex internal loopback, skip reset */
		out_be32(&p->mr1, in_be32(&p->mr1)
			 | EMAC_MR1_FDE | EMAC_MR1_ILE);
		udelay(100);
	} else if (emac_reset(dev) < 0)
		return -ETIMEDOUT;

	if (emac_has_feature(dev, EMAC_FTR_HAS_TAH))
		tah_reset(dev->tah_dev);

	DBG(dev, " link = %d duplex = %d, pause = %d, asym_pause = %d\n",
	    link, dev->phy.duplex, dev->phy.pause, dev->phy.asym_pause);

	/* Default fifo sizes */
	tx_size = dev->tx_fifo_size;
	rx_size = dev->rx_fifo_size;

	/* No link, force loopback */
	if (!link)
		mr1 = EMAC_MR1_FDE | EMAC_MR1_ILE;

	/* Check for full duplex */
	else if (dev->phy.duplex == DUPLEX_FULL)
		mr1 |= EMAC_MR1_FDE | EMAC_MR1_MWSW_001;

	/* Adjust fifo sizes, mr1 and timeouts based on link speed */
	dev->stop_timeout = STOP_TIMEOUT_10;
	switch (dev->phy.speed) {
	case SPEED_1000:
		if (emac_phy_gpcs(dev->phy.mode)) {
			mr1 |= EMAC_MR1_MF_1000GPCS | EMAC_MR1_MF_IPPA(
				(dev->phy.gpcs_address != 0xffffffff) ?
				 dev->phy.gpcs_address : dev->phy.address);

			/* Put some arbitrary OUI, Manuf & Rev IDs so we can
			 * identify this GPCS PHY later.
			 */
			out_be32(&p->u1.emac4.ipcr, 0xdeadbeef);
		} else
			mr1 |= EMAC_MR1_MF_1000;

		/* Extended fifo sizes */
		tx_size = dev->tx_fifo_size_gige;
		rx_size = dev->rx_fifo_size_gige;

		if (dev->ndev->mtu > ETH_DATA_LEN) {
			/* Jumbo frames: enable JPSM and allow a longer stop time */
			if (emac_has_feature(dev, EMAC_FTR_EMAC4))
				mr1 |= EMAC4_MR1_JPSM;
			else
				mr1 |= EMAC_MR1_JPSM;
			dev->stop_timeout = STOP_TIMEOUT_1000_JUMBO;
		} else
			dev->stop_timeout = STOP_TIMEOUT_1000;
		break;
	case SPEED_100:
		mr1 |= EMAC_MR1_MF_100;
		dev->stop_timeout = STOP_TIMEOUT_100;
		break;
	default: /* make gcc happy */
		break;
	}

	if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
		rgmii_set_speed(dev->rgmii_dev, dev->rgmii_port,
				dev->phy.speed);
	if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
		zmii_set_speed(dev->zmii_dev, dev->zmii_port, dev->phy.speed);

	/* on 40x erratum forces us to NOT use integrated flow control,
	 * let's hope it works on 44x ;)
	 */
	if (!emac_has_feature(dev, EMAC_FTR_NO_FLOW_CONTROL_40x) &&
	    dev->phy.duplex == DUPLEX_FULL) {
		if (dev->phy.pause)
			mr1 |= EMAC_MR1_EIFC | EMAC_MR1_APP;
		else if (dev->phy.asym_pause)
			mr1 |= EMAC_MR1_APP;
	}

	/* Add base settings & fifo sizes & program MR1 */
	mr1 |= emac_calc_base_mr1(dev, tx_size, rx_size);
	out_be32(&p->mr1, mr1);

	/* Set individual MAC address */
	out_be32(&p->iahr, (ndev->dev_addr[0] << 8) | ndev->dev_addr[1]);
	out_be32(&p->ialr, (ndev->dev_addr[2] << 24) |
		 (ndev->dev_addr[3] << 16) | (ndev->dev_addr[4] << 8) |
		 ndev->dev_addr[5]);

	/* VLAN Tag Protocol ID */
	out_be32(&p->vtpid, 0x8100);

	/* Receive mode register */
	r = emac_iff2rmr(ndev);
	if (r & EMAC_RMR_MAE)
		emac_hash_mc(dev);
	out_be32(&p->rmr, r);

	/* FIFOs thresholds */
	if (emac_has_feature(dev, EMAC_FTR_EMAC4))
		r = EMAC4_TMR1((dev->mal_burst_size / dev->fifo_entry_size) + 1,
			       tx_size / 2 / dev->fifo_entry_size);
	else
		r = EMAC_TMR1((dev->mal_burst_size / dev->fifo_entry_size) + 1,
			      tx_size / 2 / dev->fifo_entry_size);
	out_be32(&p->tmr1, r);
	out_be32(&p->trtr, emac_calc_trtr(dev, tx_size / 2));

	/* PAUSE frame is sent when RX FIFO reaches its high-water mark,
	   there should be still enough space in FIFO to allow the our link
	   partner time to process this frame and also time to send PAUSE
	   frame itself.

	   Here is the worst case scenario for the RX FIFO "headroom"
	   (from "The Switch Book") (100Mbps, without preamble, inter-frame gap):

	   1) One maximum-length frame on TX                    1522 bytes
	   2) One PAUSE frame time                                64 bytes
	   3) PAUSE frame decode time allowance                   64 bytes
	   4) One maximum-length frame on RX                    1522 bytes
	   5) Round-trip propagation delay of the link (100Mb)    15 bytes
	   ----------
	   3187 bytes

	   I chose to set high-water mark to RX_FIFO_SIZE / 4 (1024 bytes)
	   low-water mark  to RX_FIFO_SIZE / 8 (512 bytes)
	 */
	r = emac_calc_rwmr(dev, rx_size / 8 / dev->fifo_entry_size,
			   rx_size / 4 / dev->fifo_entry_size);
	out_be32(&p->rwmr, r);

	/* Set PAUSE timer to the maximum */
	out_be32(&p->ptr, 0xffff);

	/* IRQ sources */
	r = EMAC_ISR_OVR | EMAC_ISR_BP | EMAC_ISR_SE |
		EMAC_ISR_ALE | EMAC_ISR_BFCS | EMAC_ISR_PTLE | EMAC_ISR_ORE |
		EMAC_ISR_IRE | EMAC_ISR_TE;
	if (emac_has_feature(dev, EMAC_FTR_EMAC4))
	    r |= EMAC4_ISR_TXPE | EMAC4_ISR_RXPE /* | EMAC4_ISR_TXUE |
						  EMAC4_ISR_RXOE | */;
	out_be32(&p->iser,  r);

	/* We need to take GPCS PHY out of isolate mode after EMAC reset */
	if (emac_phy_gpcs(dev->phy.mode)) {
		if (dev->phy.gpcs_address != 0xffffffff)
			emac_mii_reset_gpcs(&dev->phy);
		else
			emac_mii_reset_phy(&dev->phy);
	}

	return 0;
}
695
696 static void emac_reinitialize(struct emac_instance *dev)
697 {
698         DBG(dev, "reinitialize" NL);
699
700         emac_netif_stop(dev);
701         if (!emac_configure(dev)) {
702                 emac_tx_enable(dev);
703                 emac_rx_enable(dev);
704         }
705         emac_netif_start(dev);
706 }
707
/* Hard-recover the TX path (used from the reset worker after a TX
 * timeout): stop the MAC transmitter and the MAL TX channel, drop
 * everything queued in the TX ring, reprogram the MAC and restart.
 */
static void emac_full_tx_reset(struct emac_instance *dev)
{
	DBG(dev, "full_tx_reset" NL);

	emac_tx_disable(dev);
	mal_disable_tx_channel(dev->mal, dev->mal_tx_chan);
	emac_clean_tx_ring(dev);
	/* Ring is now empty: reset producer/consumer indices */
	dev->tx_cnt = dev->tx_slot = dev->ack_slot = 0;

	emac_configure(dev);

	mal_enable_tx_channel(dev->mal, dev->mal_tx_chan);
	emac_tx_enable(dev);
	emac_rx_enable(dev);
}
723
724 static void emac_reset_work(struct work_struct *work)
725 {
726         struct emac_instance *dev = container_of(work, struct emac_instance, reset_work);
727
728         DBG(dev, "reset_work" NL);
729
730         mutex_lock(&dev->link_lock);
731         if (dev->opened) {
732                 emac_netif_stop(dev);
733                 emac_full_tx_reset(dev);
734                 emac_netif_start(dev);
735         }
736         mutex_unlock(&dev->link_lock);
737 }
738
/* ndo_tx_timeout hook: defer the heavyweight full TX reset to process
 * context via dev->reset_work (see emac_reset_work).
 */
static void emac_tx_timeout(struct net_device *ndev)
{
	struct emac_instance *dev = netdev_priv(ndev);

	DBG(dev, "tx_timeout" NL);

	schedule_work(&dev->reset_work);
}
747
748
749 static inline int emac_phy_done(struct emac_instance *dev, u32 stacr)
750 {
751         int done = !!(stacr & EMAC_STACR_OC);
752
753         if (emac_has_feature(dev, EMAC_FTR_STACR_OC_INVERT))
754                 done = !done;
755
756         return done;
757 };
758
/* Low-level MDIO read through the EMAC STA controller.
 *
 * @id:  PHY address
 * @reg: PHY register number
 *
 * Returns the 16-bit register value on success, -ETIMEDOUT if the
 * management interface never becomes idle or the read never completes,
 * or -EREMOTEIO if the PHY flags an error (STACR_PHYE).  Serialized by
 * dev->mdio_lock; the ZMII/RGMII MDIO port is claimed for the duration.
 */
static int __emac_mdio_read(struct emac_instance *dev, u8 id, u8 reg)
{
	struct emac_regs __iomem *p = dev->emacp;
	u32 r = 0;
	int n, err = -ETIMEDOUT;

	mutex_lock(&dev->mdio_lock);

	DBG2(dev, "mdio_read(%02x,%02x)" NL, id, reg);

	/* Enable proper MDIO port */
	if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
		zmii_get_mdio(dev->zmii_dev, dev->zmii_port);
	if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
		rgmii_get_mdio(dev->rgmii_dev, dev->rgmii_port);

	/* Wait for management interface to become idle */
	n = 20;
	while (!emac_phy_done(dev, in_be32(&p->stacr))) {
		udelay(1);
		if (!--n) {
			DBG2(dev, " -> timeout wait idle\n");
			goto bail;
		}
	}

	/* Issue read command */
	if (emac_has_feature(dev, EMAC_FTR_EMAC4))
		r = EMAC4_STACR_BASE(dev->opb_bus_freq);
	else
		r = EMAC_STACR_BASE(dev->opb_bus_freq);
	if (emac_has_feature(dev, EMAC_FTR_STACR_OC_INVERT))
		r |= EMAC_STACR_OC;
	if (emac_has_feature(dev, EMAC_FTR_HAS_NEW_STACR))
		r |= EMACX_STACR_STAC_READ;
	else
		r |= EMAC_STACR_STAC_READ;
	r |= (reg & EMAC_STACR_PRA_MASK)
		| ((id & EMAC_STACR_PCDA_MASK) << EMAC_STACR_PCDA_SHIFT);
	out_be32(&p->stacr, r);

	/* Wait for read to complete */
	n = 200;
	while (!emac_phy_done(dev, (r = in_be32(&p->stacr)))) {
		udelay(1);
		if (!--n) {
			DBG2(dev, " -> timeout wait complete\n");
			goto bail;
		}
	}

	if (unlikely(r & EMAC_STACR_PHYE)) {
		DBG(dev, "mdio_read(%02x, %02x) failed" NL, id, reg);
		err = -EREMOTEIO;
		goto bail;
	}

	/* Extract the 16-bit data field from STACR */
	r = ((r >> EMAC_STACR_PHYD_SHIFT) & EMAC_STACR_PHYD_MASK);

	DBG2(dev, "mdio_read -> %04x" NL, r);
	err = 0;
 bail:
	if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
		rgmii_put_mdio(dev->rgmii_dev, dev->rgmii_port);
	if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
		zmii_put_mdio(dev->zmii_dev, dev->zmii_port);
	mutex_unlock(&dev->mdio_lock);

	return err == 0 ? r : err;
}
829
830 static void __emac_mdio_write(struct emac_instance *dev, u8 id, u8 reg,
831                               u16 val)
832 {
833         struct emac_regs __iomem *p = dev->emacp;
834         u32 r = 0;
835         int n, err = -ETIMEDOUT;
836
837         mutex_lock(&dev->mdio_lock);
838
839         DBG2(dev, "mdio_write(%02x,%02x,%04x)" NL, id, reg, val);
840
841         /* Enable proper MDIO port */
842         if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
843                 zmii_get_mdio(dev->zmii_dev, dev->zmii_port);
844         if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
845                 rgmii_get_mdio(dev->rgmii_dev, dev->rgmii_port);
846
847         /* Wait for management interface to be idle */
848         n = 20;
849         while (!emac_phy_done(dev, in_be32(&p->stacr))) {
850                 udelay(1);
851                 if (!--n) {
852                         DBG2(dev, " -> timeout wait idle\n");
853                         goto bail;
854                 }
855         }
856
857         /* Issue write command */
858         if (emac_has_feature(dev, EMAC_FTR_EMAC4))
859                 r = EMAC4_STACR_BASE(dev->opb_bus_freq);
860         else
861                 r = EMAC_STACR_BASE(dev->opb_bus_freq);
862         if (emac_has_feature(dev, EMAC_FTR_STACR_OC_INVERT))
863                 r |= EMAC_STACR_OC;
864         if (emac_has_feature(dev, EMAC_FTR_HAS_NEW_STACR))
865                 r |= EMACX_STACR_STAC_WRITE;
866         else
867                 r |= EMAC_STACR_STAC_WRITE;
868         r |= (reg & EMAC_STACR_PRA_MASK) |
869                 ((id & EMAC_STACR_PCDA_MASK) << EMAC_STACR_PCDA_SHIFT) |
870                 (val << EMAC_STACR_PHYD_SHIFT);
871         out_be32(&p->stacr, r);
872
873         /* Wait for write to complete */
874         n = 200;
875         while (!emac_phy_done(dev, in_be32(&p->stacr))) {
876                 udelay(1);
877                 if (!--n) {
878                         DBG2(dev, " -> timeout wait complete\n");
879                         goto bail;
880                 }
881         }
882         err = 0;
883  bail:
884         if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
885                 rgmii_put_mdio(dev->rgmii_dev, dev->rgmii_port);
886         if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
887                 zmii_put_mdio(dev->zmii_dev, dev->zmii_port);
888         mutex_unlock(&dev->mdio_lock);
889 }
890
891 static int emac_mdio_read(struct net_device *ndev, int id, int reg)
892 {
893         struct emac_instance *dev = netdev_priv(ndev);
894         int res;
895
896         res = __emac_mdio_read((dev->mdio_instance &&
897                                 dev->phy.gpcs_address != id) ?
898                                 dev->mdio_instance : dev,
899                                (u8) id, (u8) reg);
900         return res;
901 }
902
903 static void emac_mdio_write(struct net_device *ndev, int id, int reg, int val)
904 {
905         struct emac_instance *dev = netdev_priv(ndev);
906
907         __emac_mdio_write((dev->mdio_instance &&
908                            dev->phy.gpcs_address != id) ?
909                            dev->mdio_instance : dev,
910                           (u8) id, (u8) reg, (u16) val);
911 }
912
913 /* Tx lock BH */
/* Reprogram the chip's RX mode register (and the multicast hash when
 * multicast acceptance is enabled) from the current interface flags,
 * stopping only the RX channel rather than doing a full EMAC reset —
 * see the (original) comment below for why. */
static void __emac_set_multicast_list(struct emac_instance *dev)
{
	struct emac_regs __iomem *p = dev->emacp;
	u32 rmr = emac_iff2rmr(dev->ndev);

	DBG(dev, "__multicast %08x" NL, rmr);

	/* I decided to relax register access rules here to avoid
	 * full EMAC reset.
	 *
	 * There is a real problem with EMAC4 core if we use MWSW_001 bit
	 * in MR1 register and do a full EMAC reset.
	 * One TX BD status update is delayed and, after EMAC reset, it
	 * never happens, resulting in TX hung (it'll be recovered by TX
	 * timeout handler eventually, but this is just gross).
	 * So we either have to do full TX reset or try to cheat here :)
	 *
	 * The only required change is to RX mode register, so I *think* all
	 * we need is just to stop RX channel. This seems to work on all
	 * tested SoCs.                                                --ebs
	 *
	 * If we need the full reset, we might just trigger the workqueue
	 * and do it async... a bit nasty but should work --BenH
	 */
	dev->mcast_pending = 0;
	emac_rx_disable(dev);
	if (rmr & EMAC_RMR_MAE)
		emac_hash_mc(dev);
	out_be32(&p->rmr, rmr);
	emac_rx_enable(dev);
}
945
946 /* Tx lock BH */
947 static void emac_set_multicast_list(struct net_device *ndev)
948 {
949         struct emac_instance *dev = netdev_priv(ndev);
950
951         DBG(dev, "multicast" NL);
952
953         BUG_ON(!netif_running(dev->ndev));
954
955         if (dev->no_mcast) {
956                 dev->mcast_pending = 1;
957                 return;
958         }
959         __emac_set_multicast_list(dev);
960 }
961
/* Rebuild the RX ring for @new_mtu while the device stays administratively
 * up.  Quiesces RX, wipes all BDs, and (only if larger buffers are needed)
 * replaces every skb.  Returns 0 on success or -ENOMEM if an allocation
 * fails part-way; in that case RX is restarted with a mix of old and new
 * skbs — tolerable because mal_set_rcbs() is skipped and the caller keeps
 * the old MTU on a non-zero return. */
static int emac_resize_rx_ring(struct emac_instance *dev, int new_mtu)
{
	int rx_sync_size = emac_rx_sync_size(new_mtu);
	int rx_skb_size = emac_rx_skb_size(new_mtu);
	int i, ret = 0;

	/* Quiesce: stop netif/NAPI, then RX at both EMAC and MAL level */
	mutex_lock(&dev->link_lock);
	emac_netif_stop(dev);
	emac_rx_disable(dev);
	mal_disable_rx_channel(dev->mal, dev->mal_rx_chan);

	/* Any partially reassembled scatter/gather packet is dropped */
	if (dev->rx_sg_skb) {
		++dev->estats.rx_dropped_resize;
		dev_kfree_skb(dev->rx_sg_skb);
		dev->rx_sg_skb = NULL;
	}

	/* Make a first pass over RX ring and mark BDs ready, dropping
	 * non-processed packets on the way. We need this as a separate pass
	 * to simplify error recovery in the case of allocation failure later.
	 */
	for (i = 0; i < NUM_RX_BUFF; ++i) {
		/* FIRST set on a BD means a received packet we never got
		 * to process is being thrown away */
		if (dev->rx_desc[i].ctrl & MAL_RX_CTRL_FIRST)
			++dev->estats.rx_dropped_resize;

		dev->rx_desc[i].data_len = 0;
		dev->rx_desc[i].ctrl = MAL_RX_CTRL_EMPTY |
		    (i == (NUM_RX_BUFF - 1) ? MAL_RX_CTRL_WRAP : 0);
	}

	/* Reallocate RX ring only if bigger skb buffers are required */
	if (rx_skb_size <= dev->rx_skb_size)
		goto skip;

	/* Second pass, allocate new skbs */
	for (i = 0; i < NUM_RX_BUFF; ++i) {
		struct sk_buff *skb = alloc_skb(rx_skb_size, GFP_ATOMIC);
		if (!skb) {
			/* All BDs are already EMPTY, so RX can restart with
			 * whatever skbs the ring holds at this point */
			ret = -ENOMEM;
			goto oom;
		}

		BUG_ON(!dev->rx_skb[i]);
		dev_kfree_skb(dev->rx_skb[i]);

		/* Map from data - 2 and point the BD at +2 — same 2-byte
		 * shift as emac_alloc_rx_skb(), presumably to word-align
		 * the IP header (TODO confirm) */
		skb_reserve(skb, EMAC_RX_SKB_HEADROOM + 2);
		dev->rx_desc[i].data_ptr =
		    dma_map_single(&dev->ofdev->dev, skb->data - 2, rx_sync_size,
				   DMA_FROM_DEVICE) + 2;
		dev->rx_skb[i] = skb;
	}
 skip:
	/* Check if we need to change "Jumbo" bit in MR1 */
	if ((new_mtu > ETH_DATA_LEN) ^ (dev->ndev->mtu > ETH_DATA_LEN)) {
		/* This is to prevent starting RX channel in emac_rx_enable() */
		set_bit(MAL_COMMAC_RX_STOPPED, &dev->commac.flags);

		dev->ndev->mtu = new_mtu;
		emac_full_tx_reset(dev);
	}

	mal_set_rcbs(dev->mal, dev->mal_rx_chan, emac_rx_size(new_mtu));
 oom:
	/* Restart RX */
	clear_bit(MAL_COMMAC_RX_STOPPED, &dev->commac.flags);
	dev->rx_slot = 0;
	mal_enable_rx_channel(dev->mal, dev->mal_rx_chan);
	emac_rx_enable(dev);
	emac_netif_start(dev);
	mutex_unlock(&dev->link_lock);

	return ret;
}
1035
1036 /* Process ctx, rtnl_lock semaphore */
1037 static int emac_change_mtu(struct net_device *ndev, int new_mtu)
1038 {
1039         struct emac_instance *dev = netdev_priv(ndev);
1040         int ret = 0;
1041
1042         if (new_mtu < EMAC_MIN_MTU || new_mtu > dev->max_mtu)
1043                 return -EINVAL;
1044
1045         DBG(dev, "change_mtu(%d)" NL, new_mtu);
1046
1047         if (netif_running(ndev)) {
1048                 /* Check if we really need to reinitialize RX ring */
1049                 if (emac_rx_skb_size(ndev->mtu) != emac_rx_skb_size(new_mtu))
1050                         ret = emac_resize_rx_ring(dev, new_mtu);
1051         }
1052
1053         if (!ret) {
1054                 ndev->mtu = new_mtu;
1055                 dev->rx_skb_size = emac_rx_skb_size(new_mtu);
1056                 dev->rx_sync_size = emac_rx_sync_size(new_mtu);
1057         }
1058
1059         return ret;
1060 }
1061
1062 static void emac_clean_tx_ring(struct emac_instance *dev)
1063 {
1064         int i;
1065
1066         for (i = 0; i < NUM_TX_BUFF; ++i) {
1067                 if (dev->tx_skb[i]) {
1068                         dev_kfree_skb(dev->tx_skb[i]);
1069                         dev->tx_skb[i] = NULL;
1070                         if (dev->tx_desc[i].ctrl & MAL_TX_CTRL_READY)
1071                                 ++dev->estats.tx_dropped;
1072                 }
1073                 dev->tx_desc[i].ctrl = 0;
1074                 dev->tx_desc[i].data_ptr = 0;
1075         }
1076 }
1077
1078 static void emac_clean_rx_ring(struct emac_instance *dev)
1079 {
1080         int i;
1081
1082         for (i = 0; i < NUM_RX_BUFF; ++i)
1083                 if (dev->rx_skb[i]) {
1084                         dev->rx_desc[i].ctrl = 0;
1085                         dev_kfree_skb(dev->rx_skb[i]);
1086                         dev->rx_skb[i] = NULL;
1087                         dev->rx_desc[i].data_ptr = 0;
1088                 }
1089
1090         if (dev->rx_sg_skb) {
1091                 dev_kfree_skb(dev->rx_sg_skb);
1092                 dev->rx_sg_skb = NULL;
1093         }
1094 }
1095
/* Allocate and DMA-map a fresh receive skb for ring slot @slot.
 * Returns 0 on success or -ENOMEM.  On success the BD is handed back to
 * the hardware (EMPTY) only after a write barrier, so data_ptr/data_len
 * are in memory before the ownership flip. */
static inline int emac_alloc_rx_skb(struct emac_instance *dev, int slot,
				    gfp_t flags)
{
	struct sk_buff *skb = alloc_skb(dev->rx_skb_size, flags);
	if (unlikely(!skb))
		return -ENOMEM;

	dev->rx_skb[slot] = skb;
	dev->rx_desc[slot].data_len = 0;

	/* Map from data - 2 but point the BD at the mapping + 2: shifts
	 * the packet start by two bytes, presumably so the IP header ends
	 * up word-aligned — TODO confirm against the EMAC BD layout */
	skb_reserve(skb, EMAC_RX_SKB_HEADROOM + 2);
	dev->rx_desc[slot].data_ptr =
	    dma_map_single(&dev->ofdev->dev, skb->data - 2, dev->rx_sync_size,
			   DMA_FROM_DEVICE) + 2;
	/* BD fields must be visible before ctrl marks the BD EMPTY */
	wmb();
	dev->rx_desc[slot].ctrl = MAL_RX_CTRL_EMPTY |
	    (slot == (NUM_RX_BUFF - 1) ? MAL_RX_CTRL_WRAP : 0);

	return 0;
}
1116
1117 static void emac_print_link_status(struct emac_instance *dev)
1118 {
1119         if (netif_carrier_ok(dev->ndev))
1120                 printk(KERN_INFO "%s: link is up, %d %s%s\n",
1121                        dev->ndev->name, dev->phy.speed,
1122                        dev->phy.duplex == DUPLEX_FULL ? "FDX" : "HDX",
1123                        dev->phy.pause ? ", pause enabled" :
1124                        dev->phy.asym_pause ? ", asymmetric pause enabled" : "");
1125         else
1126                 printk(KERN_INFO "%s: link is down\n", dev->ndev->name);
1127 }
1128
1129 /* Process ctx, rtnl_lock semaphore */
/* Bring the interface up: grab the error IRQ, populate the RX ring, start
 * PHY link polling (or assume link-up when there is no PHY), then program
 * the chip and enable TX/RX at both MAL and EMAC level.
 * Returns 0, the request_irq() error, or -ENOMEM on RX ring OOM. */
static int emac_open(struct net_device *ndev)
{
	struct emac_instance *dev = netdev_priv(ndev);
	int err, i;

	DBG(dev, "open" NL);

	/* Setup error IRQ handler */
	err = request_irq(dev->emac_irq, emac_irq, 0, "EMAC", dev);
	if (err) {
		printk(KERN_ERR "%s: failed to request IRQ %d\n",
		       ndev->name, dev->emac_irq);
		return err;
	}

	/* Allocate RX ring */
	for (i = 0; i < NUM_RX_BUFF; ++i)
		if (emac_alloc_rx_skb(dev, i, GFP_KERNEL)) {
			printk(KERN_ERR "%s: failed to allocate RX ring\n",
			       ndev->name);
			goto oom;
		}

	/* Reset ring indices and per-open state */
	dev->tx_cnt = dev->tx_slot = dev->ack_slot = dev->rx_slot = 0;
	clear_bit(MAL_COMMAC_RX_STOPPED, &dev->commac.flags);
	dev->rx_sg_skb = NULL;

	mutex_lock(&dev->link_lock);
	dev->opened = 1;

	/* Start PHY polling now.
	 */
	if (dev->phy.address >= 0) {
		int link_poll_interval;
		if (dev->phy.def->ops->poll_link(&dev->phy)) {
			dev->phy.def->ops->read_link(&dev->phy);
			emac_rx_clk_default(dev);
			netif_carrier_on(dev->ndev);
			link_poll_interval = PHY_POLL_LINK_ON;
		} else {
			emac_rx_clk_tx(dev);
			netif_carrier_off(dev->ndev);
			link_poll_interval = PHY_POLL_LINK_OFF;
		}
		/* Publish link_polling before the work can observe it
		 * (read side: emac_force_link_update) */
		dev->link_polling = 1;
		wmb();
		schedule_delayed_work(&dev->link_work, link_poll_interval);
		emac_print_link_status(dev);
	} else
		netif_carrier_on(dev->ndev);

	/* Required for Pause packet support in EMAC */
	dev_mc_add_global(ndev, default_mcast_addr);

	/* Program the chip, then enable the MAL channels and the EMAC
	 * TX/RX paths before opening the netif queues */
	emac_configure(dev);
	mal_poll_add(dev->mal, &dev->commac);
	mal_enable_tx_channel(dev->mal, dev->mal_tx_chan);
	mal_set_rcbs(dev->mal, dev->mal_rx_chan, emac_rx_size(ndev->mtu));
	mal_enable_rx_channel(dev->mal, dev->mal_rx_chan);
	emac_tx_enable(dev);
	emac_rx_enable(dev);
	emac_netif_start(dev);

	mutex_unlock(&dev->link_lock);

	return 0;
 oom:
	emac_clean_rx_ring(dev);
	free_irq(dev->emac_irq, dev);

	return -ENOMEM;
}
1202
1203 /* BHs disabled */
#if 0
/* Dead code (compiled out): decodes the link parameters currently
 * programmed in MR1 and returns non-zero if they differ from what the
 * PHY layer reports.  Kept for reference only. */
static int emac_link_differs(struct emac_instance *dev)
{
	u32 r = in_be32(&dev->emacp->mr1);

	int duplex = r & EMAC_MR1_FDE ? DUPLEX_FULL : DUPLEX_HALF;
	int speed, pause, asym_pause;

	if (r & EMAC_MR1_MF_1000)
		speed = SPEED_1000;
	else if (r & EMAC_MR1_MF_100)
		speed = SPEED_100;
	else
		speed = SPEED_10;

	/* EIFC+APP encodes symmetric pause, APP alone asymmetric pause */
	switch (r & (EMAC_MR1_EIFC | EMAC_MR1_APP)) {
	case (EMAC_MR1_EIFC | EMAC_MR1_APP):
		pause = 1;
		asym_pause = 0;
		break;
	case EMAC_MR1_APP:
		pause = 0;
		asym_pause = 1;
		break;
	default:
		pause = asym_pause = 0;
	}
	return speed != dev->phy.speed || duplex != dev->phy.duplex ||
	    pause != dev->phy.pause || asym_pause != dev->phy.asym_pause;
}
#endif
1235
/* Periodic PHY link poll (delayed work).  On a link transition it
 * reprograms the MAC — full TX reset on link-up, reinitialize on
 * link-down — and re-schedules itself at a rate depending on the new
 * state.  Serialized with open/close via dev->link_lock; bails without
 * rescheduling once the device is closed (!dev->opened). */
static void emac_link_timer(struct work_struct *work)
{
	struct emac_instance *dev =
		container_of(to_delayed_work(work),
			     struct emac_instance, link_work);
	int link_poll_interval;

	mutex_lock(&dev->link_lock);
	DBG2(dev, "link timer" NL);

	/* Device was closed while this work was pending */
	if (!dev->opened)
		goto bail;

	if (dev->phy.def->ops->poll_link(&dev->phy)) {
		if (!netif_carrier_ok(dev->ndev)) {
			/* Link just came up */
			emac_rx_clk_default(dev);
			/* Get new link parameters */
			dev->phy.def->ops->read_link(&dev->phy);

			netif_carrier_on(dev->ndev);
			emac_netif_stop(dev);
			emac_full_tx_reset(dev);
			emac_netif_start(dev);
			emac_print_link_status(dev);
		}
		link_poll_interval = PHY_POLL_LINK_ON;
	} else {
		if (netif_carrier_ok(dev->ndev)) {
			/* Link just went down */
			emac_rx_clk_tx(dev);
			netif_carrier_off(dev->ndev);
			netif_tx_disable(dev->ndev);
			emac_reinitialize(dev);
			emac_print_link_status(dev);
		}
		link_poll_interval = PHY_POLL_LINK_OFF;
	}
	schedule_delayed_work(&dev->link_work, link_poll_interval);
 bail:
	mutex_unlock(&dev->link_lock);
}
1276
/* Mark carrier down and force the PHY polling work to run again soon.
 * The smp_rmb() appears to pair with the wmb() in emac_open() that
 * publishes dev->link_polling.  The second link_polling test re-checks
 * the flag after cancellation, since emac_close() clears it before
 * cancelling — NOTE(review): done without a lock, so this only narrows
 * (not closes) the race; confirm a spurious reschedule is harmless. */
static void emac_force_link_update(struct emac_instance *dev)
{
	netif_carrier_off(dev->ndev);
	smp_rmb();
	if (dev->link_polling) {
		cancel_rearming_delayed_work(&dev->link_work);
		if (dev->link_polling)
			schedule_delayed_work(&dev->link_work,  PHY_POLL_LINK_OFF);
	}
}
1287
1288 /* Process ctx, rtnl_lock semaphore */
/* Bring the interface down: stop link polling, quiesce the netif layer,
 * disable RX/TX at EMAC and MAL level, drain both rings and release the
 * error IRQ — essentially emac_open() in reverse.  Always returns 0. */
static int emac_close(struct net_device *ndev)
{
	struct emac_instance *dev = netdev_priv(ndev);

	DBG(dev, "close" NL);

	/* Clear link_polling before cancelling so that
	 * emac_force_link_update() won't reschedule the work */
	if (dev->phy.address >= 0) {
		dev->link_polling = 0;
		cancel_rearming_delayed_work(&dev->link_work);
	}
	mutex_lock(&dev->link_lock);
	emac_netif_stop(dev);
	/* Tells a concurrently pending emac_link_timer() to bail out */
	dev->opened = 0;
	mutex_unlock(&dev->link_lock);

	emac_rx_disable(dev);
	emac_tx_disable(dev);
	mal_disable_rx_channel(dev->mal, dev->mal_rx_chan);
	mal_disable_tx_channel(dev->mal, dev->mal_tx_chan);
	mal_poll_del(dev->mal, &dev->commac);

	/* Rings are drained only after all DMA activity is stopped */
	emac_clean_tx_ring(dev);
	emac_clean_rx_ring(dev);

	free_irq(dev->emac_irq, dev);

	netif_carrier_off(ndev);

	return 0;
}
1319
1320 static inline u16 emac_tx_csum(struct emac_instance *dev,
1321                                struct sk_buff *skb)
1322 {
1323         if (emac_has_feature(dev, EMAC_FTR_HAS_TAH) &&
1324                 (skb->ip_summed == CHECKSUM_PARTIAL)) {
1325                 ++dev->stats.tx_packets_csum;
1326                 return EMAC_TX_CTRL_TAH_CSUM;
1327         }
1328         return 0;
1329 }
1330
/* Common tail of both xmit paths: kick the transmitter via TMR0, stop the
 * queue if the ring just became full, and account the packet.  Always
 * returns NETDEV_TX_OK — the BD has already been handed to the hardware
 * by the caller. */
static inline int emac_xmit_finish(struct emac_instance *dev, int len)
{
	struct emac_regs __iomem *p = dev->emacp;
	struct net_device *ndev = dev->ndev;

	/* Send the packet out. If the if makes a significant perf
	 * difference, then we can store the TMR0 value in "dev"
	 * instead
	 */
	if (emac_has_feature(dev, EMAC_FTR_EMAC4))
		out_be32(&p->tmr0, EMAC4_TMR0_XMIT);
	else
		out_be32(&p->tmr0, EMAC_TMR0_XMIT);

	/* Ring completely full: stop the queue until emac_poll_tx()
	 * reclaims enough slots */
	if (unlikely(++dev->tx_cnt == NUM_TX_BUFF)) {
		netif_stop_queue(ndev);
		DBG2(dev, "stopped TX queue" NL);
	}

	ndev->trans_start = jiffies;
	++dev->stats.tx_packets;
	dev->stats.tx_bytes += len;

	return NETDEV_TX_OK;
}
1356
1357 /* Tx lock BH */
/* Hard-start a single-descriptor packet: map the linear skb data, fill
 * the next TX BD, and flip it to READY after a write barrier.  Ring-full
 * protection is implicit — emac_xmit_finish() stops the queue when the
 * last free slot is consumed. */
static int emac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
	struct emac_instance *dev = netdev_priv(ndev);
	unsigned int len = skb->len;
	int slot;

	u16 ctrl = EMAC_TX_CTRL_GFCS | EMAC_TX_CTRL_GP | MAL_TX_CTRL_READY |
	    MAL_TX_CTRL_LAST | emac_tx_csum(dev, skb);

	/* Take the current slot and advance; last slot gets WRAP */
	slot = dev->tx_slot++;
	if (dev->tx_slot == NUM_TX_BUFF) {
		dev->tx_slot = 0;
		ctrl |= MAL_TX_CTRL_WRAP;
	}

	DBG2(dev, "xmit(%u) %d" NL, len, slot);

	dev->tx_skb[slot] = skb;
	dev->tx_desc[slot].data_ptr = dma_map_single(&dev->ofdev->dev,
						     skb->data, len,
						     DMA_TO_DEVICE);
	dev->tx_desc[slot].data_len = (u16) len;
	/* BD fields must be in memory before ctrl's READY bit is set */
	wmb();
	dev->tx_desc[slot].ctrl = ctrl;

	return emac_xmit_finish(dev, len);
}
1385
1386 static inline int emac_xmit_split(struct emac_instance *dev, int slot,
1387                                   u32 pd, int len, int last, u16 base_ctrl)
1388 {
1389         while (1) {
1390                 u16 ctrl = base_ctrl;
1391                 int chunk = min(len, MAL_MAX_TX_SIZE);
1392                 len -= chunk;
1393
1394                 slot = (slot + 1) % NUM_TX_BUFF;
1395
1396                 if (last && !len)
1397                         ctrl |= MAL_TX_CTRL_LAST;
1398                 if (slot == NUM_TX_BUFF - 1)
1399                         ctrl |= MAL_TX_CTRL_WRAP;
1400
1401                 dev->tx_skb[slot] = NULL;
1402                 dev->tx_desc[slot].data_ptr = pd;
1403                 dev->tx_desc[slot].data_len = (u16) chunk;
1404                 dev->tx_desc[slot].ctrl = ctrl;
1405                 ++dev->tx_cnt;
1406
1407                 if (!len)
1408                         break;
1409
1410                 pd += chunk;
1411         }
1412         return slot;
1413 }
1414
1415 /* Tx lock BH disabled (SG version for TAH equipped EMACs) */
/* Scatter/gather hard-start: splits the linear part and each page
 * fragment into MAL_MAX_TX_SIZE-sized BD chunks via emac_xmit_split().
 * Falls back to emac_start_xmit() for small linear skbs.  Returns
 * NETDEV_TX_OK, or NETDEV_TX_BUSY after undoing the partially-filled
 * BDs when the ring-space estimate turned out too optimistic. */
static int emac_start_xmit_sg(struct sk_buff *skb, struct net_device *ndev)
{
	struct emac_instance *dev = netdev_priv(ndev);
	int nr_frags = skb_shinfo(skb)->nr_frags;
	int len = skb->len, chunk;
	int slot, i;
	u16 ctrl;
	u32 pd;

	/* This is common "fast" path */
	if (likely(!nr_frags && len <= MAL_MAX_TX_SIZE))
		return emac_start_xmit(skb, ndev);

	/* From here on, len covers the linear part only */
	len -= skb->data_len;

	/* Note, this is only an *estimation*, we can still run out of empty
	 * slots because of the additional fragmentation into
	 * MAL_MAX_TX_SIZE-sized chunks
	 */
	if (unlikely(dev->tx_cnt + nr_frags + mal_tx_chunks(len) > NUM_TX_BUFF))
		goto stop_queue;

	/* Chunk BDs get READY as they are filled, but the hardware only
	 * starts once the *first* BD's ctrl word is written at the very
	 * end (after the wmb below); note: no MAL_TX_CTRL_LAST here */
	ctrl = EMAC_TX_CTRL_GFCS | EMAC_TX_CTRL_GP | MAL_TX_CTRL_READY |
	    emac_tx_csum(dev, skb);
	slot = dev->tx_slot;

	/* skb data */
	dev->tx_skb[slot] = NULL;
	chunk = min(len, MAL_MAX_TX_SIZE);
	dev->tx_desc[slot].data_ptr = pd =
	    dma_map_single(&dev->ofdev->dev, skb->data, len, DMA_TO_DEVICE);
	dev->tx_desc[slot].data_len = (u16) chunk;
	len -= chunk;
	if (unlikely(len))
		slot = emac_xmit_split(dev, slot, pd + chunk, len, !nr_frags,
				       ctrl);
	/* skb fragments */
	for (i = 0; i < nr_frags; ++i) {
		struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
		len = frag->size;

		/* Re-check the space estimate before each fragment */
		if (unlikely(dev->tx_cnt + mal_tx_chunks(len) >= NUM_TX_BUFF))
			goto undo_frame;

		pd = dma_map_page(&dev->ofdev->dev, frag->page, frag->page_offset, len,
				  DMA_TO_DEVICE);

		slot = emac_xmit_split(dev, slot, pd, len, i == nr_frags - 1,
				       ctrl);
	}

	DBG2(dev, "xmit_sg(%u) %d - %d" NL, skb->len, dev->tx_slot, slot);

	/* Attach skb to the last slot so we don't release it too early */
	dev->tx_skb[slot] = skb;

	/* Send the packet out */
	if (dev->tx_slot == NUM_TX_BUFF - 1)
		ctrl |= MAL_TX_CTRL_WRAP;
	/* All chunk BDs must be in memory before the first BD goes READY */
	wmb();
	dev->tx_desc[dev->tx_slot].ctrl = ctrl;
	dev->tx_slot = (slot + 1) % NUM_TX_BUFF;

	return emac_xmit_finish(dev, skb->len);

 undo_frame:
	/* Well, too bad. Our previous estimation was overly optimistic.
	 * Undo everything.
	 */
	while (slot != dev->tx_slot) {
		dev->tx_desc[slot].ctrl = 0;
		--dev->tx_cnt;
		if (--slot < 0)
			slot = NUM_TX_BUFF - 1;
	}
	++dev->estats.tx_undo;

 stop_queue:
	netif_stop_queue(ndev);
	DBG2(dev, "stopped TX queue" NL);
	return NETDEV_TX_BUSY;
}
1498
1499 /* Tx lock BHs */
1500 static void emac_parse_tx_error(struct emac_instance *dev, u16 ctrl)
1501 {
1502         struct emac_error_stats *st = &dev->estats;
1503
1504         DBG(dev, "BD TX error %04x" NL, ctrl);
1505
1506         ++st->tx_bd_errors;
1507         if (ctrl & EMAC_TX_ST_BFCS)
1508                 ++st->tx_bd_bad_fcs;
1509         if (ctrl & EMAC_TX_ST_LCS)
1510                 ++st->tx_bd_carrier_loss;
1511         if (ctrl & EMAC_TX_ST_ED)
1512                 ++st->tx_bd_excessive_deferral;
1513         if (ctrl & EMAC_TX_ST_EC)
1514                 ++st->tx_bd_excessive_collisions;
1515         if (ctrl & EMAC_TX_ST_LC)
1516                 ++st->tx_bd_late_collision;
1517         if (ctrl & EMAC_TX_ST_MC)
1518                 ++st->tx_bd_multple_collisions;
1519         if (ctrl & EMAC_TX_ST_SC)
1520                 ++st->tx_bd_single_collision;
1521         if (ctrl & EMAC_TX_ST_UR)
1522                 ++st->tx_bd_underrun;
1523         if (ctrl & EMAC_TX_ST_SQE)
1524                 ++st->tx_bd_sqe;
1525 }
1526
/* Reclaim completed TX descriptors (@param is the emac_instance).
 * Walks the ring from ack_slot freeing skbs on BDs the hardware has
 * released (READY cleared), accounts BD errors, and wakes the netif
 * queue once the in-flight count drops below the wakeup threshold. */
static void emac_poll_tx(void *param)
{
	struct emac_instance *dev = param;
	u32 bad_mask;

	DBG2(dev, "poll_tx, %d %d" NL, dev->tx_cnt, dev->ack_slot);

	/* TAH-equipped EMACs report a different set of fatal BD bits */
	if (emac_has_feature(dev, EMAC_FTR_HAS_TAH))
		bad_mask = EMAC_IS_BAD_TX_TAH;
	else
		bad_mask = EMAC_IS_BAD_TX;

	netif_tx_lock_bh(dev->ndev);
	if (dev->tx_cnt) {
		u16 ctrl;
		int slot = dev->ack_slot, n = 0;
	again:
		ctrl = dev->tx_desc[slot].ctrl;
		if (!(ctrl & MAL_TX_CTRL_READY)) {
			struct sk_buff *skb = dev->tx_skb[slot];
			++n;

			/* Chunk BDs of a split frame carry no skb — only
			 * the last one does (see emac_start_xmit_sg) */
			if (skb) {
				dev_kfree_skb(skb);
				dev->tx_skb[slot] = NULL;
			}
			slot = (slot + 1) % NUM_TX_BUFF;

			if (unlikely(ctrl & bad_mask))
				emac_parse_tx_error(dev, ctrl);

			/* Keep reclaiming while BDs remain outstanding */
			if (--dev->tx_cnt)
				goto again;
		}
		if (n) {
			dev->ack_slot = slot;
			if (netif_queue_stopped(dev->ndev) &&
			    dev->tx_cnt < EMAC_TX_WAKEUP_THRESH)
				netif_wake_queue(dev->ndev);

			DBG2(dev, "tx %d pkts" NL, n);
		}
	}
	netif_tx_unlock_bh(dev->ndev);
}
1572
/* Hand the skb at @slot back to the hardware without reallocating it.
 * @len is the number of bytes the last receive dirtied (0 = untouched).
 * NOTE(review): the return value of dma_map_single() is discarded — it
 * appears to be used purely for its cache-maintenance side effect, with
 * the BD keeping the bus address set at allocation time (the file header
 * states that missing dma_unmap calls are intentional).  A
 * dma_sync_single_for_device() would express this more directly; confirm
 * before changing. */
static inline void emac_recycle_rx_skb(struct emac_instance *dev, int slot,
				       int len)
{
	struct sk_buff *skb = dev->rx_skb[slot];

	DBG2(dev, "recycle %d %d" NL, slot, len);

	if (len)
		dma_map_single(&dev->ofdev->dev, skb->data - 2,
			       EMAC_DMA_ALIGN(len + 2), DMA_FROM_DEVICE);

	dev->rx_desc[slot].data_len = 0;
	/* data_len must be cleared before the BD is flipped back to EMPTY */
	wmb();
	dev->rx_desc[slot].ctrl = MAL_RX_CTRL_EMPTY |
	    (slot == (NUM_RX_BUFF - 1) ? MAL_RX_CTRL_WRAP : 0);
}
1589
1590 static void emac_parse_rx_error(struct emac_instance *dev, u16 ctrl)
1591 {
1592         struct emac_error_stats *st = &dev->estats;
1593
1594         DBG(dev, "BD RX error %04x" NL, ctrl);
1595
1596         ++st->rx_bd_errors;
1597         if (ctrl & EMAC_RX_ST_OE)
1598                 ++st->rx_bd_overrun;
1599         if (ctrl & EMAC_RX_ST_BP)
1600                 ++st->rx_bd_bad_packet;
1601         if (ctrl & EMAC_RX_ST_RP)
1602                 ++st->rx_bd_runt_packet;
1603         if (ctrl & EMAC_RX_ST_SE)
1604                 ++st->rx_bd_short_event;
1605         if (ctrl & EMAC_RX_ST_AE)
1606                 ++st->rx_bd_alignment_error;
1607         if (ctrl & EMAC_RX_ST_BFCS)
1608                 ++st->rx_bd_bad_fcs;
1609         if (ctrl & EMAC_RX_ST_PTL)
1610                 ++st->rx_bd_packet_too_long;
1611         if (ctrl & EMAC_RX_ST_ORE)
1612                 ++st->rx_bd_out_of_range;
1613         if (ctrl & EMAC_RX_ST_IRE)
1614                 ++st->rx_bd_in_range;
1615 }
1616
1617 static inline void emac_rx_csum(struct emac_instance *dev,
1618                                 struct sk_buff *skb, u16 ctrl)
1619 {
1620 #ifdef CONFIG_IBM_NEW_EMAC_TAH
1621         if (!ctrl && dev->tah_dev) {
1622                 skb->ip_summed = CHECKSUM_UNNECESSARY;
1623                 ++dev->stats.rx_packets_csum;
1624         }
1625 #endif
1626 }
1627
1628 static inline int emac_rx_sg_append(struct emac_instance *dev, int slot)
1629 {
1630         if (likely(dev->rx_sg_skb != NULL)) {
1631                 int len = dev->rx_desc[slot].data_len;
1632                 int tot_len = dev->rx_sg_skb->len + len;
1633
1634                 if (unlikely(tot_len + 2 > dev->rx_skb_size)) {
1635                         ++dev->estats.rx_dropped_mtu;
1636                         dev_kfree_skb(dev->rx_sg_skb);
1637                         dev->rx_sg_skb = NULL;
1638                 } else {
1639                         cacheable_memcpy(skb_tail_pointer(dev->rx_sg_skb),
1640                                          dev->rx_skb[slot]->data, len);
1641                         skb_put(dev->rx_sg_skb, len);
1642                         emac_recycle_rx_skb(dev, slot, len);
1643                         return 0;
1644                 }
1645         }
1646         emac_recycle_rx_skb(dev, slot, 0);
1647         return -1;
1648 }
1649
/* NAPI poll context */
/*
 * Harvest completed RX descriptors, up to @budget packets.
 *
 * Handles both single-descriptor packets and scatter/gather chains
 * (FIRST/LAST descriptors with emac_rx_sg_append() in between).
 * Returns the number of descriptors processed; also restarts the RX
 * channel if it was stopped by a descriptor error and fresh work or
 * free budget remains.
 */
static int emac_poll_rx(void *param, int budget)
{
	struct emac_instance *dev = param;
	int slot = dev->rx_slot, received = 0;

	DBG2(dev, "poll_rx(%d)" NL, budget);

 again:
	while (budget > 0) {
		int len;
		struct sk_buff *skb;
		u16 ctrl = dev->rx_desc[slot].ctrl;

		/* EMPTY means the MAL still owns this descriptor: no more work */
		if (ctrl & MAL_RX_CTRL_EMPTY)
			break;

		skb = dev->rx_skb[slot];
		/* Barrier: make sure data_len is read after ctrl said "done" */
		mb();
		len = dev->rx_desc[slot].data_len;

		/* Multi-descriptor (scatter/gather) packets take the sg path */
		if (unlikely(!MAL_IS_SINGLE_RX(ctrl)))
			goto sg;

		/* Drop on any error other than a TAH checksum failure
		 * (bad csum is handled later by emac_rx_csum()) */
		ctrl &= EMAC_BAD_RX_MASK;
		if (unlikely(ctrl && ctrl != EMAC_RX_TAH_BAD_CSUM)) {
			emac_parse_rx_error(dev, ctrl);
			++dev->estats.rx_dropped_error;
			emac_recycle_rx_skb(dev, slot, 0);
			len = 0;
			goto next;
		}

		/* Runt frames are dropped before reaching the stack */
		if (len < ETH_HLEN) {
			++dev->estats.rx_dropped_stack;
			emac_recycle_rx_skb(dev, slot, len);
			goto next;
		}

		/* Small packets: copy into a fresh skb and recycle the RX
		 * buffer in place; larger packets: hand the RX skb to the
		 * stack and allocate a replacement. The +2/-2 offsets keep
		 * the IP header alignment established at RX skb setup. */
		if (len && len < EMAC_RX_COPY_THRESH) {
			struct sk_buff *copy_skb =
			    alloc_skb(len + EMAC_RX_SKB_HEADROOM + 2, GFP_ATOMIC);
			if (unlikely(!copy_skb))
				goto oom;

			skb_reserve(copy_skb, EMAC_RX_SKB_HEADROOM + 2);
			cacheable_memcpy(copy_skb->data - 2, skb->data - 2,
					 len + 2);
			emac_recycle_rx_skb(dev, slot, len);
			skb = copy_skb;
		} else if (unlikely(emac_alloc_rx_skb(dev, slot, GFP_ATOMIC)))
			goto oom;

		skb_put(skb, len);
	push_packet:
		skb->protocol = eth_type_trans(skb, dev->ndev);
		emac_rx_csum(dev, skb, ctrl);

		if (unlikely(netif_receive_skb(skb) == NET_RX_DROP))
			++dev->estats.rx_dropped_stack;
	next:
		++dev->stats.rx_packets;
	skip:
		dev->stats.rx_bytes += len;
		slot = (slot + 1) % NUM_RX_BUFF;
		--budget;
		++received;
		continue;
	sg:
		/* FIRST descriptor of a chain: start collecting into rx_sg_skb */
		if (ctrl & MAL_RX_CTRL_FIRST) {
			BUG_ON(dev->rx_sg_skb);
			if (unlikely(emac_alloc_rx_skb(dev, slot, GFP_ATOMIC))) {
				DBG(dev, "rx OOM %d" NL, slot);
				++dev->estats.rx_dropped_oom;
				emac_recycle_rx_skb(dev, slot, 0);
			} else {
				dev->rx_sg_skb = skb;
				skb_put(skb, len);
			}
		} else if (!emac_rx_sg_append(dev, slot) &&
			   (ctrl & MAL_RX_CTRL_LAST)) {
			/* Chain complete: check errors, then push like a
			 * single-descriptor packet */
			skb = dev->rx_sg_skb;
			dev->rx_sg_skb = NULL;

			ctrl &= EMAC_BAD_RX_MASK;
			if (unlikely(ctrl && ctrl != EMAC_RX_TAH_BAD_CSUM)) {
				emac_parse_rx_error(dev, ctrl);
				++dev->estats.rx_dropped_error;
				dev_kfree_skb(skb);
				len = 0;
			} else
				goto push_packet;
		}
		/* sg descriptors count bytes but not packets ("skip") */
		goto skip;
	oom:
		DBG(dev, "rx OOM %d" NL, slot);
		/* Drop the packet and recycle skb */
		++dev->estats.rx_dropped_oom;
		emac_recycle_rx_skb(dev, slot, 0);
		goto next;
	}

	if (received) {
		DBG2(dev, "rx %d BDs" NL, received);
		dev->rx_slot = slot;
	}

	/* If the channel was stopped on a descriptor error and we still have
	 * budget, either resume processing (new work arrived) or drop any
	 * half-assembled sg packet and restart the RX channel from slot 0. */
	if (unlikely(budget && test_bit(MAL_COMMAC_RX_STOPPED, &dev->commac.flags))) {
		mb();
		if (!(dev->rx_desc[slot].ctrl & MAL_RX_CTRL_EMPTY)) {
			DBG2(dev, "rx restart" NL);
			received = 0;
			goto again;
		}

		if (dev->rx_sg_skb) {
			DBG2(dev, "dropping partial rx packet" NL);
			++dev->estats.rx_dropped_error;
			dev_kfree_skb(dev->rx_sg_skb);
			dev->rx_sg_skb = NULL;
		}

		clear_bit(MAL_COMMAC_RX_STOPPED, &dev->commac.flags);
		mal_enable_rx_channel(dev->mal, dev->mal_rx_chan);
		emac_rx_enable(dev);
		dev->rx_slot = 0;
	}
	return received;
}
1780
1781 /* NAPI poll context */
1782 static int emac_peek_rx(void *param)
1783 {
1784         struct emac_instance *dev = param;
1785
1786         return !(dev->rx_desc[dev->rx_slot].ctrl & MAL_RX_CTRL_EMPTY);
1787 }
1788
1789 /* NAPI poll context */
1790 static int emac_peek_rx_sg(void *param)
1791 {
1792         struct emac_instance *dev = param;
1793
1794         int slot = dev->rx_slot;
1795         while (1) {
1796                 u16 ctrl = dev->rx_desc[slot].ctrl;
1797                 if (ctrl & MAL_RX_CTRL_EMPTY)
1798                         return 0;
1799                 else if (ctrl & MAL_RX_CTRL_LAST)
1800                         return 1;
1801
1802                 slot = (slot + 1) % NUM_RX_BUFF;
1803
1804                 /* I'm just being paranoid here :) */
1805                 if (unlikely(slot == dev->rx_slot))
1806                         return 0;
1807         }
1808 }
1809
1810 /* Hard IRQ */
1811 static void emac_rxde(void *param)
1812 {
1813         struct emac_instance *dev = param;
1814
1815         ++dev->estats.rx_stopped;
1816         emac_rx_disable_async(dev);
1817 }
1818
/* Hard IRQ */
/*
 * EMAC error interrupt handler: acknowledge all pending sources and
 * update the corresponding error counters. No data-path work is done
 * here; RX/TX processing happens in the MAL/NAPI path.
 */
static irqreturn_t emac_irq(int irq, void *dev_instance)
{
	struct emac_instance *dev = dev_instance;
	struct emac_regs __iomem *p = dev->emacp;
	struct emac_error_stats *st = &dev->estats;
	u32 isr;

	spin_lock(&dev->lock);

	/* Read then write back the ISR to acknowledge every pending source
	 * at once. NOTE(review): presumes ISR bits are write-one-to-clear;
	 * confirm against the EMAC core documentation. */
	isr = in_be32(&p->isr);
	out_be32(&p->isr, isr);

	DBG(dev, "isr = %08x" NL, isr);

	/* EMAC4-specific error sources */
	if (isr & EMAC4_ISR_TXPE)
		++st->tx_parity;
	if (isr & EMAC4_ISR_RXPE)
		++st->rx_parity;
	if (isr & EMAC4_ISR_TXUE)
		++st->tx_underrun;
	if (isr & EMAC4_ISR_RXOE)
		++st->rx_fifo_overrun;
	/* Common error sources */
	if (isr & EMAC_ISR_OVR)
		++st->rx_overrun;
	if (isr & EMAC_ISR_BP)
		++st->rx_bad_packet;
	if (isr & EMAC_ISR_RP)
		++st->rx_runt_packet;
	if (isr & EMAC_ISR_SE)
		++st->rx_short_event;
	if (isr & EMAC_ISR_ALE)
		++st->rx_alignment_error;
	if (isr & EMAC_ISR_BFCS)
		++st->rx_bad_fcs;
	if (isr & EMAC_ISR_PTLE)
		++st->rx_packet_too_long;
	if (isr & EMAC_ISR_ORE)
		++st->rx_out_of_range;
	if (isr & EMAC_ISR_IRE)
		++st->rx_in_range;
	if (isr & EMAC_ISR_SQE)
		++st->tx_sqe;
	if (isr & EMAC_ISR_TE)
		++st->tx_errors;

	spin_unlock(&dev->lock);

	return IRQ_HANDLED;
}
1869
/*
 * .ndo_get_stats: fold the driver's detailed 64-bit hardware/software
 * counters into the legacy struct net_device_stats. The whole snapshot
 * is taken under dev->lock so related counters stay consistent with
 * each other.
 */
static struct net_device_stats *emac_stats(struct net_device *ndev)
{
	struct emac_instance *dev = netdev_priv(ndev);
	struct emac_stats *st = &dev->stats;
	struct emac_error_stats *est = &dev->estats;
	struct net_device_stats *nst = &dev->nstats;
	unsigned long flags;

	DBG2(dev, "stats" NL);

	/* Compute "legacy" statistics */
	spin_lock_irqsave(&dev->lock, flags);
	nst->rx_packets = (unsigned long)st->rx_packets;
	nst->rx_bytes = (unsigned long)st->rx_bytes;
	nst->tx_packets = (unsigned long)st->tx_packets;
	nst->tx_bytes = (unsigned long)st->tx_bytes;
	/* All the distinct drop causes collapse into one legacy counter */
	nst->rx_dropped = (unsigned long)(est->rx_dropped_oom +
					  est->rx_dropped_error +
					  est->rx_dropped_resize +
					  est->rx_dropped_mtu);
	nst->tx_dropped = (unsigned long)est->tx_dropped;

	nst->rx_errors = (unsigned long)est->rx_bd_errors;
	/* Each legacy bucket sums its buffer-descriptor and ISR variants */
	nst->rx_fifo_errors = (unsigned long)(est->rx_bd_overrun +
					      est->rx_fifo_overrun +
					      est->rx_overrun);
	nst->rx_frame_errors = (unsigned long)(est->rx_bd_alignment_error +
					       est->rx_alignment_error);
	nst->rx_crc_errors = (unsigned long)(est->rx_bd_bad_fcs +
					     est->rx_bad_fcs);
	nst->rx_length_errors = (unsigned long)(est->rx_bd_runt_packet +
						est->rx_bd_short_event +
						est->rx_bd_packet_too_long +
						est->rx_bd_out_of_range +
						est->rx_bd_in_range +
						est->rx_runt_packet +
						est->rx_short_event +
						est->rx_packet_too_long +
						est->rx_out_of_range +
						est->rx_in_range);

	nst->tx_errors = (unsigned long)(est->tx_bd_errors + est->tx_errors);
	nst->tx_fifo_errors = (unsigned long)(est->tx_bd_underrun +
					      est->tx_underrun);
	nst->tx_carrier_errors = (unsigned long)est->tx_bd_carrier_loss;
	nst->collisions = (unsigned long)(est->tx_bd_excessive_deferral +
					  est->tx_bd_excessive_collisions +
					  est->tx_bd_late_collision +
					  est->tx_bd_multple_collisions);
	spin_unlock_irqrestore(&dev->lock, flags);
	return nst;
}
1922
/* MAL callbacks for single-descriptor RX mode */
static struct mal_commac_ops emac_commac_ops = {
	.poll_tx = &emac_poll_tx,
	.poll_rx = &emac_poll_rx,
	.peek_rx = &emac_peek_rx,
	.rxde = &emac_rxde,
};

/* MAL callbacks for scatter/gather RX mode: identical except peek_rx,
 * which must wait for a complete descriptor chain */
static struct mal_commac_ops emac_commac_sg_ops = {
	.poll_tx = &emac_poll_tx,
	.poll_rx = &emac_poll_rx,
	.peek_rx = &emac_peek_rx_sg,
	.rxde = &emac_rxde,
};
1936
1937 /* Ethtool support */
1938 static int emac_ethtool_get_settings(struct net_device *ndev,
1939                                      struct ethtool_cmd *cmd)
1940 {
1941         struct emac_instance *dev = netdev_priv(ndev);
1942
1943         cmd->supported = dev->phy.features;
1944         cmd->port = PORT_MII;
1945         cmd->phy_address = dev->phy.address;
1946         cmd->transceiver =
1947             dev->phy.address >= 0 ? XCVR_EXTERNAL : XCVR_INTERNAL;
1948
1949         mutex_lock(&dev->link_lock);
1950         cmd->advertising = dev->phy.advertising;
1951         cmd->autoneg = dev->phy.autoneg;
1952         cmd->speed = dev->phy.speed;
1953         cmd->duplex = dev->phy.duplex;
1954         mutex_unlock(&dev->link_lock);
1955
1956         return 0;
1957 }
1958
/*
 * ethtool set_settings: validate the requested link configuration
 * against the PHY's feature mask, then either force speed/duplex or
 * restart autonegotiation with the filtered advertising mask.
 */
static int emac_ethtool_set_settings(struct net_device *ndev,
				     struct ethtool_cmd *cmd)
{
	struct emac_instance *dev = netdev_priv(ndev);
	u32 f = dev->phy.features;

	DBG(dev, "set_settings(%d, %d, %d, 0x%08x)" NL,
	    cmd->autoneg, cmd->speed, cmd->duplex, cmd->advertising);

	/* Basic sanity checks */
	if (dev->phy.address < 0)
		return -EOPNOTSUPP;	/* PHY-less configuration */
	if (cmd->autoneg != AUTONEG_ENABLE && cmd->autoneg != AUTONEG_DISABLE)
		return -EINVAL;
	if (cmd->autoneg == AUTONEG_ENABLE && cmd->advertising == 0)
		return -EINVAL;
	if (cmd->duplex != DUPLEX_HALF && cmd->duplex != DUPLEX_FULL)
		return -EINVAL;

	if (cmd->autoneg == AUTONEG_DISABLE) {
		/* Forced mode: the requested speed/duplex pair must be in
		 * the PHY's supported feature set */
		switch (cmd->speed) {
		case SPEED_10:
			if (cmd->duplex == DUPLEX_HALF &&
			    !(f & SUPPORTED_10baseT_Half))
				return -EINVAL;
			if (cmd->duplex == DUPLEX_FULL &&
			    !(f & SUPPORTED_10baseT_Full))
				return -EINVAL;
			break;
		case SPEED_100:
			if (cmd->duplex == DUPLEX_HALF &&
			    !(f & SUPPORTED_100baseT_Half))
				return -EINVAL;
			if (cmd->duplex == DUPLEX_FULL &&
			    !(f & SUPPORTED_100baseT_Full))
				return -EINVAL;
			break;
		case SPEED_1000:
			if (cmd->duplex == DUPLEX_HALF &&
			    !(f & SUPPORTED_1000baseT_Half))
				return -EINVAL;
			if (cmd->duplex == DUPLEX_FULL &&
			    !(f & SUPPORTED_1000baseT_Full))
				return -EINVAL;
			break;
		default:
			return -EINVAL;
		}

		mutex_lock(&dev->link_lock);
		dev->phy.def->ops->setup_forced(&dev->phy, cmd->speed,
						cmd->duplex);
		mutex_unlock(&dev->link_lock);

	} else {
		if (!(f & SUPPORTED_Autoneg))
			return -EINVAL;

		/* Advertise only modes the PHY supports, preserving the
		 * current pause/asym-pause advertisement */
		mutex_lock(&dev->link_lock);
		dev->phy.def->ops->setup_aneg(&dev->phy,
					      (cmd->advertising & f) |
					      (dev->phy.advertising &
					       (ADVERTISED_Pause |
						ADVERTISED_Asym_Pause)));
		mutex_unlock(&dev->link_lock);
	}
	emac_force_link_update(dev);

	return 0;
}
2029
2030 static void emac_ethtool_get_ringparam(struct net_device *ndev,
2031                                        struct ethtool_ringparam *rp)
2032 {
2033         rp->rx_max_pending = rp->rx_pending = NUM_RX_BUFF;
2034         rp->tx_max_pending = rp->tx_pending = NUM_TX_BUFF;
2035 }
2036
/*
 * ethtool get_pauseparam: derive the flow-control state from the PHY's
 * negotiated/advertised pause bits, under the link mutex.
 */
static void emac_ethtool_get_pauseparam(struct net_device *ndev,
					struct ethtool_pauseparam *pp)
{
	struct emac_instance *dev = netdev_priv(ndev);

	mutex_lock(&dev->link_lock);
	/* Pause autoneg is on if the PHY autonegotiates and advertises
	 * either pause flavour */
	if ((dev->phy.features & SUPPORTED_Autoneg) &&
	    (dev->phy.advertising & (ADVERTISED_Pause | ADVERTISED_Asym_Pause)))
		pp->autoneg = 1;

	/* Flow control is only meaningful in full duplex: symmetric pause
	 * enables both directions, asymmetric enables TX pause only */
	if (dev->phy.duplex == DUPLEX_FULL) {
		if (dev->phy.pause)
			pp->rx_pause = pp->tx_pause = 1;
		else if (dev->phy.asym_pause)
			pp->tx_pause = 1;
	}
	mutex_unlock(&dev->link_lock);
}
2055
2056 static u32 emac_ethtool_get_rx_csum(struct net_device *ndev)
2057 {
2058         struct emac_instance *dev = netdev_priv(ndev);
2059
2060         return dev->tah_dev != NULL;
2061 }
2062
2063 static int emac_get_regs_len(struct emac_instance *dev)
2064 {
2065         if (emac_has_feature(dev, EMAC_FTR_EMAC4))
2066                 return sizeof(struct emac_ethtool_regs_subhdr) +
2067                         EMAC4_ETHTOOL_REGS_SIZE(dev);
2068         else
2069                 return sizeof(struct emac_ethtool_regs_subhdr) +
2070                         EMAC_ETHTOOL_REGS_SIZE(dev);
2071 }
2072
2073 static int emac_ethtool_get_regs_len(struct net_device *ndev)
2074 {
2075         struct emac_instance *dev = netdev_priv(ndev);
2076         int size;
2077
2078         size = sizeof(struct emac_ethtool_regs_hdr) +
2079                 emac_get_regs_len(dev) + mal_get_regs_len(dev->mal);
2080         if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
2081                 size += zmii_get_regs_len(dev->zmii_dev);
2082         if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
2083                 size += rgmii_get_regs_len(dev->rgmii_dev);
2084         if (emac_has_feature(dev, EMAC_FTR_HAS_TAH))
2085                 size += tah_get_regs_len(dev->tah_dev);
2086
2087         return size;
2088 }
2089
/*
 * Write this EMAC's register dump (sub-header + raw MMIO register copy)
 * at @buf; returns the position just past what was written so the
 * caller can chain component dumps.
 */
static void *emac_dump_regs(struct emac_instance *dev, void *buf)
{
	struct emac_ethtool_regs_subhdr *hdr = buf;

	hdr->index = dev->cell_index;
	if (emac_has_feature(dev, EMAC_FTR_EMAC4)) {
		hdr->version = EMAC4_ETHTOOL_REGS_VER;
		/* Registers follow the sub-header immediately (hdr + 1) */
		memcpy_fromio(hdr + 1, dev->emacp, EMAC4_ETHTOOL_REGS_SIZE(dev));
		return ((void *)(hdr + 1) + EMAC4_ETHTOOL_REGS_SIZE(dev));
	} else {
		hdr->version = EMAC_ETHTOOL_REGS_VER;
		memcpy_fromio(hdr + 1, dev->emacp, EMAC_ETHTOOL_REGS_SIZE(dev));
		return ((void *)(hdr + 1) + EMAC_ETHTOOL_REGS_SIZE(dev));
	}
}
2105
/*
 * ethtool get_regs: fill @buf with a header describing which components
 * are present, followed by the MAL, EMAC and per-component dumps in a
 * fixed order. The buffer was sized by emac_ethtool_get_regs_len().
 */
static void emac_ethtool_get_regs(struct net_device *ndev,
				  struct ethtool_regs *regs, void *buf)
{
	struct emac_instance *dev = netdev_priv(ndev);
	struct emac_ethtool_regs_hdr *hdr = buf;

	hdr->components = 0;
	buf = hdr + 1;

	/* Each dump helper returns the position after its own data */
	buf = mal_dump_regs(dev->mal, buf);
	buf = emac_dump_regs(dev, buf);
	if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII)) {
		hdr->components |= EMAC_ETHTOOL_REGS_ZMII;
		buf = zmii_dump_regs(dev->zmii_dev, buf);
	}
	if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII)) {
		hdr->components |= EMAC_ETHTOOL_REGS_RGMII;
		buf = rgmii_dump_regs(dev->rgmii_dev, buf);
	}
	if (emac_has_feature(dev, EMAC_FTR_HAS_TAH)) {
		hdr->components |= EMAC_ETHTOOL_REGS_TAH;
		buf = tah_dump_regs(dev->tah_dev, buf);
	}
}
2130
2131 static int emac_ethtool_nway_reset(struct net_device *ndev)
2132 {
2133         struct emac_instance *dev = netdev_priv(ndev);
2134         int res = 0;
2135
2136         DBG(dev, "nway_reset" NL);
2137
2138         if (dev->phy.address < 0)
2139                 return -EOPNOTSUPP;
2140
2141         mutex_lock(&dev->link_lock);
2142         if (!dev->phy.autoneg) {
2143                 res = -EINVAL;
2144                 goto out;
2145         }
2146
2147         dev->phy.def->ops->setup_aneg(&dev->phy, dev->phy.advertising);
2148  out:
2149         mutex_unlock(&dev->link_lock);
2150         emac_force_link_update(dev);
2151         return res;
2152 }
2153
2154 static int emac_ethtool_get_sset_count(struct net_device *ndev, int stringset)
2155 {
2156         if (stringset == ETH_SS_STATS)
2157                 return EMAC_ETHTOOL_STATS_COUNT;
2158         else
2159                 return -EINVAL;
2160 }
2161
2162 static void emac_ethtool_get_strings(struct net_device *ndev, u32 stringset,
2163                                      u8 * buf)
2164 {
2165         if (stringset == ETH_SS_STATS)
2166                 memcpy(buf, &emac_stats_keys, sizeof(emac_stats_keys));
2167 }
2168
/*
 * ethtool get_ethtool_stats: copy the regular and error counter structs
 * back-to-back into the u64 output array. Relies on both structs being
 * laid out as arrays of u64 matching emac_stats_keys order.
 * NOTE(review): assumes sizeof(dev->stats) is a multiple of sizeof(u64);
 * confirm against the struct definitions in core.h.
 */
static void emac_ethtool_get_ethtool_stats(struct net_device *ndev,
					   struct ethtool_stats *estats,
					   u64 * tmp_stats)
{
	struct emac_instance *dev = netdev_priv(ndev);

	memcpy(tmp_stats, &dev->stats, sizeof(dev->stats));
	/* Advance by the number of u64 slots just written */
	tmp_stats += sizeof(dev->stats) / sizeof(u64);
	memcpy(tmp_stats, &dev->estats, sizeof(dev->estats));
}
2179
2180 static void emac_ethtool_get_drvinfo(struct net_device *ndev,
2181                                      struct ethtool_drvinfo *info)
2182 {
2183         struct emac_instance *dev = netdev_priv(ndev);
2184
2185         strcpy(info->driver, "ibm_emac");
2186         strcpy(info->version, DRV_VERSION);
2187         info->fw_version[0] = '\0';
2188         sprintf(info->bus_info, "PPC 4xx EMAC-%d %s",
2189                 dev->cell_index, dev->ofdev->dev.of_node->full_name);
2190         info->regdump_len = emac_ethtool_get_regs_len(ndev);
2191 }
2192
/* ethtool operations table; link/tx-csum/sg queries use the generic
 * ethtool_op_* helpers */
static const struct ethtool_ops emac_ethtool_ops = {
	.get_settings = emac_ethtool_get_settings,
	.set_settings = emac_ethtool_set_settings,
	.get_drvinfo = emac_ethtool_get_drvinfo,

	.get_regs_len = emac_ethtool_get_regs_len,
	.get_regs = emac_ethtool_get_regs,

	.nway_reset = emac_ethtool_nway_reset,

	.get_ringparam = emac_ethtool_get_ringparam,
	.get_pauseparam = emac_ethtool_get_pauseparam,

	.get_rx_csum = emac_ethtool_get_rx_csum,

	.get_strings = emac_ethtool_get_strings,
	.get_sset_count = emac_ethtool_get_sset_count,
	.get_ethtool_stats = emac_ethtool_get_ethtool_stats,

	.get_link = ethtool_op_get_link,
	.get_tx_csum = ethtool_op_get_tx_csum,
	.get_sg = ethtool_op_get_sg,
};
2216
/*
 * .ndo_do_ioctl: MII ioctls against the attached PHY. Note that the
 * register accesses always use dev->phy.address, ignoring any phy_id
 * supplied by the caller.
 */
static int emac_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd)
{
	struct emac_instance *dev = netdev_priv(ndev);
	struct mii_ioctl_data *data = if_mii(rq);

	DBG(dev, "ioctl %08x" NL, cmd);

	if (dev->phy.address < 0)
		return -EOPNOTSUPP;	/* PHY-less configuration */

	switch (cmd) {
	case SIOCGMIIPHY:
		data->phy_id = dev->phy.address;
		/* Fall through: also return the requested register */
	case SIOCGMIIREG:
		data->val_out = emac_mdio_read(ndev, dev->phy.address,
					       data->reg_num);
		return 0;

	case SIOCSMIIREG:
		emac_mdio_write(ndev, dev->phy.address, data->reg_num,
				data->val_in);
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}
2244
/* One device-tree dependency of an EMAC instance, tracked through its
 * stages of resolution: phandle -> device node -> of_device -> bound
 * driver data. A zero phandle means "no such dependency". */
struct emac_depentry {
	u32                     phandle;
	struct device_node      *node;
	struct of_device        *ofdev;
	void                    *drvdata;
};

/* Fixed slots in the dependency array used by emac_check_deps() and
 * emac_wait_deps(); PREV is the previous EMAC in the boot list */
#define EMAC_DEP_MAL_IDX        0
#define EMAC_DEP_ZMII_IDX       1
#define EMAC_DEP_RGMII_IDX      2
#define EMAC_DEP_TAH_IDX        3
#define EMAC_DEP_MDIO_IDX       4
#define EMAC_DEP_PREV_IDX       5
#define EMAC_DEP_COUNT          6
2259
/*
 * Try to advance every dependency one or more resolution stages
 * (phandle -> node -> of_device -> drvdata). Returns non-zero once all
 * EMAC_DEP_COUNT dependencies are fully resolved (or absent). Safe to
 * call repeatedly; each call only fills in what is still missing.
 */
static int __devinit emac_check_deps(struct emac_instance *dev,
				     struct emac_depentry *deps)
{
	int i, there = 0;
	struct device_node *np;

	for (i = 0; i < EMAC_DEP_COUNT; i++) {
		/* no dependency on that item, all right */
		if (deps[i].phandle == 0) {
			there++;
			continue;
		}
		/* special case for blist as the dependency might go away */
		if (i == EMAC_DEP_PREV_IDX) {
			np = *(dev->blist - 1);
			if (np == NULL) {
				deps[i].phandle = 0;
				there++;
				continue;
			}
			if (deps[i].node == NULL)
				deps[i].node = of_node_get(np);
		}
		if (deps[i].node == NULL)
			deps[i].node = of_find_node_by_phandle(deps[i].phandle);
		if (deps[i].node == NULL)
			continue;
		if (deps[i].ofdev == NULL)
			deps[i].ofdev = of_find_device_by_node(deps[i].node);
		if (deps[i].ofdev == NULL)
			continue;
		if (deps[i].drvdata == NULL)
			deps[i].drvdata = dev_get_drvdata(&deps[i].ofdev->dev);
		/* drvdata present means a driver has bound: fully resolved */
		if (deps[i].drvdata != NULL)
			there++;
	}
	return (there == EMAC_DEP_COUNT);
}
2298
2299 static void emac_put_deps(struct emac_instance *dev)
2300 {
2301         if (dev->mal_dev)
2302                 of_dev_put(dev->mal_dev);
2303         if (dev->zmii_dev)
2304                 of_dev_put(dev->zmii_dev);
2305         if (dev->rgmii_dev)
2306                 of_dev_put(dev->rgmii_dev);
2307         if (dev->mdio_dev)
2308                 of_dev_put(dev->mdio_dev);
2309         if (dev->tah_dev)
2310                 of_dev_put(dev->tah_dev);
2311 }
2312
/*
 * Bus notifier callback: wake any EMAC probe waiting on its
 * dependencies whenever a driver binds to a device on this bus,
 * so emac_wait_deps() can re-check them.
 */
static int __devinit emac_of_bus_notify(struct notifier_block *nb,
					unsigned long action, void *data)
{
	/* We are only interested in drivers binding to devices */
	if (action == BUS_NOTIFY_BOUND_DRIVER)
		wake_up_all(&emac_probe_wait);
	return 0;
}
2321
/* Registered on the platform bus only for the duration of emac_wait_deps() */
static struct notifier_block emac_of_bus_notifier __devinitdata = {
	.notifier_call = emac_of_bus_notify
};
2325
/*
 * Wait (with timeout) until all of this EMAC's dependencies have
 * probed, re-checking them each time a driver binds on the platform
 * bus. On success the resolved of_device references are transferred
 * into dev->*_dev (except the PREV entry, which is only a probe-order
 * barrier and is dropped). On failure every reference is released and
 * -ENODEV is returned.
 */
static int __devinit emac_wait_deps(struct emac_instance *dev)
{
	struct emac_depentry deps[EMAC_DEP_COUNT];
	int i, err;

	memset(&deps, 0, sizeof(deps));

	deps[EMAC_DEP_MAL_IDX].phandle = dev->mal_ph;
	deps[EMAC_DEP_ZMII_IDX].phandle = dev->zmii_ph;
	deps[EMAC_DEP_RGMII_IDX].phandle = dev->rgmii_ph;
	if (dev->tah_ph)
		deps[EMAC_DEP_TAH_IDX].phandle = dev->tah_ph;
	if (dev->mdio_ph)
		deps[EMAC_DEP_MDIO_IDX].phandle = dev->mdio_ph;
	/* Any non-zero phandle marks the PREV slot as "must wait for the
	 * previous EMAC in the boot list" */
	if (dev->blist && dev->blist > emac_boot_list)
		deps[EMAC_DEP_PREV_IDX].phandle = 0xffffffffu;
	bus_register_notifier(&platform_bus_type, &emac_of_bus_notifier);
	wait_event_timeout(emac_probe_wait,
			   emac_check_deps(dev, deps),
			   EMAC_PROBE_DEP_TIMEOUT);
	bus_unregister_notifier(&platform_bus_type, &emac_of_bus_notifier);
	/* One final check after the notifier is gone */
	err = emac_check_deps(dev, deps) ? 0 : -ENODEV;
	for (i = 0; i < EMAC_DEP_COUNT; i++) {
		if (deps[i].node)
			of_node_put(deps[i].node);
		/* On failure, drop the device references too */
		if (err && deps[i].ofdev)
			of_dev_put(deps[i].ofdev);
	}
	if (err == 0) {
		/* Ownership of the of_device references moves to dev */
		dev->mal_dev = deps[EMAC_DEP_MAL_IDX].ofdev;
		dev->zmii_dev = deps[EMAC_DEP_ZMII_IDX].ofdev;
		dev->rgmii_dev = deps[EMAC_DEP_RGMII_IDX].ofdev;
		dev->tah_dev = deps[EMAC_DEP_TAH_IDX].ofdev;
		dev->mdio_dev = deps[EMAC_DEP_MDIO_IDX].ofdev;
	}
	/* The PREV entry is only an ordering barrier; never keep it */
	if (deps[EMAC_DEP_PREV_IDX].ofdev)
		of_dev_put(deps[EMAC_DEP_PREV_IDX].ofdev);
	return err;
}
2365
2366 static int __devinit emac_read_uint_prop(struct device_node *np, const char *name,
2367                                          u32 *val, int fatal)
2368 {
2369         int len;
2370         const u32 *prop = of_get_property(np, name, &len);
2371         if (prop == NULL || len < sizeof(u32)) {
2372                 if (fatal)
2373                         printk(KERN_ERR "%s: missing %s property\n",
2374                                np->full_name, name);
2375                 return -ENODEV;
2376         }
2377         *val = *prop;
2378         return 0;
2379 }
2380
2381 static int __devinit emac_init_phy(struct emac_instance *dev)
2382 {
2383         struct device_node *np = dev->ofdev->dev.of_node;
2384         struct net_device *ndev = dev->ndev;
2385         u32 phy_map, adv;
2386         int i;
2387
2388         dev->phy.dev = ndev;
2389         dev->phy.mode = dev->phy_mode;
2390
2391         /* PHY-less configuration.
2392          * XXX I probably should move these settings to the dev tree
2393          */
2394         if (dev->phy_address == 0xffffffff && dev->phy_map == 0xffffffff) {
2395                 emac_reset(dev);
2396
2397                 /* PHY-less configuration.
2398                  * XXX I probably should move these settings to the dev tree
2399                  */
2400                 dev->phy.address = -1;
2401                 dev->phy.features = SUPPORTED_MII;
2402                 if (emac_phy_supports_gige(dev->phy_mode))
2403                         dev->phy.features |= SUPPORTED_1000baseT_Full;
2404                 else
2405                         dev->phy.features |= SUPPORTED_100baseT_Full;
2406                 dev->phy.pause = 1;
2407
2408                 return 0;
2409         }
2410
2411         mutex_lock(&emac_phy_map_lock);
2412         phy_map = dev->phy_map | busy_phy_map;
2413
2414         DBG(dev, "PHY maps %08x %08x" NL, dev->phy_map, busy_phy_map);
2415
2416         dev->phy.mdio_read = emac_mdio_read;
2417         dev->phy.mdio_write = emac_mdio_write;
2418
2419         /* Enable internal clock source */
2420 #ifdef CONFIG_PPC_DCR_NATIVE
2421         if (emac_has_feature(dev, EMAC_FTR_440GX_PHY_CLK_FIX))
2422                 dcri_clrset(SDR0, SDR0_MFR, 0, SDR0_MFR_ECS);
2423 #endif
2424         /* PHY clock workaround */
2425         emac_rx_clk_tx(dev);
2426
2427         /* Enable internal clock source on 440GX*/
2428 #ifdef CONFIG_PPC_DCR_NATIVE
2429         if (emac_has_feature(dev, EMAC_FTR_440GX_PHY_CLK_FIX))
2430                 dcri_clrset(SDR0, SDR0_MFR, 0, SDR0_MFR_ECS);
2431 #endif
2432         /* Configure EMAC with defaults so we can at least use MDIO
2433          * This is needed mostly for 440GX
2434          */
2435         if (emac_phy_gpcs(dev->phy.mode)) {
2436                 /* XXX
2437                  * Make GPCS PHY address equal to EMAC index.
2438                  * We probably should take into account busy_phy_map
2439                  * and/or phy_map here.
2440                  *
2441                  * Note that the busy_phy_map is currently global
2442                  * while it should probably be per-ASIC...
2443                  */
2444                 dev->phy.gpcs_address = dev->gpcs_address;
2445                 if (dev->phy.gpcs_address == 0xffffffff)
2446                         dev->phy.address = dev->cell_index;
2447         }
2448
2449         emac_configure(dev);
2450
2451         if (dev->phy_address != 0xffffffff)
2452                 phy_map = ~(1 << dev->phy_address);
2453
2454         for (i = 0; i < 0x20; phy_map >>= 1, ++i)
2455                 if (!(phy_map & 1)) {
2456                         int r;
2457                         busy_phy_map |= 1 << i;
2458
2459                         /* Quick check if there is a PHY at the address */
2460                         r = emac_mdio_read(dev->ndev, i, MII_BMCR);
2461                         if (r == 0xffff || r < 0)
2462                                 continue;
2463                         if (!emac_mii_phy_probe(&dev->phy, i))
2464                                 break;
2465                 }
2466
2467         /* Enable external clock source */
2468 #ifdef CONFIG_PPC_DCR_NATIVE
2469         if (emac_has_feature(dev, EMAC_FTR_440GX_PHY_CLK_FIX))
2470                 dcri_clrset(SDR0, SDR0_MFR, SDR0_MFR_ECS, 0);
2471 #endif
2472         mutex_unlock(&emac_phy_map_lock);
2473         if (i == 0x20) {
2474                 printk(KERN_WARNING "%s: can't find PHY!\n", np->full_name);
2475                 return -ENXIO;
2476         }
2477
2478         /* Init PHY */
2479         if (dev->phy.def->ops->init)
2480                 dev->phy.def->ops->init(&dev->phy);
2481
2482         /* Disable any PHY features not supported by the platform */
2483         dev->phy.def->features &= ~dev->phy_feat_exc;
2484
2485         /* Setup initial link parameters */
2486         if (dev->phy.features & SUPPORTED_Autoneg) {
2487                 adv = dev->phy.features;
2488                 if (!emac_has_feature(dev, EMAC_FTR_NO_FLOW_CONTROL_40x))
2489                         adv |= ADVERTISED_Pause | ADVERTISED_Asym_Pause;
2490                 /* Restart autonegotiation */
2491                 dev->phy.def->ops->setup_aneg(&dev->phy, adv);
2492         } else {
2493                 u32 f = dev->phy.def->features;
2494                 int speed = SPEED_10, fd = DUPLEX_HALF;
2495
2496                 /* Select highest supported speed/duplex */
2497                 if (f & SUPPORTED_1000baseT_Full) {
2498                         speed = SPEED_1000;
2499                         fd = DUPLEX_FULL;
2500                 } else if (f & SUPPORTED_1000baseT_Half)
2501                         speed = SPEED_1000;
2502                 else if (f & SUPPORTED_100baseT_Full) {
2503                         speed = SPEED_100;
2504                         fd = DUPLEX_FULL;
2505                 } else if (f & SUPPORTED_100baseT_Half)
2506                         speed = SPEED_100;
2507                 else if (f & SUPPORTED_10baseT_Full)
2508                         fd = DUPLEX_FULL;
2509
2510                 /* Force link parameters */
2511                 dev->phy.def->ops->setup_forced(&dev->phy, speed, fd);
2512         }
2513         return 0;
2514 }
2515
2516 static int __devinit emac_init_config(struct emac_instance *dev)
2517 {
2518         struct device_node *np = dev->ofdev->dev.of_node;
2519         const void *p;
2520         unsigned int plen;
2521         const char *pm, *phy_modes[] = {
2522                 [PHY_MODE_NA] = "",
2523                 [PHY_MODE_MII] = "mii",
2524                 [PHY_MODE_RMII] = "rmii",
2525                 [PHY_MODE_SMII] = "smii",
2526                 [PHY_MODE_RGMII] = "rgmii",
2527                 [PHY_MODE_TBI] = "tbi",
2528                 [PHY_MODE_GMII] = "gmii",
2529                 [PHY_MODE_RTBI] = "rtbi",
2530                 [PHY_MODE_SGMII] = "sgmii",
2531         };
2532
2533         /* Read config from device-tree */
2534         if (emac_read_uint_prop(np, "mal-device", &dev->mal_ph, 1))
2535                 return -ENXIO;
2536         if (emac_read_uint_prop(np, "mal-tx-channel", &dev->mal_tx_chan, 1))
2537                 return -ENXIO;
2538         if (emac_read_uint_prop(np, "mal-rx-channel", &dev->mal_rx_chan, 1))
2539                 return -ENXIO;
2540         if (emac_read_uint_prop(np, "cell-index", &dev->cell_index, 1))
2541                 return -ENXIO;
2542         if (emac_read_uint_prop(np, "max-frame-size", &dev->max_mtu, 0))
2543                 dev->max_mtu = 1500;
2544         if (emac_read_uint_prop(np, "rx-fifo-size", &dev->rx_fifo_size, 0))
2545                 dev->rx_fifo_size = 2048;
2546         if (emac_read_uint_prop(np, "tx-fifo-size", &dev->tx_fifo_size, 0))
2547                 dev->tx_fifo_size = 2048;
2548         if (emac_read_uint_prop(np, "rx-fifo-size-gige", &dev->rx_fifo_size_gige, 0))
2549                 dev->rx_fifo_size_gige = dev->rx_fifo_size;
2550         if (emac_read_uint_prop(np, "tx-fifo-size-gige", &dev->tx_fifo_size_gige, 0))
2551                 dev->tx_fifo_size_gige = dev->tx_fifo_size;
2552         if (emac_read_uint_prop(np, "phy-address", &dev->phy_address, 0))
2553                 dev->phy_address = 0xffffffff;
2554         if (emac_read_uint_prop(np, "phy-map", &dev->phy_map, 0))
2555                 dev->phy_map = 0xffffffff;
2556         if (emac_read_uint_prop(np, "gpcs-address", &dev->gpcs_address, 0))
2557                 dev->gpcs_address = 0xffffffff;
2558         if (emac_read_uint_prop(np->parent, "clock-frequency", &dev->opb_bus_freq, 1))
2559                 return -ENXIO;
2560         if (emac_read_uint_prop(np, "tah-device", &dev->tah_ph, 0))
2561                 dev->tah_ph = 0;
2562         if (emac_read_uint_prop(np, "tah-channel", &dev->tah_port, 0))
2563                 dev->tah_port = 0;
2564         if (emac_read_uint_prop(np, "mdio-device", &dev->mdio_ph, 0))
2565                 dev->mdio_ph = 0;
2566         if (emac_read_uint_prop(np, "zmii-device", &dev->zmii_ph, 0))
2567                 dev->zmii_ph = 0;
2568         if (emac_read_uint_prop(np, "zmii-channel", &dev->zmii_port, 0))
2569                 dev->zmii_port = 0xffffffff;
2570         if (emac_read_uint_prop(np, "rgmii-device", &dev->rgmii_ph, 0))
2571                 dev->rgmii_ph = 0;
2572         if (emac_read_uint_prop(np, "rgmii-channel", &dev->rgmii_port, 0))
2573                 dev->rgmii_port = 0xffffffff;
2574         if (emac_read_uint_prop(np, "fifo-entry-size", &dev->fifo_entry_size, 0))
2575                 dev->fifo_entry_size = 16;
2576         if (emac_read_uint_prop(np, "mal-burst-size", &dev->mal_burst_size, 0))
2577                 dev->mal_burst_size = 256;
2578
2579         /* PHY mode needs some decoding */
2580         dev->phy_mode = PHY_MODE_NA;
2581         pm = of_get_property(np, "phy-mode", &plen);
2582         if (pm != NULL) {
2583                 int i;
2584                 for (i = 0; i < ARRAY_SIZE(phy_modes); i++)
2585                         if (!strcasecmp(pm, phy_modes[i])) {
2586                                 dev->phy_mode = i;
2587                                 break;
2588                         }
2589         }
2590
2591         /* Backward compat with non-final DT */
2592         if (dev->phy_mode == PHY_MODE_NA && pm != NULL && plen == 4) {
2593                 u32 nmode = *(const u32 *)pm;
2594                 if (nmode > PHY_MODE_NA && nmode <= PHY_MODE_SGMII)
2595                         dev->phy_mode = nmode;
2596         }
2597
2598         /* Check EMAC version */
2599         if (of_device_is_compatible(np, "ibm,emac4sync")) {
2600                 dev->features |= (EMAC_FTR_EMAC4 | EMAC_FTR_EMAC4SYNC);
2601                 if (of_device_is_compatible(np, "ibm,emac-460ex") ||
2602                     of_device_is_compatible(np, "ibm,emac-460gt"))
2603                         dev->features |= EMAC_FTR_460EX_PHY_CLK_FIX;
2604                 if (of_device_is_compatible(np, "ibm,emac-405ex") ||
2605                     of_device_is_compatible(np, "ibm,emac-405exr"))
2606                         dev->features |= EMAC_FTR_440EP_PHY_CLK_FIX;
2607         } else if (of_device_is_compatible(np, "ibm,emac4")) {
2608                 dev->features |= EMAC_FTR_EMAC4;
2609                 if (of_device_is_compatible(np, "ibm,emac-440gx"))
2610                         dev->features |= EMAC_FTR_440GX_PHY_CLK_FIX;
2611         } else {
2612                 if (of_device_is_compatible(np, "ibm,emac-440ep") ||
2613                     of_device_is_compatible(np, "ibm,emac-440gr"))
2614                         dev->features |= EMAC_FTR_440EP_PHY_CLK_FIX;
2615                 if (of_device_is_compatible(np, "ibm,emac-405ez")) {
2616 #ifdef CONFIG_IBM_NEW_EMAC_NO_FLOW_CTRL
2617                         dev->features |= EMAC_FTR_NO_FLOW_CONTROL_40x;
2618 #else
2619                         printk(KERN_ERR "%s: Flow control not disabled!\n",
2620                                         np->full_name);
2621                         return -ENXIO;
2622 #endif
2623                 }
2624
2625         }
2626
2627         /* Fixup some feature bits based on the device tree */
2628         if (of_get_property(np, "has-inverted-stacr-oc", NULL))
2629                 dev->features |= EMAC_FTR_STACR_OC_INVERT;
2630         if (of_get_property(np, "has-new-stacr-staopc", NULL))
2631                 dev->features |= EMAC_FTR_HAS_NEW_STACR;
2632
2633         /* CAB lacks the appropriate properties */
2634         if (of_device_is_compatible(np, "ibm,emac-axon"))
2635                 dev->features |= EMAC_FTR_HAS_NEW_STACR |
2636                         EMAC_FTR_STACR_OC_INVERT;
2637
2638         /* Enable TAH/ZMII/RGMII features as found */
2639         if (dev->tah_ph != 0) {
2640 #ifdef CONFIG_IBM_NEW_EMAC_TAH
2641                 dev->features |= EMAC_FTR_HAS_TAH;
2642 #else
2643                 printk(KERN_ERR "%s: TAH support not enabled !\n",
2644                        np->full_name);
2645                 return -ENXIO;
2646 #endif
2647         }
2648
2649         if (dev->zmii_ph != 0) {
2650 #ifdef CONFIG_IBM_NEW_EMAC_ZMII
2651                 dev->features |= EMAC_FTR_HAS_ZMII;
2652 #else
2653                 printk(KERN_ERR "%s: ZMII support not enabled !\n",
2654                        np->full_name);
2655                 return -ENXIO;
2656 #endif
2657         }
2658
2659         if (dev->rgmii_ph != 0) {
2660 #ifdef CONFIG_IBM_NEW_EMAC_RGMII
2661                 dev->features |= EMAC_FTR_HAS_RGMII;
2662 #else
2663                 printk(KERN_ERR "%s: RGMII support not enabled !\n",
2664                        np->full_name);
2665                 return -ENXIO;
2666 #endif
2667         }
2668
2669         /* Read MAC-address */
2670         p = of_get_property(np, "local-mac-address", NULL);
2671         if (p == NULL) {
2672                 printk(KERN_ERR "%s: Can't find local-mac-address property\n",
2673                        np->full_name);
2674                 return -ENXIO;
2675         }
2676         memcpy(dev->ndev->dev_addr, p, 6);
2677
2678         /* IAHT and GAHT filter parameterization */
2679         if (emac_has_feature(dev, EMAC_FTR_EMAC4SYNC)) {
2680                 dev->xaht_slots_shift = EMAC4SYNC_XAHT_SLOTS_SHIFT;
2681                 dev->xaht_width_shift = EMAC4SYNC_XAHT_WIDTH_SHIFT;
2682         } else {
2683                 dev->xaht_slots_shift = EMAC4_XAHT_SLOTS_SHIFT;
2684                 dev->xaht_width_shift = EMAC4_XAHT_WIDTH_SHIFT;
2685         }
2686
2687         DBG(dev, "features     : 0x%08x / 0x%08x\n", dev->features, EMAC_FTRS_POSSIBLE);
2688         DBG(dev, "tx_fifo_size : %d (%d gige)\n", dev->tx_fifo_size, dev->tx_fifo_size_gige);
2689         DBG(dev, "rx_fifo_size : %d (%d gige)\n", dev->rx_fifo_size, dev->rx_fifo_size_gige);
2690         DBG(dev, "max_mtu      : %d\n", dev->max_mtu);
2691         DBG(dev, "OPB freq     : %d\n", dev->opb_bus_freq);
2692
2693         return 0;
2694 }
2695
/* Net device callbacks for EMACs that don't do gigabit: plain
 * (non-scatter/gather) transmit entry point and the generic ethernet
 * MTU handler.  Compare emac_gige_netdev_ops below, which differs only
 * in .ndo_start_xmit and .ndo_change_mtu. */
static const struct net_device_ops emac_netdev_ops = {
	.ndo_open		= emac_open,
	.ndo_stop		= emac_close,
	.ndo_get_stats		= emac_stats,
	.ndo_set_multicast_list	= emac_set_multicast_list,
	.ndo_do_ioctl		= emac_ioctl,
	.ndo_tx_timeout		= emac_tx_timeout,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_start_xmit		= emac_start_xmit,
	.ndo_change_mtu		= eth_change_mtu,
};
2708
/* Net device callbacks for gigabit-capable EMACs: scatter/gather-aware
 * transmit path and the driver's own MTU handler (selected in
 * emac_probe() when emac_phy_supports_gige() is true). */
static const struct net_device_ops emac_gige_netdev_ops = {
	.ndo_open		= emac_open,
	.ndo_stop		= emac_close,
	.ndo_get_stats		= emac_stats,
	.ndo_set_multicast_list	= emac_set_multicast_list,
	.ndo_do_ioctl		= emac_ioctl,
	.ndo_tx_timeout		= emac_tx_timeout,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_start_xmit		= emac_start_xmit_sg,
	.ndo_change_mtu		= emac_change_mtu,
};
2721
2722 static int __devinit emac_probe(struct of_device *ofdev,
2723                                 const struct of_device_id *match)
2724 {
2725         struct net_device *ndev;
2726         struct emac_instance *dev;
2727         struct device_node *np = ofdev->dev.of_node;
2728         struct device_node **blist = NULL;
2729         int err, i;
2730
2731         /* Skip unused/unwired EMACS.  We leave the check for an unused
2732          * property here for now, but new flat device trees should set a
2733          * status property to "disabled" instead.
2734          */
2735         if (of_get_property(np, "unused", NULL) || !of_device_is_available(np))
2736                 return -ENODEV;
2737
2738         /* Find ourselves in the bootlist if we are there */
2739         for (i = 0; i < EMAC_BOOT_LIST_SIZE; i++)
2740                 if (emac_boot_list[i] == np)
2741                         blist = &emac_boot_list[i];
2742
2743         /* Allocate our net_device structure */
2744         err = -ENOMEM;
2745         ndev = alloc_etherdev(sizeof(struct emac_instance));
2746         if (!ndev) {
2747                 printk(KERN_ERR "%s: could not allocate ethernet device!\n",
2748                        np->full_name);
2749                 goto err_gone;
2750         }
2751         dev = netdev_priv(ndev);
2752         dev->ndev = ndev;
2753         dev->ofdev = ofdev;
2754         dev->blist = blist;
2755         SET_NETDEV_DEV(ndev, &ofdev->dev);
2756
2757         /* Initialize some embedded data structures */
2758         mutex_init(&dev->mdio_lock);
2759         mutex_init(&dev->link_lock);
2760         spin_lock_init(&dev->lock);
2761         INIT_WORK(&dev->reset_work, emac_reset_work);
2762
2763         /* Init various config data based on device-tree */
2764         err = emac_init_config(dev);
2765         if (err != 0)
2766                 goto err_free;
2767
2768         /* Get interrupts. EMAC irq is mandatory, WOL irq is optional */
2769         dev->emac_irq = irq_of_parse_and_map(np, 0);
2770         dev->wol_irq = irq_of_parse_and_map(np, 1);
2771         if (dev->emac_irq == NO_IRQ) {
2772                 printk(KERN_ERR "%s: Can't map main interrupt\n", np->full_name);
2773                 goto err_free;
2774         }
2775         ndev->irq = dev->emac_irq;
2776
2777         /* Map EMAC regs */
2778         if (of_address_to_resource(np, 0, &dev->rsrc_regs)) {
2779                 printk(KERN_ERR "%s: Can't get registers address\n",
2780                        np->full_name);
2781                 goto err_irq_unmap;
2782         }
2783         // TODO : request_mem_region
2784         dev->emacp = ioremap(dev->rsrc_regs.start,
2785                              dev->rsrc_regs.end - dev->rsrc_regs.start + 1);
2786         if (dev->emacp == NULL) {
2787                 printk(KERN_ERR "%s: Can't map device registers!\n",
2788                        np->full_name);
2789                 err = -ENOMEM;
2790                 goto err_irq_unmap;
2791         }
2792
2793         /* Wait for dependent devices */
2794         err = emac_wait_deps(dev);
2795         if (err) {
2796                 printk(KERN_ERR
2797                        "%s: Timeout waiting for dependent devices\n",
2798                        np->full_name);
2799                 /*  display more info about what's missing ? */
2800                 goto err_reg_unmap;
2801         }
2802         dev->mal = dev_get_drvdata(&dev->mal_dev->dev);
2803         if (dev->mdio_dev != NULL)
2804                 dev->mdio_instance = dev_get_drvdata(&dev->mdio_dev->dev);
2805
2806         /* Register with MAL */
2807         dev->commac.ops = &emac_commac_ops;
2808         dev->commac.dev = dev;
2809         dev->commac.tx_chan_mask = MAL_CHAN_MASK(dev->mal_tx_chan);
2810         dev->commac.rx_chan_mask = MAL_CHAN_MASK(dev->mal_rx_chan);
2811         err = mal_register_commac(dev->mal, &dev->commac);
2812         if (err) {
2813                 printk(KERN_ERR "%s: failed to register with mal %s!\n",
2814                        np->full_name, dev->mal_dev->dev.of_node->full_name);
2815                 goto err_rel_deps;
2816         }
2817         dev->rx_skb_size = emac_rx_skb_size(ndev->mtu);
2818         dev->rx_sync_size = emac_rx_sync_size(ndev->mtu);
2819
2820         /* Get pointers to BD rings */
2821         dev->tx_desc =
2822             dev->mal->bd_virt + mal_tx_bd_offset(dev->mal, dev->mal_tx_chan);
2823         dev->rx_desc =
2824             dev->mal->bd_virt + mal_rx_bd_offset(dev->mal, dev->mal_rx_chan);
2825
2826         DBG(dev, "tx_desc %p" NL, dev->tx_desc);
2827         DBG(dev, "rx_desc %p" NL, dev->rx_desc);
2828
2829         /* Clean rings */
2830         memset(dev->tx_desc, 0, NUM_TX_BUFF * sizeof(struct mal_descriptor));
2831         memset(dev->rx_desc, 0, NUM_RX_BUFF * sizeof(struct mal_descriptor));
2832         memset(dev->tx_skb, 0, NUM_TX_BUFF * sizeof(struct sk_buff *));
2833         memset(dev->rx_skb, 0, NUM_RX_BUFF * sizeof(struct sk_buff *));
2834
2835         /* Attach to ZMII, if needed */
2836         if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII) &&
2837             (err = zmii_attach(dev->zmii_dev, dev->zmii_port, &dev->phy_mode)) != 0)
2838                 goto err_unreg_commac;
2839
2840         /* Attach to RGMII, if needed */
2841         if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII) &&
2842             (err = rgmii_attach(dev->rgmii_dev, dev->rgmii_port, dev->phy_mode)) != 0)
2843                 goto err_detach_zmii;
2844
2845         /* Attach to TAH, if needed */
2846         if (emac_has_feature(dev, EMAC_FTR_HAS_TAH) &&
2847             (err = tah_attach(dev->tah_dev, dev->tah_port)) != 0)
2848                 goto err_detach_rgmii;
2849
2850         /* Set some link defaults before we can find out real parameters */
2851         dev->phy.speed = SPEED_100;
2852         dev->phy.duplex = DUPLEX_FULL;
2853         dev->phy.autoneg = AUTONEG_DISABLE;
2854         dev->phy.pause = dev->phy.asym_pause = 0;
2855         dev->stop_timeout = STOP_TIMEOUT_100;
2856         INIT_DELAYED_WORK(&dev->link_work, emac_link_timer);
2857
2858         /* Find PHY if any */
2859         err = emac_init_phy(dev);
2860         if (err != 0)
2861                 goto err_detach_tah;
2862
2863         if (dev->tah_dev)
2864                 ndev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
2865         ndev->watchdog_timeo = 5 * HZ;
2866         if (emac_phy_supports_gige(dev->phy_mode)) {
2867                 ndev->netdev_ops = &emac_gige_netdev_ops;
2868                 dev->commac.ops = &emac_commac_sg_ops;
2869         } else
2870                 ndev->netdev_ops = &emac_netdev_ops;
2871         SET_ETHTOOL_OPS(ndev, &emac_ethtool_ops);
2872
2873         netif_carrier_off(ndev);
2874         netif_stop_queue(ndev);
2875
2876         err = register_netdev(ndev);
2877         if (err) {
2878                 printk(KERN_ERR "%s: failed to register net device (%d)!\n",
2879                        np->full_name, err);
2880                 goto err_detach_tah;
2881         }
2882
2883         /* Set our drvdata last as we don't want them visible until we are
2884          * fully initialized
2885          */
2886         wmb();
2887         dev_set_drvdata(&ofdev->dev, dev);
2888
2889         /* There's a new kid in town ! Let's tell everybody */
2890         wake_up_all(&emac_probe_wait);
2891
2892
2893         printk(KERN_INFO "%s: EMAC-%d %s, MAC %pM\n",
2894                ndev->name, dev->cell_index, np->full_name, ndev->dev_addr);
2895
2896         if (dev->phy_mode == PHY_MODE_SGMII)
2897                 printk(KERN_NOTICE "%s: in SGMII mode\n", ndev->name);
2898
2899         if (dev->phy.address >= 0)
2900                 printk("%s: found %s PHY (0x%02x)\n", ndev->name,
2901                        dev->phy.def->name, dev->phy.address);
2902
2903         emac_dbg_register(dev);
2904
2905         /* Life is good */
2906         return 0;
2907
2908         /* I have a bad feeling about this ... */
2909
2910  err_detach_tah:
2911         if (emac_has_feature(dev, EMAC_FTR_HAS_TAH))
2912                 tah_detach(dev->tah_dev, dev->tah_port);
2913  err_detach_rgmii:
2914         if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
2915                 rgmii_detach(dev->rgmii_dev, dev->rgmii_port);
2916  err_detach_zmii:
2917         if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
2918                 zmii_detach(dev->zmii_dev, dev->zmii_port);
2919  err_unreg_commac:
2920         mal_unregister_commac(dev->mal, &dev->commac);
2921  err_rel_deps:
2922         emac_put_deps(dev);
2923  err_reg_unmap:
2924         iounmap(dev->emacp);
2925  err_irq_unmap:
2926         if (dev->wol_irq != NO_IRQ)
2927                 irq_dispose_mapping(dev->wol_irq);
2928         if (dev->emac_irq != NO_IRQ)
2929                 irq_dispose_mapping(dev->emac_irq);
2930  err_free:
2931         kfree(ndev);
2932  err_gone:
2933         /* if we were on the bootlist, remove us as we won't show up and
2934          * wake up all waiters to notify them in case they were waiting
2935          * on us
2936          */
2937         if (blist) {
2938                 *blist = NULL;
2939                 wake_up_all(&emac_probe_wait);
2940         }
2941         return err;
2942 }
2943
2944 static int __devexit emac_remove(struct of_device *ofdev)
2945 {
2946         struct emac_instance *dev = dev_get_drvdata(&ofdev->dev);
2947
2948         DBG(dev, "remove" NL);
2949
2950         dev_set_drvdata(&ofdev->dev, NULL);
2951
2952         unregister_netdev(dev->ndev);
2953
2954         flush_scheduled_work();
2955
2956         if (emac_has_feature(dev, EMAC_FTR_HAS_TAH))
2957                 tah_detach(dev->tah_dev, dev->tah_port);
2958         if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
2959                 rgmii_detach(dev->rgmii_dev, dev->rgmii_port);
2960         if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
2961                 zmii_detach(dev->zmii_dev, dev->zmii_port);
2962
2963         mal_unregister_commac(dev->mal, &dev->commac);
2964         emac_put_deps(dev);
2965
2966         emac_dbg_unregister(dev);
2967         iounmap(dev->emacp);
2968
2969         if (dev->wol_irq != NO_IRQ)
2970                 irq_dispose_mapping(dev->wol_irq);
2971         if (dev->emac_irq != NO_IRQ)
2972                 irq_dispose_mapping(dev->emac_irq);
2973
2974         kfree(dev->ndev);
2975
2976         return 0;
2977 }
2978
/* XXX Features in here should be replaced by properties... */
/* Device-tree match table: this one driver handles all three EMAC
 * hardware generations; emac_init_config() later refines feature bits
 * from the more specific "compatible" strings. */
static struct of_device_id emac_match[] =
{
	{
		.type		= "network",
		.compatible	= "ibm,emac",
	},
	{
		.type		= "network",
		.compatible	= "ibm,emac4",
	},
	{
		.type		= "network",
		.compatible	= "ibm,emac4sync",
	},
	{},
};
MODULE_DEVICE_TABLE(of, emac_match);
2997
/* of_platform driver glue; registered from emac_init(), removed in
 * emac_exit(). */
static struct of_platform_driver emac_driver = {
	.driver = {
		.name = "emac",
		.owner = THIS_MODULE,
		.of_match_table = emac_match,
	},
	.probe = emac_probe,
	.remove = emac_remove,
};
3007
3008 static void __init emac_make_bootlist(void)
3009 {
3010         struct device_node *np = NULL;
3011         int j, max, i = 0, k;
3012         int cell_indices[EMAC_BOOT_LIST_SIZE];
3013
3014         /* Collect EMACs */
3015         while((np = of_find_all_nodes(np)) != NULL) {
3016                 const u32 *idx;
3017
3018                 if (of_match_node(emac_match, np) == NULL)
3019                         continue;
3020                 if (of_get_property(np, "unused", NULL))
3021                         continue;
3022                 idx = of_get_property(np, "cell-index", NULL);
3023                 if (idx == NULL)
3024                         continue;
3025                 cell_indices[i] = *idx;
3026                 emac_boot_list[i++] = of_node_get(np);
3027                 if (i >= EMAC_BOOT_LIST_SIZE) {
3028                         of_node_put(np);
3029                         break;
3030                 }
3031         }
3032         max = i;
3033
3034         /* Bubble sort them (doh, what a creative algorithm :-) */
3035         for (i = 0; max > 1 && (i < (max - 1)); i++)
3036                 for (j = i; j < max; j++) {
3037                         if (cell_indices[i] > cell_indices[j]) {
3038                                 np = emac_boot_list[i];
3039                                 emac_boot_list[i] = emac_boot_list[j];
3040                                 emac_boot_list[j] = np;
3041                                 k = cell_indices[i];
3042                                 cell_indices[i] = cell_indices[j];
3043                                 cell_indices[j] = k;
3044                         }
3045                 }
3046 }
3047
3048 static int __init emac_init(void)
3049 {
3050         int rc;
3051
3052         printk(KERN_INFO DRV_DESC ", version " DRV_VERSION "\n");
3053
3054         /* Init debug stuff */
3055         emac_init_debug();
3056
3057         /* Build EMAC boot list */
3058         emac_make_bootlist();
3059
3060         /* Init submodules */
3061         rc = mal_init();
3062         if (rc)
3063                 goto err;
3064         rc = zmii_init();
3065         if (rc)
3066                 goto err_mal;
3067         rc = rgmii_init();
3068         if (rc)
3069                 goto err_zmii;
3070         rc = tah_init();
3071         if (rc)
3072                 goto err_rgmii;
3073         rc = of_register_platform_driver(&emac_driver);
3074         if (rc)
3075                 goto err_tah;
3076
3077         return 0;
3078
3079  err_tah:
3080         tah_exit();
3081  err_rgmii:
3082         rgmii_exit();
3083  err_zmii:
3084         zmii_exit();
3085  err_mal:
3086         mal_exit();
3087  err:
3088         return rc;
3089 }
3090
3091 static void __exit emac_exit(void)
3092 {
3093         int i;
3094
3095         of_unregister_platform_driver(&emac_driver);
3096
3097         tah_exit();
3098         rgmii_exit();
3099         zmii_exit();
3100         mal_exit();
3101         emac_fini_debug();
3102
3103         /* Destroy EMAC boot list */
3104         for (i = 0; i < EMAC_BOOT_LIST_SIZE; i++)
3105                 if (emac_boot_list[i])
3106                         of_node_put(emac_boot_list[i]);
3107 }
3108
3109 module_init(emac_init);
3110 module_exit(emac_exit);